fix and update tests

Jack Robison 2018-05-29 16:22:30 -04:00
parent 545930cea4
commit 659632b66c
No known key found for this signature in database
GPG key ID: DF25C68FE0239BB2
11 changed files with 139 additions and 506 deletions

@@ -32,14 +32,11 @@ class Session(object):
peers can connect to this peer.
"""
def __init__(self, blob_data_payment_rate, db_dir=None,
node_id=None, peer_manager=None, dht_node_port=None,
known_dht_nodes=None, peer_finder=None,
hash_announcer=None, blob_dir=None,
blob_manager=None, peer_port=None, use_upnp=True,
rate_limiter=None, wallet=None,
dht_node_class=node.Node, blob_tracker_class=None,
payment_rate_manager_class=None, is_generous=True, external_ip=None, storage=None):
def __init__(self, blob_data_payment_rate, db_dir=None, node_id=None, peer_manager=None, dht_node_port=None,
known_dht_nodes=None, peer_finder=None, hash_announcer=None, blob_dir=None, blob_manager=None,
peer_port=None, use_upnp=True, rate_limiter=None, wallet=None, dht_node_class=node.Node,
blob_tracker_class=None, payment_rate_manager_class=None, is_generous=True, external_ip=None,
storage=None):
"""@param blob_data_payment_rate: The default payment rate for blob data
@param db_dir: The directory in which levelDB files should be stored

@@ -63,10 +63,7 @@ class TreeRoutingTable(object):
contacts = self.get_contacts()
distance = Distance(self._parentNodeID)
contacts.sort(key=lambda c: distance(c.id))
if len(contacts) < constants.k:
kth_contact = contacts[-1]
else:
kth_contact = contacts[constants.k-1]
kth_contact = contacts[-1] if len(contacts) < constants.k else contacts[constants.k-1]
return distance(toAdd) < distance(kth_contact.id)
def addContact(self, contact):
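For context, Distance is the Kademlia XOR metric used throughout this commit: a node id is read as a big-endian integer, and the distance between two ids is their bitwise XOR. A minimal sketch, assuming 48-byte Python 2 string ids as in the surrounding code (the project's own Distance class may differ in detail):

class Distance(object):
    """Callable XOR distance from a fixed node id."""
    def __init__(self, key):
        # interpret the raw byte string as a big-endian integer once
        self.val_key_one = long(key.encode('hex'), 16)

    def __call__(self, key_two):
        return self.val_key_one ^ long(key_two.encode('hex'), 16)

With this, distance(c.id) in the sort key above orders contacts from nearest to farthest from the parent node, so the kth entry is the cutoff the one-liner selects.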
@@ -91,7 +88,6 @@ class TreeRoutingTable(object):
# Retry the insertion attempt
return self.addContact(contact)
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
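For reference, the rule the comment is describing: when a full bucket cannot be split, ping its least-recently-seen contact; if it replies, it keeps its slot (moved to the tail) and the newcomer is dropped, otherwise it is evicted in favor of the newcomer. A schematic sketch with hypothetical helper names, not the project's code:

def handle_full_bucket(bucket, new_contact, ping):
    head = bucket.contacts[0]              # least recently contacted
    if ping(head):                         # still alive: keep it,
        bucket.contacts.remove(head)       # refresh its position,
        bucket.contacts.append(head)       # and drop the newcomer
    else:
        bucket.contacts[0] = new_contact   # dead: replace it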

@@ -1,3 +1,4 @@
from twisted.trial import unittest
from dht_test_environment import TestKademliaBase
@@ -8,3 +9,26 @@ class TestKademliaBootstrap(TestKademliaBase):
def test_bootstrap_seed_nodes(self):
pass
@unittest.SkipTest
class TestKademliaBootstrap40Nodes(TestKademliaBase):
network_size = 40
def test_bootstrap_network(self):
pass
class TestKademliaBootstrap80Nodes(TestKademliaBase):
network_size = 80
def test_bootstrap_network(self):
pass
@unittest.SkipTest
class TestKademliaBootstrap120Nodes(TestKademliaBase):
network_size = 120
def test_bootstrap_network(self):
pass

@@ -1,5 +1,6 @@
import logging
from twisted.internet import defer
from lbrynet.dht import constants
from dht_test_environment import TestKademliaBase
log = logging.getLogger()
@@ -12,7 +13,6 @@ class TestPeerExpiration(TestKademliaBase):
def test_expire_stale_peers(self):
removed_addresses = set()
removed_nodes = []
self.show_info()
# stop 5 nodes
for _ in range(5):
@@ -26,16 +26,15 @@ class TestPeerExpiration(TestKademliaBase):
self.assertSetEqual(offline_addresses, removed_addresses)
get_nodes_with_stale_contacts = lambda: filter(lambda node: any(contact.address in offline_addresses
for contact in node.contacts), self.nodes + self._seeds)
for contact in node.contacts),
self.nodes + self._seeds)
self.assertRaises(AssertionError, self.verify_all_nodes_are_routable)
self.assertTrue(len(get_nodes_with_stale_contacts()) > 1)
# run the network for an hour, which should expire the removed nodes
for _ in range(60):
log.info("Time is %f, nodes with stale contacts: %i/%i", self.clock.seconds(),
len(get_nodes_with_stale_contacts()), len(self.nodes + self._seeds))
self.pump_clock(60)
self.assertTrue(len(get_nodes_with_stale_contacts()) == 0)
# run the network long enough for two failures to happen
self.pump_clock(constants.checkRefreshInterval * 3)
self.assertEquals(len(get_nodes_with_stale_contacts()), 0)
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
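These tests run against a simulated reactor clock, which is what makes "an hour" of network time cheap to simulate. A minimal sketch of the pump_clock helper on top of twisted.internet.task.Clock; the real harness in dht_test_environment may differ, but the two-argument call used elsewhere in this commit (total seconds, step) suggests roughly this shape:

from twisted.internet import task

class ClockedHarness(object):
    def __init__(self):
        self.clock = task.Clock()        # deterministic fake reactor clock

    def pump_clock(self, n_seconds, step=1.0):
        # advance in small steps so timers scheduled by earlier ticks
        # (e.g. LoopingCalls) fire in order instead of all at once
        remaining = n_seconds
        while remaining > 0:
            self.clock.advance(min(step, remaining))
            remaining -= step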

@@ -1,5 +1,6 @@
import logging
from twisted.internet import defer
from lbrynet.dht import constants
from dht_test_environment import TestKademliaBase
log = logging.getLogger()
@@ -8,63 +9,30 @@ log = logging.getLogger()
class TestReJoin(TestKademliaBase):
network_size = 40
@defer.inlineCallbacks
def setUp(self):
yield super(TestReJoin, self).setUp()
self.removed_node = self.nodes[20]
self.nodes.remove(self.removed_node)
yield self.run_reactor(1, [self.removed_node.stop()])
self.pump_clock(constants.checkRefreshInterval * 2)
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
@defer.inlineCallbacks
def test_re_join(self):
removed_node = self.nodes[0]
self.nodes.remove(removed_node)
yield self.run_reactor(1, [removed_node.stop()])
# run the network for an hour, which should expire the removed node
self.pump_clock(3600)
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
self.nodes.append(removed_node)
self.nodes.append(self.removed_node)
yield self.run_reactor(
31, [removed_node.start([(seed_name, 4444) for seed_name in sorted(self.seed_dns.keys())])]
31, [self.removed_node.start([(seed_name, 4444) for seed_name in sorted(self.seed_dns.keys())])]
)
self.pump_clock(901)
self.pump_clock(constants.checkRefreshInterval*2)
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
@defer.inlineCallbacks
def test_re_join_with_new_ip(self):
self.removed_node.externalIP = "10.43.43.43"
return self.test_re_join()
removed_node = self.nodes[0]
self.nodes.remove(removed_node)
yield self.run_reactor(1, [removed_node.stop()])
# run the network for an hour, which should expire the removed node
for _ in range(60):
self.pump_clock(60)
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
removed_node.externalIP = "10.43.43.43"
self.nodes.append(removed_node)
yield self.run_reactor(
31, [removed_node.start([(seed_name, 4444) for seed_name in sorted(self.seed_dns.keys())])]
)
self.pump_clock(901)
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
@defer.inlineCallbacks
def test_re_join_with_new_node_id(self):
removed_node = self.nodes[0]
self.nodes.remove(removed_node)
yield self.run_reactor(1, [removed_node.stop()])
# run the network for an hour, which should expire the removed node
for _ in range(60):
self.pump_clock(60)
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
removed_node.node_id = removed_node._generateID()
self.nodes.append(removed_node)
yield self.run_reactor(
31, [removed_node.start([(seed_name, 4444) for seed_name in sorted(self.seed_dns.keys())])]
)
self.pump_clock(901)
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
self.removed_node.node_id = self.removed_node._generateID()
return self.test_re_join()

@@ -8,12 +8,12 @@ import logging
log = logging.getLogger()
class TestStore(TestKademliaBase):
class TestStoreExpiration(TestKademliaBase):
network_size = 40
@defer.inlineCallbacks
def test_store_and_expire(self):
blob_hash = generate_id()
blob_hash = generate_id(1)
announcing_node = self.nodes[20]
# announce the blob
announce_d = announcing_node.announceHaveBlob(blob_hash)
@@ -62,8 +62,8 @@ class TestStore(TestKademliaBase):
self.assertTrue(blob_hash not in node._dataStore._dict) # the looping call should have fired
@defer.inlineCallbacks
def test_refresh_storing_peers(self):
blob_hash = generate_id()
def test_storing_node_went_stale_then_came_back(self):
blob_hash = generate_id(1)
announcing_node = self.nodes[20]
# announce the blob
announce_d = announcing_node.announceHaveBlob(blob_hash)
@@ -93,11 +93,42 @@ class TestStore(TestKademliaBase):
self.assertEquals(expanded_peers[0],
(announcing_node.node_id, announcing_node.externalIP, announcing_node.peerPort))
self.pump_clock(constants.checkRefreshInterval + 1) # tick the clock forward (so the nodes refresh)
self.pump_clock(constants.checkRefreshInterval*2)
# stop the node
self.nodes.remove(announcing_node)
yield self.run_reactor(31, [announcing_node.stop()])
# run the network for an hour, which should expire the removed node and turn the announced value stale
self.pump_clock(constants.checkRefreshInterval * 4, constants.checkRefreshInterval/2)
self.verify_all_nodes_are_routable()
# make sure the contact isn't returned as a peer for the blob, but that we still have the entry in the
# datastore in case the node comes back
for node in storing_nodes:
self.assertFalse(node._dataStore.hasPeersForBlob(blob_hash))
datastore_result = node._dataStore.getPeersForBlob(blob_hash)
self.assertEquals(len(datastore_result), 0)
self.assertEquals(len(node._dataStore.getStoringContacts()), 1)
self.assertTrue(blob_hash in node._dataStore._dict)
# # bring the announcing node back online
self.nodes.append(announcing_node)
yield self.run_reactor(
31, [announcing_node.start([(seed_name, 4444) for seed_name in sorted(self.seed_dns.keys())])]
)
self.pump_clock(constants.checkRefreshInterval * 2)
self.verify_all_nodes_are_routable()
# now the announcing node should once again be returned as a peer for the blob
for node in storing_nodes:
self.assertTrue(node._dataStore.hasPeersForBlob(blob_hash))
datastore_result = node._dataStore.getPeersForBlob(blob_hash)
self.assertEquals(len(datastore_result), 1)
self.assertEquals(len(node._dataStore.getStoringContacts()), 1)
self.assertTrue(blob_hash in node._dataStore._dict)
# verify the announced blob expires in the storing nodes datastores
self.clock.advance(constants.dataExpireTimeout) # skip the clock directly ahead
self.clock.advance(constants.dataExpireTimeout) # skip the clock directly ahead
for node in storing_nodes:
self.assertFalse(node._dataStore.hasPeersForBlob(blob_hash))
datastore_result = node._dataStore.getPeersForBlob(blob_hash)
@@ -111,76 +142,4 @@ class TestStore(TestKademliaBase):
datastore_result = node._dataStore.getPeersForBlob(blob_hash)
self.assertEquals(len(datastore_result), 0)
self.assertEquals(len(node._dataStore.getStoringContacts()), 0)
self.assertTrue(blob_hash not in node._dataStore._dict) # the looping call should have fired after
class TestStoringNodeWentStale(TestKademliaBase):
network_size = 40
@defer.inlineCallbacks
def test_storing_node_went_stale_then_came_back(self):
blob_hash = generate_id()
announcing_node = self.nodes[20]
# announce the blob
announce_d = announcing_node.announceHaveBlob(blob_hash)
announce_time = self.clock.seconds()
self.pump_clock(5)
storing_node_ids = yield announce_d
all_nodes = set(self.nodes).union(set(self._seeds))
# verify the nodes we think stored it did actually store it
storing_nodes = [node for node in all_nodes if node.node_id.encode('hex') in storing_node_ids]
self.assertEquals(len(storing_nodes), len(storing_node_ids))
self.assertEquals(len(storing_nodes), constants.k)
for node in storing_nodes:
self.assertTrue(node._dataStore.hasPeersForBlob(blob_hash))
datastore_result = node._dataStore.getPeersForBlob(blob_hash)
self.assertEquals(map(lambda contact: (contact.id, contact.address, contact.port),
node._dataStore.getStoringContacts()), [(announcing_node.node_id,
announcing_node.externalIP,
announcing_node.port)])
self.assertEquals(len(datastore_result), 1)
expanded_peers = []
for peer in datastore_result:
host = ".".join([str(ord(d)) for d in peer[:4]])
port, = struct.unpack('>H', peer[4:6])
peer_node_id = peer[6:]
if (host, port, peer_node_id) not in expanded_peers:
expanded_peers.append((peer_node_id, host, port))
self.assertEquals(expanded_peers[0],
(announcing_node.node_id, announcing_node.externalIP, announcing_node.peerPort))
self.nodes.remove(announcing_node)
yield self.run_reactor(1, [announcing_node.stop()])
# run the network for an hour, which should expire the removed node and the announced value
self.pump_clock(3600)
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
for node in storing_nodes: # make sure the contact isn't returned as a peer for the blob, but that
# we still have the entry in the datastore in case the node returns
self.assertFalse(node._dataStore.hasPeersForBlob(blob_hash))
datastore_result = node._dataStore.getPeersForBlob(blob_hash)
self.assertEquals(len(datastore_result), 0)
self.assertEquals(len(node._dataStore.getStoringContacts()), 1)
self.assertTrue(blob_hash in node._dataStore._dict)
# bring the announcing node back online
self.nodes.append(announcing_node)
yield self.run_reactor(
31, [announcing_node.start([(seed_name, 4444) for seed_name in sorted(self.seed_dns.keys())])]
)
self.pump_clock(24*60+1) # FIXME: this should work after 12 minutes + 1 second, yet it doesnt
self.verify_all_nodes_are_routable()
self.verify_all_nodes_are_pingable()
# now the announcing node should once again be returned as a peer for the blob
for node in storing_nodes:
self.assertTrue(node._dataStore.hasPeersForBlob(blob_hash))
datastore_result = node._dataStore.getPeersForBlob(blob_hash)
self.assertEquals(len(datastore_result), 1)
self.assertEquals(len(node._dataStore.getStoringContacts()), 1)
self.assertTrue(blob_hash in node._dataStore._dict)
# TODO: handle the case where the announcing node re joins with a different address from what is stored
self.assertTrue(blob_hash not in node._dataStore._dict) # the looping call should have fired
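The expanded_peers loop above unpacks the DHT's compact peer encoding: four bytes of IPv4 address, a two-byte big-endian port, then the node id. A round-trip sketch under that assumption (Python 2 byte strings, hypothetical helper names):

import struct

def encode_compact_peer(host, port, node_id):
    # '10.0.0.1', 4444, <node id> -> compact byte string
    packed_ip = ''.join(chr(int(octet)) for octet in host.split('.'))
    return packed_ip + struct.pack('>H', port) + node_id

def decode_compact_peer(compact):
    host = '.'.join(str(ord(c)) for c in compact[:4])
    port, = struct.unpack('>H', compact[4:6])
    return host, port, compact[6:]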

@@ -15,6 +15,14 @@ class ContactOperatorsTest(unittest.TestCase):
self.secondContactCopy = self.contact_manager.make_contact(self.node_ids[0], '192.168.0.1', 1000, None, 32)
self.firstContactDifferentValues = self.contact_manager.make_contact(self.node_ids[1], '192.168.1.20',
1000, None, 50)
self.assertRaises(ValueError, self.contact_manager.make_contact, self.node_ids[1], '192.168.1.20',
100000, None)
self.assertRaises(ValueError, self.contact_manager.make_contact, self.node_ids[1], '192.168.1.20.1',
1000, None)
self.assertRaises(ValueError, self.contact_manager.make_contact, self.node_ids[1], 'this is not an ip',
1000, None)
self.assertRaises(ValueError, self.contact_manager.make_contact, "this is not a node id", '192.168.1.20.1',
1000, None)
def testNoDuplicateContactObjects(self):
self.assertTrue(self.secondContact is self.secondContactCopy)
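The new assertions pin down make_contact's input validation: an out-of-range port, a malformed IP, and a malformed node id should each raise ValueError. A hypothetical mirror of those checks (the real method's rules may be stricter):

import socket

def validate_contact(node_id, address, port):
    if not isinstance(node_id, str) or len(node_id) != 48:
        raise ValueError("invalid node id")    # rejects "this is not a node id"
    try:
        socket.inet_aton(address)              # rejects "this is not an ip"
    except socket.error:
        raise ValueError("invalid ip address")
    if address.count('.') != 3:                # rejects "192.168.1.20.1" and short forms
        raise ValueError("invalid ip address")
    if not 0 <= port <= 65535:                 # rejects 100000
        raise ValueError("invalid port")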
@@ -74,7 +82,7 @@ class TestContactLastReplied(unittest.TestCase):
def test_good_turned_stale(self):
self.contact.update_last_replied()
self.assertTrue(self.contact.contact_is_good is True)
self.clock.advance((constants.refreshTimeout / 4) - 1)
self.clock.advance(constants.checkRefreshInterval - 1)
self.assertTrue(self.contact.contact_is_good is True)
self.clock.advance(1)
self.assertTrue(self.contact.contact_is_good is None)
@@ -112,7 +120,7 @@ class TestContactLastReplied(unittest.TestCase):
self.assertTrue(self.contact.contact_is_good is True)
# it goes stale
self.clock.advance((constants.refreshTimeout / 4) - 2)
self.clock.advance(constants.checkRefreshInterval - 2)
self.assertTrue(self.contact.contact_is_good is True)
self.clock.advance(1)
self.assertTrue(self.contact.contact_is_good is None)
@@ -134,7 +142,7 @@ class TestContactLastRequested(unittest.TestCase):
self.assertTrue(self.contact.contact_is_good is True)
# it goes stale
self.clock.advance((constants.refreshTimeout / 4) - 1)
self.clock.advance(constants.checkRefreshInterval - 1)
self.assertTrue(self.contact.contact_is_good is True)
self.clock.advance(1)
self.assertTrue(self.contact.contact_is_good is None)
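The timing edits above are substitutions rather than retunings: they assume the relationship in lbrynet.dht.constants where the refresh check runs four times per refresh window, so the intervals advanced by the tests are numerically unchanged:

# assumed values, matching common kademlia defaults
refreshTimeout = 3600                        # one hour
checkRefreshInterval = refreshTimeout / 4    # 900 seconds
# hence (refreshTimeout / 4) - 1 == checkRefreshInterval - 1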

@@ -1,130 +0,0 @@
#!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
from twisted.trial import unittest
import time
import hashlib
from lbrynet.dht.datastore import DictDataStore
from lbrynet.dht import constants
class DictDataStoreTest(unittest.TestCase):
""" Basic tests case for the reference DataStore API and implementation """
def setUp(self):
self.ds = DictDataStore()
h = hashlib.sha384()
h.update('g')
hashKey = h.digest()
h2 = hashlib.sha1()
h2.update('dried')
hashKey2 = h2.digest()
h3 = hashlib.sha1()
h3.update('Boozoo Bajou - 09 - S.I.P.mp3')
hashKey3 = h3.digest()
#self.cases = (('a', 'hello there\nthis is a test'),
# (hashKey3, '1 2 3 4 5 6 7 8 9 0'))
self.cases = ((hashKey, 'test1test1test1test1test1t'),
(hashKey, 'test2'),
(hashKey, 'test3test3test3test3test3test3test3test3'),
(hashKey2, 'test4'),
(hashKey3, 'test5'),
(hashKey3, 'test6'))
def testReadWrite(self):
# Test write ability
for key, value in self.cases:
try:
now = int(time.time())
self.ds.addPeerToBlob(key, value, now, now, 'node1')
except Exception:
import traceback
self.fail('Failed writing the following data: key: "%s" '
'data: "%s"\n The error was: %s:' %
(key, value, traceback.format_exc(5)))
# Verify writing (test query ability)
for key, value in self.cases:
try:
self.failUnless(self.ds.hasPeersForBlob(key),
'Key "%s" not found in DataStore! DataStore key dump: %s' %
(key, self.ds.keys()))
except Exception:
import traceback
self.fail(
'Failed verifying that the following key exists: "%s"\n The error was: %s:' %
(key, traceback.format_exc(5)))
# Read back the data
for key, value in self.cases:
self.failUnless(value in self.ds.getPeersForBlob(key),
'DataStore returned invalid data! Expected "%s", got "%s"' %
(value, self.ds.getPeersForBlob(key)))
def testNonExistentKeys(self):
for key, value in self.cases:
self.failIf(key in self.ds.keys(), 'DataStore reports it has non-existent key: "%s"' %
key)
def testExpires(self):
now = int(time.time())
h1 = hashlib.sha1()
h1.update('test1')
key1 = h1.digest()
h2 = hashlib.sha1()
h2.update('test2')
key2 = h2.digest()
td = constants.dataExpireTimeout - 100
td2 = td + td
self.ds.addPeerToBlob(h1, 'val1', now - td, now - td, '1')
self.ds.addPeerToBlob(h1, 'val2', now - td2, now - td2, '2')
self.ds.addPeerToBlob(h2, 'val3', now - td2, now - td2, '3')
self.ds.addPeerToBlob(h2, 'val4', now, now, '4')
self.ds.removeExpiredPeers()
self.failUnless(
'val1' in self.ds.getPeersForBlob(h1),
'DataStore deleted an unexpired value! Value %s, publish time %s, current time %s' %
('val1', str(now - td), str(now)))
self.failIf(
'val2' in self.ds.getPeersForBlob(h1),
'DataStore failed to delete an expired value! '
'Value %s, publish time %s, current time %s' %
('val2', str(now - td2), str(now)))
self.failIf(
'val3' in self.ds.getPeersForBlob(h2),
'DataStore failed to delete an expired value! '
'Value %s, publish time %s, current time %s' %
('val3', str(now - td2), str(now)))
self.failUnless(
'val4' in self.ds.getPeersForBlob(h2),
'DataStore deleted an unexpired value! Value %s, publish time %s, current time %s' %
('val4', str(now), str(now)))
# # First write with fake values
# for key, value in self.cases:
# except Exception:
#
# # write this stuff a second time, with the real values
# for key, value in self.cases:
# except Exception:
#
# # Read back the data
# for key, value in self.cases:
# # First some values
# for key, value in self.cases:
# except Exception:
#
#
# # Delete an item from the data
# # First some values with metadata
# for key, value in self.cases:
# except Exception:
#
# # Read back the meta-data
# for key, value in self.cases:
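For reference, the testExpires case in this removed file checked that a sweep drops peers whose original publish time is older than constants.dataExpireTimeout while keeping fresher ones. A minimal sketch of such a sweep, assuming the store maps a blob hash to a list of (peer, lastPublished, originallyPublished, node_id) tuples:

def remove_expired_peers(store, now, timeout):
    for key in store.keys():                 # keys() returns a copy in Python 2
        store[key] = [entry for entry in store[key]
                      if now - entry[2] < timeout]
        if not store[key]:
            del store[key]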

@@ -85,7 +85,7 @@ class NodeContactTest(unittest.TestCase):
h = hashlib.sha384()
h.update('node1')
contactID = h.digest()
contact = self.node.contact_manager.make_contact(contactID, '127.0.0.1', 91824, self.node._protocol)
contact = self.node.contact_manager.make_contact(contactID, '127.0.0.1', 9182, self.node._protocol)
# Now add it...
yield self.node.addContact(contact)
# ...and request the closest nodes to it using FIND_NODE
@@ -99,7 +99,7 @@ class NodeContactTest(unittest.TestCase):
def testAddSelfAsContact(self):
""" Tests the node's behaviour when attempting to add itself as a contact """
# Create a contact with the same ID as the local node's ID
contact = self.node.contact_manager.make_contact(self.node.node_id, '127.0.0.1', 91824, None)
contact = self.node.contact_manager.make_contact(self.node.node_id, '127.0.0.1', 9182, None)
# Now try to add it
yield self.node.addContact(contact)
# ...and request the closest nodes to it using FIND_NODE

@@ -1,167 +0,0 @@
# import time
# import unittest
# import twisted.internet.selectreactor
#
# import lbrynet.dht.protocol
# import lbrynet.dht.contact
# import lbrynet.dht.constants
# import lbrynet.dht.msgtypes
# from lbrynet.dht.error import TimeoutError
# from lbrynet.dht.node import Node, rpcmethod
#
#
# class KademliaProtocolTest(unittest.TestCase):
# """ Test case for the Protocol class """
#
# def setUp(self):
# del lbrynet.dht.protocol.reactor
# lbrynet.dht.protocol.reactor = twisted.internet.selectreactor.SelectReactor()
# self.node = Node(node_id='1' * 48, udpPort=9182, externalIP="127.0.0.1")
# self.protocol = lbrynet.dht.protocol.KademliaProtocol(self.node)
#
# def testReactor(self):
# """ Tests if the reactor can start/stop the protocol correctly """
# lbrynet.dht.protocol.reactor.listenUDP(0, self.protocol)
# lbrynet.dht.protocol.reactor.callLater(0, lbrynet.dht.protocol.reactor.stop)
# lbrynet.dht.protocol.reactor.run()
#
# def testRPCTimeout(self):
# """ Tests if a RPC message sent to a dead remote node times out correctly """
#
# @rpcmethod
# def fake_ping(*args, **kwargs):
# time.sleep(lbrynet.dht.constants.rpcTimeout + 1)
# return 'pong'
#
# real_ping = self.node.ping
# real_timeout = lbrynet.dht.constants.rpcTimeout
# real_attempts = lbrynet.dht.constants.rpcAttempts
# lbrynet.dht.constants.rpcAttempts = 1
# lbrynet.dht.constants.rpcTimeout = 1
# self.node.ping = fake_ping
# deadContact = lbrynet.dht.contact.Contact('2' * 48, '127.0.0.1', 9182, self.protocol)
# self.node.addContact(deadContact)
# # Make sure the contact was added
# self.failIf(deadContact not in self.node.contacts,
# 'Contact not added to fake node (error in test code)')
# lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol)
#
# # Run the PING RPC (which should raise a timeout error)
# df = self.protocol.sendRPC(deadContact, 'ping', {})
#
# def check_timeout(err):
# self.assertEqual(type(err), TimeoutError)
#
# df.addErrback(check_timeout)
#
# def reset_values():
# self.node.ping = real_ping
# lbrynet.dht.constants.rpcTimeout = real_timeout
# lbrynet.dht.constants.rpcAttempts = real_attempts
#
# # See if the contact was removed due to the timeout
# def check_removed_contact():
# self.failIf(deadContact in self.node.contacts,
# 'Contact was not removed after RPC timeout; check exception types.')
#
# df.addCallback(lambda _: reset_values())
#
# # Stop the reactor if a result arrives (timeout or not)
# df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop())
# df.addCallback(lambda _: check_removed_contact())
# lbrynet.dht.protocol.reactor.run()
#
# def testRPCRequest(self):
# """ Tests if a valid RPC request is executed and responded to correctly """
# remoteContact = lbrynet.dht.contact.Contact('2' * 48, '127.0.0.1', 9182, self.protocol)
# self.node.addContact(remoteContact)
# self.error = None
#
# def handleError(f):
# self.error = 'An RPC error occurred: %s' % f.getErrorMessage()
#
# def handleResult(result):
# expectedResult = 'pong'
# if result != expectedResult:
# self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' \
# % (expectedResult, result)
#
# # Publish the "local" node on the network
# lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol)
# # Simulate the RPC
# df = remoteContact.ping()
# df.addCallback(handleResult)
# df.addErrback(handleError)
# df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop())
# lbrynet.dht.protocol.reactor.run()
# self.failIf(self.error, self.error)
# # The list of sent RPC messages should be empty at this stage
# self.failUnlessEqual(len(self.protocol._sentMessages), 0,
# 'The protocol is still waiting for a RPC result, '
# 'but the transaction is already done!')
#
# def testRPCAccess(self):
# """ Tests invalid RPC requests
# Verifies that a RPC request for an existing but unpublished
# method is denied, and that the associated (remote) exception gets
# raised locally """
# remoteContact = lbrynet.dht.contact.Contact('2' * 48, '127.0.0.1', 9182, self.protocol)
# self.node.addContact(remoteContact)
# self.error = None
#
# def handleError(f):
# try:
# f.raiseException()
# except AttributeError, e:
# # This is the expected outcome since the remote node did not publish the method
# self.error = None
# except Exception, e:
# self.error = 'The remote method failed, but the wrong exception was raised; ' \
# 'expected AttributeError, got %s' % type(e)
#
# def handleResult(result):
# self.error = 'The remote method executed successfully, returning: "%s"; ' \
# 'this RPC should not have been allowed.' % result
#
# # Publish the "local" node on the network
# lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol)
# # Simulate the RPC
# df = remoteContact.not_a_rpc_function()
# df.addCallback(handleResult)
# df.addErrback(handleError)
# df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop())
# lbrynet.dht.protocol.reactor.run()
# self.failIf(self.error, self.error)
# # The list of sent RPC messages should be empty at this stage
# self.failUnlessEqual(len(self.protocol._sentMessages), 0,
# 'The protocol is still waiting for a RPC result, '
# 'but the transaction is already done!')
#
# def testRPCRequestArgs(self):
# """ Tests if an RPC requiring arguments is executed correctly """
# remoteContact = lbrynet.dht.contact.Contact('2' * 48, '127.0.0.1', 9182, self.protocol)
# self.node.addContact(remoteContact)
# self.error = None
#
# def handleError(f):
# self.error = 'An RPC error occurred: %s' % f.getErrorMessage()
#
# def handleResult(result):
# expectedResult = 'pong'
# if result != expectedResult:
# self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % \
# (expectedResult, result)
#
# # Publish the "local" node on the network
# lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol)
# # Simulate the RPC
# df = remoteContact.ping()
# df.addCallback(handleResult)
# df.addErrback(handleError)
# df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop())
# lbrynet.dht.protocol.reactor.run()
# self.failIf(self.error, self.error)
# # The list of sent RPC messages should be empty at this stage
# self.failUnlessEqual(len(self.protocol._sentMessages), 0,
# 'The protocol is still waiting for a RPC result, '
# 'but the transaction is already done!')

@@ -27,22 +27,13 @@ class TreeRoutingTableTest(unittest.TestCase):
""" Test to see if distance method returns correct result"""
# testList holds a couple 3-tuple (variable1, variable2, result)
basicTestList = [('123456789', '123456789', 0L), ('12345', '98765', 34527773184L)]
basicTestList = [(chr(170) * 48, chr(85) * 48, long((chr(255) * 48).encode('hex'), 16))]
for test in basicTestList:
result = Distance(test[0])(test[1])
self.failIf(result != test[2], 'Result of _distance() should be %s but %s returned' %
(test[2], result))
baseIp = '146.64.19.111'
ipTestList = ['146.64.29.222', '192.68.19.333']
distanceOne = Distance(baseIp)(ipTestList[0])
distanceTwo = Distance(baseIp)(ipTestList[1])
self.failIf(distanceOne > distanceTwo, '%s should be closer to the base ip %s than %s' %
(ipTestList[0], baseIp, ipTestList[1]))
@defer.inlineCallbacks
def testAddContact(self):
""" Tests if a contact can be added and retrieved correctly """
@@ -50,7 +41,7 @@ class TreeRoutingTableTest(unittest.TestCase):
h = hashlib.sha384()
h.update('node2')
contactID = h.digest()
contact = self.contact_manager.make_contact(contactID, '127.0.0.1', 91824, self.protocol)
contact = self.contact_manager.make_contact(contactID, '127.0.0.1', 9182, self.protocol)
# Now add it...
yield self.routingTable.addContact(contact)
# ...and request the closest nodes to it (will retrieve it)
@@ -66,7 +57,7 @@ class TreeRoutingTableTest(unittest.TestCase):
h = hashlib.sha384()
h.update('node2')
contactID = h.digest()
contact = self.contact_manager.make_contact(contactID, '127.0.0.1', 91824, self.protocol)
contact = self.contact_manager.make_contact(contactID, '127.0.0.1', 9182, self.protocol)
# Now add it...
yield self.routingTable.addContact(contact)
# ...and get it again
@@ -80,7 +71,7 @@ class TreeRoutingTableTest(unittest.TestCase):
"""
# Create a contact with the same ID as the local node's ID
contact = self.contact_manager.make_contact(self.nodeID, '127.0.0.1', 91824, self.protocol)
contact = self.contact_manager.make_contact(self.nodeID, '127.0.0.1', 9182, self.protocol)
# Now try to add it
yield self.routingTable.addContact(contact)
# ...and request the closest nodes to it using FIND_NODE
@@ -94,7 +85,7 @@ class TreeRoutingTableTest(unittest.TestCase):
h = hashlib.sha384()
h.update('node2')
contactID = h.digest()
contact = self.contact_manager.make_contact(contactID, '127.0.0.1', 91824, self.protocol)
contact = self.contact_manager.make_contact(contactID, '127.0.0.1', 9182, self.protocol)
# Now add it...
yield self.routingTable.addContact(contact)
# Verify addition
@@ -113,7 +104,7 @@ class TreeRoutingTableTest(unittest.TestCase):
h = hashlib.sha384()
h.update('remote node %d' % i)
nodeID = h.digest()
contact = self.contact_manager.make_contact(nodeID, '127.0.0.1', 91824, self.protocol)
contact = self.contact_manager.make_contact(nodeID, '127.0.0.1', 9182, self.protocol)
yield self.routingTable.addContact(contact)
self.failUnlessEqual(len(self.routingTable._buckets), 1,
'Only k nodes have been added; the first k-bucket should now '
@@ -122,7 +113,7 @@ class TreeRoutingTableTest(unittest.TestCase):
h = hashlib.sha384()
h.update('yet another remote node')
nodeID = h.digest()
contact = self.contact_manager.make_contact(nodeID, '127.0.0.1', 91824, self.protocol)
contact = self.contact_manager.make_contact(nodeID, '127.0.0.1', 9182, self.protocol)
yield self.routingTable.addContact(contact)
self.failUnlessEqual(len(self.routingTable._buckets), 2,
'k+1 nodes have been added; the first k-bucket should have been '
@@ -140,55 +131,43 @@ class TreeRoutingTableTest(unittest.TestCase):
@defer.inlineCallbacks
def testFullSplit(self):
"""
Test that a bucket is not split if it full, but does not cover the range
containing the parent node's ID
Test that a bucket is not split if it is full, but the new contact is not closer than the kth closest contact
"""
self.routingTable._parentNodeID = 49 * 'a'
# more than 384 bits; this will not be in the range of _any_ k-bucket
self.routingTable._parentNodeID = 48 * chr(255)
node_ids = [
"d4a27096d81e3c4efacce9f940e887c956f736f859c8037b556efec6fdda5c388ae92bae96b9eb204b24da2f376c4282",
"553c0bfe119c35247c8cb8124091acb5c05394d5be7b019f6b1a5e18036af7a6148711ad6d47a0f955047bf9eac868aa",
"671a179c251c90863f46e7ef54264cbbad743fe3127871064d8f051ce4124fcbd893339e11358f621655e37bd6a74097",
"f896bafeb7ffb14b92986e3b08ee06807fdd5be34ab43f4f52559a5bbf0f12dedcd8556801f97c334b3ac9be7a0f7a93",
"33a7deb380eb4707211184798b66840c22c396e8cde00b75b64f9ead09bad1141b56d35a93bd511adb28c6708eecc39d",
"5e1e8ca575b536ae5ec52f7766ada904a64ebaad805909b1067ec3c984bf99909c9fcdd37e04ea5c5c043ea8830100ce",
"ee18857d0c1f7fc413424f3ffead4871f2499646d4c2ac16f35f0c8864318ca21596915f18f85a3a25f8ceaa56c844aa",
"68039f78fbf130873e7cce2f71f39d217dcb7f3fe562d64a85de4e21ee980b4a800f51bf6851d2bbf10e6590fe0d46b2"
"100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"ff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
]
# Add k contacts
for i in range(constants.k):
h = hashlib.sha384()
h.update('remote node %d' % i)
nodeID = h.digest()
self.assertEquals(nodeID, node_ids[i].decode('hex'))
contact = self.contact_manager.make_contact(nodeID, '127.0.0.1', 91824, self.protocol)
for nodeID in node_ids:
# self.assertEquals(nodeID, node_ids[i].decode('hex'))
contact = self.contact_manager.make_contact(nodeID.decode('hex'), '127.0.0.1', 9182, self.protocol)
yield self.routingTable.addContact(contact)
self.failUnlessEqual(len(self.routingTable._buckets), 1)
self.failUnlessEqual(len(self.routingTable._buckets[0]._contacts), constants.k)
self.failUnlessEqual(len(self.routingTable._buckets), 2)
self.failUnlessEqual(len(self.routingTable._buckets[0]._contacts), 8)
self.failUnlessEqual(len(self.routingTable._buckets[1]._contacts), 2)
# try adding a contact who is further from us than the k'th known contact
h = hashlib.sha384()
h.update('yet another remote node!')
nodeID = h.digest()
contact = self.contact_manager.make_contact(nodeID, '127.0.0.1', 91824, self.protocol)
yield self.routingTable.addContact(contact)
self.failUnlessEqual(len(self.routingTable._buckets), 1)
self.failUnlessEqual(len(self.routingTable._buckets[0]._contacts), constants.k)
self.failIf(contact in self.routingTable._buckets[0]._contacts)
# try adding a contact who is closer to us than the k'th known contact
h = hashlib.sha384()
h.update('yet another remote node')
nodeID = h.digest()
contact = self.contact_manager.make_contact(nodeID, '127.0.0.1', 91824, self.protocol)
nodeID = '020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'.decode('hex')
contact = self.contact_manager.make_contact(nodeID, '127.0.0.1', 9182, self.protocol)
self.assertFalse(self.routingTable._shouldSplit(self.routingTable._kbucketIndex(contact.id), contact.id))
yield self.routingTable.addContact(contact)
self.failUnlessEqual(len(self.routingTable._buckets), 2)
self.failUnlessEqual(len(self.routingTable._buckets[0]._contacts), 5)
self.failUnlessEqual(len(self.routingTable._buckets[1]._contacts), 4)
self.failIf(contact not in self.routingTable._buckets[1]._contacts)
self.failUnlessEqual(len(self.routingTable._buckets[0]._contacts), 8)
self.failUnlessEqual(len(self.routingTable._buckets[1]._contacts), 2)
self.failIf(contact in self.routingTable._buckets[0]._contacts)
self.failIf(contact in self.routingTable._buckets[1]._contacts)
# class KeyErrorFixedTest(unittest.TestCase):
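The rejected contact above exercises the same rule as the _shouldSplit change at the top of this commit: a full bucket is only split when the candidate would rank among the k closest known contacts. A hedged restatement as a free function (k=8, matching these tests):

def should_split(parent_id, known_ids, new_id, k=8):
    xor = lambda a, b: long(a.encode('hex'), 16) ^ long(b.encode('hex'), 16)
    ordered = sorted(known_ids, key=lambda i: xor(parent_id, i))
    kth = ordered[-1] if len(ordered) < k else ordered[k - 1]
    return xor(parent_id, new_id) < xor(parent_id, kth)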