import random
import logging

from twisted.internet import defer, reactor

from lbrynet import conf
from lbrynet.p2p.client.ClientProtocol import ClientProtocolFactory
from lbrynet.p2p.Error import InsufficientFundsError
from lbrynet.p2p import utils


log = logging.getLogger(__name__)


class PeerConnectionHandler:
    def __init__(self, request_creators, factory):
        self.request_creators = request_creators
        self.factory = factory
        self.connection = None
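

# Manages the pool of peer connections for a single download: it opens up to
# max_connections_per_stream connections, hands each one to the request creators
# for work, and re-evaluates the pool every MANAGE_CALL_INTERVAL_SEC seconds.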
class ConnectionManager:
    #implements(interfaces.IConnectionManager)
    MANAGE_CALL_INTERVAL_SEC = 5
    TCP_CONNECT_TIMEOUT = 15

    def __init__(self, downloader, rate_limiter,
                 primary_request_creators, secondary_request_creators):

        self.seek_head_blob_first = conf.settings['seek_head_blob_first']
        self.max_connections_per_stream = conf.settings['max_connections_per_stream']

        self.downloader = downloader
        self.rate_limiter = rate_limiter
        self._primary_request_creators = primary_request_creators
        self._secondary_request_creators = secondary_request_creators
        self._peer_connections = {}  # {Peer: PeerConnectionHandler}
        self._connections_closing = {}  # {Peer: deferred (fired when the connection is closed)}
        self._next_manage_call = None
        # a deferred that fires when the in-progress manage call finishes
        self._manage_deferred = None
        self.stopped = True
        log.debug("%s initialized", self._get_log_name())

    # this identifies what the connection manager is for,
    # used for logging purposes only
    def _get_log_name(self):
        out = 'Connection Manager Unknown'
        if hasattr(self.downloader, 'stream_name'):
            out = 'Connection Manager ' + self.downloader.stream_name
        elif hasattr(self.downloader, 'blob_hash'):
            out = 'Connection Manager ' + self.downloader.blob_hash
        return out

    def _start(self):
        self.stopped = False
        if self._next_manage_call is not None and self._next_manage_call.active() is True:
            self._next_manage_call.cancel()

    def start(self):
        log.debug("%s starting", self._get_log_name())
        self._start()
        self._next_manage_call = utils.call_later(0, self.manage)
        return defer.succeed(True)

    @defer.inlineCallbacks
    def stop(self):
        log.debug("%s stopping", self._get_log_name())
        self.stopped = True
        # wait for the current manage call to finish
        if self._manage_deferred:
            yield self._manage_deferred
        # in case we stopped between manage calls, cancel the next one
        if self._next_manage_call and self._next_manage_call.active():
            self._next_manage_call.cancel()
        self._next_manage_call = None
        yield self._close_peers()

    def num_peer_connections(self):
        return len(self._peer_connections)
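
    # cancel any requests in flight on each connection, then disconnect it and
    # wait for the deferred stored in _connections_closing to fire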
    def _close_peers(self):
        def disconnect_peer(p):
            d = defer.Deferred()
            self._connections_closing[p] = d
            self._peer_connections[p].connection.disconnect()
            if p in self._peer_connections:
                del self._peer_connections[p]
            return d

        def close_connection(p):
            log.debug("%s Abruptly closing a connection to %s due to downloading being paused",
                      self._get_log_name(), p)
            if self._peer_connections[p].factory.p is not None:
                d = self._peer_connections[p].factory.p.cancel_requests()
            else:
                d = defer.succeed(True)
            d.addBoth(lambda _: disconnect_peer(p))
            return d

        # fixme: stop modifying dict during iteration
        closing_deferreds = [close_connection(peer) for peer in list(self._peer_connections)]
        return defer.DeferredList(closing_deferreds)
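
    # called by the client protocol when it is ready to send another request;
    # the result is True only if a primary request creator produced a request,
    # in which case secondary requests are sent as well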
    @defer.inlineCallbacks
    def get_next_request(self, peer, protocol):
        log.debug("%s Trying to get the next request for peer %s", self._get_log_name(), peer)
        if peer not in self._peer_connections or self.stopped is True:
            log.debug("%s The peer %s has already been told to shut down.",
                      self._get_log_name(), peer)
            defer.returnValue(False)
        requests = yield self._send_primary_requests(peer, protocol)
        have_request = any(r[1] for r in requests if r[0] is True)
        if have_request:
            yield self._send_secondary_requests(peer, protocol)
        defer.returnValue(have_request)

    def _send_primary_requests(self, peer, protocol):
        def handle_error(err):
            err.trap(InsufficientFundsError)
            self.downloader.insufficient_funds(err)
            return False

        def check_if_request_sent(request_sent, request_creator):
            if peer not in self._peer_connections:
                # This can happen if the connection is told to close
                return False
            if request_sent is False:
                if request_creator in self._peer_connections[peer].request_creators:
                    self._peer_connections[peer].request_creators.remove(request_creator)
            else:
                if request_creator not in self._peer_connections[peer].request_creators:
                    self._peer_connections[peer].request_creators.append(request_creator)
            return request_sent

        ds = []
        for p_r_c in self._primary_request_creators:
            d = p_r_c.send_next_request(peer, protocol)
            d.addErrback(handle_error)
            d.addCallback(check_if_request_sent, p_r_c)
            ds.append(d)
        return defer.DeferredList(ds, fireOnOneErrback=True)

    def _send_secondary_requests(self, peer, protocol):
        ds = [
            s_r_c.send_next_request(peer, protocol)
            for s_r_c in self._secondary_request_creators
        ]
        return defer.DeferredList(ds)
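
    # periodic management loop: if the stream has fewer than
    # max_connections_per_stream connections, look for new peers and connect
    # to them, then schedule the next call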
    @defer.inlineCallbacks
    def manage(self, schedule_next_call=True):
        self._manage_deferred = defer.Deferred()
        if len(self._peer_connections) < self.max_connections_per_stream:
            log.debug("%s have %d connections, looking for %d",
                      self._get_log_name(), len(self._peer_connections),
                      self.max_connections_per_stream)
            peers = yield self._get_new_peers()
            for peer in peers:
                self._connect_to_peer(peer)
        self._manage_deferred.callback(None)
        self._manage_deferred = None
        if not self.stopped and schedule_next_call:
            self._next_manage_call = utils.call_later(self.MANAGE_CALL_INTERVAL_SEC, self.manage)

    def return_shuffled_peers_not_connected_to(self, peers, new_conns_needed):
        out = [peer for peer in peers if peer not in self._peer_connections]
        random.shuffle(out)
        return out[:new_conns_needed]
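
    # ask the primary request creator (expected to be a BlobRequester) for peers,
    # preferring peers that have the head blob when seek_head_blob_first is set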
    @defer.inlineCallbacks
    def _get_new_peers(self):
        new_conns_needed = self.max_connections_per_stream - len(self._peer_connections)
        if new_conns_needed < 1:
            defer.returnValue([])
        # we always get the peers from the first request creator,
        # which must be a BlobRequester
        request_creator = self._primary_request_creators[0]
        log.debug("%s Trying to get a new peer to connect to", self._get_log_name())

        # find peers for the head blob if configured to do so
        if self.seek_head_blob_first:
            try:
                peers = yield request_creator.get_new_peers_for_head_blob()
                peers = self.return_shuffled_peers_not_connected_to(peers, new_conns_needed)
            except KeyError:
                log.warning("%s does not have a head blob", self._get_log_name())
                peers = []
        else:
            peers = []

        # we didn't find any new peers on the head blob,
        # so we have to look for the first unavailable blob
        if not peers:
            peers = yield request_creator.get_new_peers_for_next_unavailable()
            peers = self.return_shuffled_peers_not_connected_to(peers, new_conns_needed)

        log.debug("%s Got a list of peers to choose from: %s",
                  self._get_log_name(), peers)
        log.debug("%s Current connections: %s",
                  self._get_log_name(), self._peer_connections.keys())
        log.debug("%s List of connection states: %s", self._get_log_name(),
                  [p_c_h.connection.state for p_c_h in self._peer_connections.values()])
        defer.returnValue(peers)
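
    # open a TCP connection to the peer; _peer_disconnected is chained onto the
    # factory's connection_was_made_deferred so the bookkeeping gets cleaned up
    # when the connection goes away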
    def _connect_to_peer(self, peer):
        if self.stopped:
            return

        log.debug("%s Trying to connect to %s", self._get_log_name(), peer)
        factory = ClientProtocolFactory(peer, self.rate_limiter, self)
        factory.connection_was_made_deferred.addCallback(
            lambda c_was_made: self._peer_disconnected(c_was_made, peer))
        self._peer_connections[peer] = PeerConnectionHandler(self._primary_request_creators[:],
                                                             factory)
        connection = reactor.connectTCP(peer.host, peer.port, factory,
                                        timeout=self.TCP_CONNECT_TIMEOUT)
        self._peer_connections[peer].connection = connection

    def _peer_disconnected(self, connection_was_made, peer):
        log.debug("%s protocol disconnected for %s",
                  self._get_log_name(), peer)
        if peer in self._peer_connections:
            del self._peer_connections[peer]
        if peer in self._connections_closing:
            d = self._connections_closing[peer]
            del self._connections_closing[peer]
            d.callback(True)
        return connection_was_made
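

# A rough usage sketch (illustrative only -- the downloader, rate limiter and
# request creator objects below are placeholders for whatever the caller wires up):
#
#     manager = ConnectionManager(downloader, rate_limiter,
#                                 [blob_requester], [secondary_creator])
#     d = manager.start()                        # schedules the manage loop
#     ...                                        # client protocols call get_next_request()
#     d.addCallback(lambda _: manager.stop())    # cancels the loop and closes all peers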