Merge pull request #3285 from lbryio/restrict-udp-source
Restrict udp sources, add `ALLOW_LAN_UDP` hub setting
Commit cb5dab3033
7 changed files with 49 additions and 36 deletions
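In short: the SPV wallet server's UDP status endpoint now only answers pings that arrive from a public IPv4 address and a non-privileged source port, DHT peers must likewise use ports >= 1024, and the new `ALLOW_LAN_UDP` hub setting relaxes the address check for LAN deployments. A self-contained sketch of the resulting filter (`is_acceptable_source` is a hypothetical helper written for illustration; the real check lives in `is_valid_public_ipv4` and the UDP protocol hunks below, which also exclude a few special subnets such as carrier-grade NAT):

```python
import ipaddress

def is_acceptable_source(host: str, port: int,
                         allow_localhost: bool = False,
                         allow_lan: bool = False) -> bool:
    """Rough approximation of the new source filter; illustration only."""
    ip = ipaddress.ip_address(host)
    if port < 1024:            # privileged source ports are never answered
        return False
    if ip.is_loopback:
        return allow_localhost
    if ip.is_private:
        return allow_lan       # ALLOW_LAN_UDP / allow_lan opts back in
    return ip.version == 4 and not (
        ip.is_multicast or ip.is_reserved or ip.is_unspecified or ip.is_link_local)

print(is_acceptable_source('8.8.8.8', 4444))                      # True
print(is_acceptable_source('192.168.1.5', 4444))                  # False
print(is_acceptable_source('192.168.1.5', 4444, allow_lan=True))  # True
```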
@@ -155,9 +155,9 @@ class KademliaPeer:
         if self._node_id is not None:
             if not len(self._node_id) == constants.HASH_LENGTH:
                 raise ValueError("invalid node_id: {}".format(hexlify(self._node_id).decode()))
-        if self.udp_port is not None and not 1 <= self.udp_port <= 65535:
+        if self.udp_port is not None and not 1024 <= self.udp_port <= 65535:
             raise ValueError("invalid udp port")
-        if self.tcp_port is not None and not 1 <= self.tcp_port <= 65535:
+        if self.tcp_port is not None and not 1024 <= self.tcp_port <= 65535:
             raise ValueError("invalid tcp port")
         if not is_valid_public_ipv4(self.address, self.allow_localhost):
             raise ValueError(f"invalid ip address: '{self.address}'")
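The practical effect on DHT peer construction, as a hedged usage sketch (the import paths for `make_kademlia_peer` and `generate_id` are assumptions; both helpers appear in the test hunk at the bottom of this diff):

```python
from lbry.dht.peer import make_kademlia_peer   # assumed import path
from lbry.utils import generate_id             # assumed import path

node_id = generate_id()
make_kademlia_peer(node_id, '1.0.0.1', udp_port=1024)        # accepted: first non-privileged port
try:
    make_kademlia_peer(node_id, '1.0.0.1', udp_port=1000)    # ports below 1024 are now rejected
except ValueError as e:
    print(e)                                                 # "invalid udp port"
```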
@@ -380,13 +380,15 @@ CARRIER_GRADE_NAT_SUBNET = ipaddress.ip_network('100.64.0.0/10')
 IPV4_TO_6_RELAY_SUBNET = ipaddress.ip_network('192.88.99.0/24')


-def is_valid_public_ipv4(address, allow_localhost: bool = False):
+def is_valid_public_ipv4(address, allow_localhost: bool = False, allow_lan: bool = False):
     try:
         parsed_ip = ipaddress.ip_address(address)
         if parsed_ip.is_loopback and allow_localhost:
             return True
+        if allow_lan and parsed_ip.is_private:
+            return True
         if any((parsed_ip.version != 4, parsed_ip.is_unspecified, parsed_ip.is_link_local, parsed_ip.is_loopback,
-                parsed_ip.is_multicast, parsed_ip.is_reserved, parsed_ip.is_private, parsed_ip.is_reserved)):
+                parsed_ip.is_multicast, parsed_ip.is_reserved, parsed_ip.is_private)):
             return False
         else:
             return not any((CARRIER_GRADE_NAT_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32")),
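A quick sketch of how the new `allow_lan` flag changes the helper, assuming the behaviour shown in the hunk above (the tail of the `else` branch is cut off in this view):

```python
from lbry.utils import is_valid_public_ipv4

assert is_valid_public_ipv4('8.8.8.8')                          # ordinary public IPv4
assert not is_valid_public_ipv4('192.168.0.10')                 # private, rejected by default
assert is_valid_public_ipv4('192.168.0.10', allow_lan=True)     # accepted once LAN is allowed
assert is_valid_public_ipv4('127.0.0.1', allow_localhost=True)  # loopback needs its own flag
assert not is_valid_public_ipv4('2001:db8::1')                  # non-IPv4 is still rejected
```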
@@ -25,7 +25,7 @@ def main():
     parser = get_argument_parser()
     args = parser.parse_args()
     coin_class = get_coin_class(args.spvserver)
-    logging.basicConfig(level=logging.INFO)
+    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
    logging.info('lbry.server starting')
    logging.getLogger('aiohttp').setLevel(logging.WARNING)
    logging.getLogger('elasticsearch').setLevel(logging.WARNING)
@@ -73,6 +73,7 @@ class Env:
         self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
         self.anon_logs = self.boolean('ANON_LOGS', False)
         self.log_sessions = self.integer('LOG_SESSIONS', 3600)
+        self.allow_lan_udp = self.boolean('ALLOW_LAN_UDP', False)
         # Peer discovery
         self.peer_discovery = self.peer_discovery_enum()
         self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
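For hub operators the flag is read from the environment like the other boolean settings; a hedged sketch of enabling it (the exact truthy values accepted depend on `Env.boolean`, which is not shown in this diff):

```python
import os

# Must be set before the hub constructs Env(); a non-empty value is assumed
# to be treated as true here.
os.environ.setdefault('ALLOW_LAN_UDP', 'Yes')
```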
@@ -115,7 +115,7 @@ class Server:
         if self.env.udp_port:
             await self.bp.status_server.start(
                 0, bytes.fromhex(self.bp.coin.GENESIS_HASH)[::-1],
-                self.env.host, self.env.udp_port
+                self.env.host, self.env.udp_port, self.env.allow_lan_udp
             )
         await _start_cancellable(self.bp.fetch_and_process_blocks)

@@ -3,7 +3,7 @@ import struct
 from time import perf_counter
 import logging
 from typing import Optional, Tuple, NamedTuple
-from lbry.utils import LRUCache
+from lbry.utils import LRUCache, is_valid_public_ipv4
 # from prometheus_client import Counter


@@ -69,7 +69,8 @@ class SPVPong(NamedTuple):
 class SPVServerStatusProtocol(asyncio.DatagramProtocol):
     PROTOCOL_VERSION = 1

-    def __init__(self, height: int, tip: bytes, throttle_cache_size: int = 1024, throttle_reqs_per_sec: int = 10):
+    def __init__(self, height: int, tip: bytes, throttle_cache_size: int = 1024, throttle_reqs_per_sec: int = 10,
+                 allow_localhost: bool = False, allow_lan: bool = False):
         super().__init__()
         self.transport: Optional[asyncio.transports.DatagramTransport] = None
         self._height = height
@@ -80,6 +81,8 @@ class SPVServerStatusProtocol(asyncio.DatagramProtocol):
         self._throttle = LRUCache(throttle_cache_size)
         self._should_log = LRUCache(throttle_cache_size)
         self._min_delay = 1 / throttle_reqs_per_sec
+        self._allow_localhost = allow_localhost
+        self._allow_lan = allow_lan

     def update_cached_response(self):
         self._cached_response = SPVPong.make(self._height, self._tip, self._flags, self.PROTOCOL_VERSION)
@@ -119,7 +122,11 @@ class SPVServerStatusProtocol(asyncio.DatagramProtocol):
         except (ValueError, struct.error, AttributeError, TypeError):
             # log.exception("derp")
             return
-        self.transport.sendto(self.make_pong(addr[0]), addr)
+        if addr[1] >= 1024 and is_valid_public_ipv4(
+                addr[0], allow_localhost=self._allow_localhost, allow_lan=self._allow_lan):
+            self.transport.sendto(self.make_pong(addr[0]), addr)
+        else:
+            log.warning("odd packet from %s:%i", addr[0], addr[1])
         # ping_count_metric.inc()

     def connection_made(self, transport) -> None:
@@ -137,12 +144,14 @@ class StatusServer:
     def __init__(self):
         self._protocol: Optional[SPVServerStatusProtocol] = None

-    async def start(self, height: int, tip: bytes, interface: str, port: int):
+    async def start(self, height: int, tip: bytes, interface: str, port: int, allow_lan: bool = False):
         if self.is_running:
             return
         loop = asyncio.get_event_loop()
-        self._protocol = SPVServerStatusProtocol(height, tip)
         interface = interface if interface.lower() != 'localhost' else '127.0.0.1'
+        self._protocol = SPVServerStatusProtocol(
+            height, tip, allow_localhost=interface == '127.0.0.1', allow_lan=allow_lan
+        )
         await loop.create_datagram_endpoint(lambda: self._protocol, (interface, port))
         log.info("started udp status server on %s:%i", interface, port)

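To poke at the change in isolation, a rough sketch of running the status server with LAN replies enabled (the module path, port, and zeroed tip are assumptions for illustration, not how the hub starts it; `Server.run` wires `env.allow_lan_udp` through as shown above):

```python
import asyncio
from lbry.wallet.server.udp import StatusServer   # assumed module path

async def main():
    status = StatusServer()
    # height 0 and an all-zero 32-byte tip stand in for real chain state
    await status.start(0, bytes(32), '0.0.0.0', 50001, allow_lan=True)
    await asyncio.sleep(60)   # answer pings for a minute, then exit

asyncio.run(main())
```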
@@ -10,8 +10,8 @@ class PeerTest(AsyncioTestCase):
         self.loop = asyncio.get_event_loop()
         self.peer_manager = PeerManager(self.loop)
         self.node_ids = [generate_id(), generate_id(), generate_id()]
-        self.first_contact = make_kademlia_peer(self.node_ids[1], '1.0.0.1', udp_port=1000)
-        self.second_contact = make_kademlia_peer(self.node_ids[0], '1.0.0.2', udp_port=1000)
+        self.first_contact = make_kademlia_peer(self.node_ids[1], '1.0.0.1', udp_port=1024)
+        self.second_contact = make_kademlia_peer(self.node_ids[0], '1.0.0.2', udp_port=1024)

     def test_peer_is_good_unknown_peer(self):
         # Scenario: peer replied, but caller doesn't know the node_id.
@@ -25,36 +25,37 @@ class PeerTest(AsyncioTestCase):

     def test_make_contact_error_cases(self):
         self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '1.2.3.4', 100000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '1.2.3.4.5', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], 'this is not an ip', 1000)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '1.2.3.4.5', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], 'this is not an ip', 1024)
         self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '1.2.3.4', -1000)
         self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '1.2.3.4', 0)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '1.2.3.4', 1023)
         self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '1.2.3.4', 70000)
-        self.assertRaises(ValueError, make_kademlia_peer, b'not valid node id', '1.2.3.4', 1000)
+        self.assertRaises(ValueError, make_kademlia_peer, b'not valid node id', '1.2.3.4', 1024)

-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '0.0.0.0', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '10.0.0.1', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '100.64.0.1', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '127.0.0.1', 1000)
-        self.assertIsNotNone(make_kademlia_peer(self.node_ids[1], '127.0.0.1', 1000, allow_localhost=True))
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '192.168.0.1', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '172.16.0.1', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '169.254.1.1', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '192.0.0.2', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '192.0.2.2', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '192.88.99.2', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '198.18.1.1', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '198.51.100.2', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '198.51.100.2', 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '203.0.113.4', 1000)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '0.0.0.0', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '10.0.0.1', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '100.64.0.1', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '127.0.0.1', 1024)
+        self.assertIsNotNone(make_kademlia_peer(self.node_ids[1], '127.0.0.1', 1024, allow_localhost=True))
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '192.168.0.1', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '172.16.0.1', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '169.254.1.1', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '192.0.0.2', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '192.0.2.2', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '192.88.99.2', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '198.18.1.1', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '198.51.100.2', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '198.51.100.2', 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '203.0.113.4', 1024)
         for i in range(32):
-            self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], f"{224 + i}.0.0.0", 1000)
-        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '255.255.255.255', 1000)
+            self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], f"{224 + i}.0.0.0", 1024)
+        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], '255.255.255.255', 1024)
         self.assertRaises(
-            ValueError, make_kademlia_peer, self.node_ids[1], 'beee:eeee:eeee:eeee:eeee:eeee:eeee:eeef', 1000
+            ValueError, make_kademlia_peer, self.node_ids[1], 'beee:eeee:eeee:eeee:eeee:eeee:eeee:eeef', 1024
         )
         self.assertRaises(
-            ValueError, make_kademlia_peer, self.node_ids[1], '2001:db8::ff00:42:8329', 1000
+            ValueError, make_kademlia_peer, self.node_ids[1], '2001:db8::ff00:42:8329', 1024
         )

     def test_is_valid_ipv4(self):
@@ -79,7 +80,7 @@ class PeerTest(AsyncioTestCase):
     def test_boolean(self):
         self.assertNotEqual(self.first_contact, self.second_contact)
         self.assertEqual(
-            self.second_contact, make_kademlia_peer(self.node_ids[0], '1.0.0.2', udp_port=1000)
+            self.second_contact, make_kademlia_peer(self.node_ids[0], '1.0.0.2', udp_port=1024)
         )

     def test_compact_ip(self):