import asyncio
import argparse
import logging
import csv
from io import StringIO
from typing import Optional

from aiohttp import web
from prometheus_client import generate_latest as prom_generate_latest, Gauge

from lbry.dht.constants import generate_id
from lbry.dht.node import Node
from lbry.dht.peer import PeerManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.conf import Config

logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
log = logging.getLogger(__name__)

BLOBS_STORED = Gauge(
    "blobs_stored", "Number of blob info received", namespace="dht_node",
    labelnames=("method",)
)
PEERS = Gauge(
    "known_peers", "Number of peers on routing table", namespace="dht_node",
    labelnames=("method",)
)
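# note on naming: with namespace="dht_node", prometheus_client exposes these on
# /metrics as dht_node_blobs_stored and dht_node_known_peers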


class SimpleMetrics:
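    """
    Small aiohttp service exposing Prometheus metrics, CSV dumps of the routing
    table peers and stored blob hashes, and two network size estimation endpoints.
    """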

    def __init__(self, port, node):
        self.prometheus_port = port
        self.dht_node: Node = node
        self.active_estimation_semaphore = asyncio.Semaphore(1)

    async def handle_metrics_get_request(self, _):
        try:
            return web.Response(
                text=prom_generate_latest().decode(),
                content_type='text/plain; version=0.0.4'
            )
        except Exception:
            log.exception('could not generate prometheus data')
            raise

    async def handle_peers_csv(self, _):
        out = StringIO()
        writer = csv.DictWriter(out, fieldnames=["ip", "port", "dht_id"])
        writer.writeheader()
        for peer in self.dht_node.protocol.routing_table.get_peers():
            writer.writerow({"ip": peer.address, "port": peer.udp_port, "dht_id": peer.node_id.hex()})
        return web.Response(text=out.getvalue(), content_type='text/csv')

    async def handle_blobs_csv(self, _):
        out = StringIO()
        writer = csv.DictWriter(out, fieldnames=["blob_hash"])
        writer.writeheader()
        for blob in self.dht_node.protocol.data_store.keys():
            writer.writerow({"blob_hash": blob.hex()})
        return web.Response(text=out.getvalue(), content_type='text/csv')

    async def active_estimation(self, _):
        # "crawls" the network for peers close to our node id (not a full aggressive crawler yet)
        # since node ids are random, the odds of a peer sharing our first X bits are roughly 1/(2^X)
        # we use that to estimate the network size, see issue #3463 for related papers and details
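        # here X = 8: we only compare the first byte of the node ids, which matches ours
        # with probability 1/256, hence the * 256 scaling of the sampled count below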
        amount = 20_000
        async with self.active_estimation_semaphore:  # resource intensive, so limit concurrency to 1
            peers = await self.dht_node.peer_search(self.dht_node.protocol.node_id, count=amount, max_results=amount)
        close_ids = [peer for peer in peers if peer.node_id[0] == self.dht_node.protocol.node_id[0]]
        return web.json_response(
            {"total_peers_found_during_estimation": len(peers),
             "peers_with_the_same_byte_prefix": len(close_ids),
             "estimated_network_size": len(close_ids) * 256})

    async def passive_estimation(self, _):
        # same estimation as above, but using the routing table instead of crawling; this assumes
        # our implementation was able to add all the reachable close peers, which should hold for
        # seed nodes since they are super popular
        peers = self.dht_node.protocol.routing_table.get_peers()
        close_ids = [peer for peer in peers if peer.node_id[0] == self.dht_node.protocol.node_id[0]]
        return web.json_response(
            {"total_peers_found_during_estimation": len(peers),
             "peers_with_the_same_byte_prefix": len(close_ids),
             "estimated_network_size": len(close_ids) * 256})

    async def start(self):
        prom_app = web.Application()
        prom_app.router.add_get('/metrics', self.handle_metrics_get_request)
        prom_app.router.add_get('/peers.csv', self.handle_peers_csv)
        prom_app.router.add_get('/blobs.csv', self.handle_blobs_csv)
        prom_app.router.add_get('/active_estimation', self.active_estimation)
        prom_app.router.add_get('/passive_estimation', self.passive_estimation)
        metrics_runner = web.AppRunner(prom_app)
        await metrics_runner.setup()
        prom_site = web.TCPSite(metrics_runner, "0.0.0.0", self.prometheus_port)
        await prom_site.start()
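        # the site binds on all interfaces; once running, the endpoints can be queried
        # with plain HTTP, e.g. curl http://localhost:<metrics_port>/peers.csv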


async def main(host: str, port: int, db_file_path: str, bootstrap_node: Optional[str], prometheus_port: int):
    loop = asyncio.get_event_loop()
    conf = Config()
    storage = SQLiteStorage(conf, db_file_path, loop, loop.time)
    if bootstrap_node:
        bootstrap_host, bootstrap_port = bootstrap_node.split(':')
        nodes = [(bootstrap_host, int(bootstrap_port))]
    else:
        nodes = conf.known_dht_nodes
    await storage.open()
    node = Node(
        loop, PeerManager(loop), generate_id(), port, port, 3333, None,
        storage=storage
    )
    if prometheus_port > 0:
        metrics = SimpleMetrics(prometheus_port, node)
        await metrics.start()
    node.start(host, nodes)
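    # refresh the Prometheus gauges and log a status line every ten seconds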
    while True:
        await asyncio.sleep(10)
        PEERS.labels('main').set(len(node.protocol.routing_table.get_peers()))
        BLOBS_STORED.labels('main').set(len(node.protocol.data_store.get_storing_contacts()))
        log.info("Known peers: %d. Storing contact information for %d blobs from %d peers.",
                 len(node.protocol.routing_table.get_peers()), len(node.protocol.data_store),
                 len(node.protocol.data_store.get_storing_contacts()))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Starts a single DHT node, which can then be used as a seed node or just a contributing node.")
    parser.add_argument("--host", default='0.0.0.0', type=str, help="Host to listen for requests. Default: 0.0.0.0")
    parser.add_argument("--port", default=4444, type=int, help="Port to listen for requests. Default: 4444")
    parser.add_argument("--db_file", default='/tmp/dht.db', type=str,
                        help="DB file to save peers. Default: /tmp/dht.db")
    parser.add_argument("--bootstrap_node", default=None, type=str,
                        help="Node to connect to for bootstrapping this node. Leave unset to use the default ones. "
                             "Format: host:port Example: lbrynet1.lbry.com:4444")
    parser.add_argument("--metrics_port", default=0, type=int,
                        help="Port for Prometheus and raw CSV metrics. 0 to disable. Default: 0")
    args = parser.parse_args()
    asyncio.run(main(args.host, args.port, args.db_file, args.bootstrap_node, args.metrics_port))
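
# example invocation (hypothetical script name and port values): run a node on the
# default UDP port with metrics enabled, then query http://localhost:7070/metrics
#   python dht_node.py --metrics_port 7070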