improve cli/env settings

-remove unused settings
-add help strings
-fix required settings
This commit is contained in:
Jack Robison 2022-04-21 18:13:46 -04:00
parent 9efddcdbf9
commit ca39d38dda
No known key found for this signature in database
GPG key ID: DF25C68FE0239BB2
9 changed files with 100 additions and 206 deletions
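The core of the change: Env's single contribute_to_arg_parser() is split into per-service contributors (contribute_common_settings_to_arg_parser, contribute_writer_settings_to_arg_parser, contribute_server_settings_to_arg_parser, contribute_elastic_sync_settings_to_arg_parser), and settings such as --db_dir and --daemon_url are only marked required on the command line when the corresponding environment variable is unset. A minimal standalone sketch of that pattern using plain argparse and os.environ (the helper names below are illustrative, not the scribe code itself):

import argparse
import os


def env_default(name, fallback=None):
    # Illustrative stand-in for Env.default(): read a setting from the environment.
    return os.environ.get(name, fallback)


def contribute_common_settings(parser):
    # --db_dir is required on the CLI only if DB_DIRECTORY is not already set in the environment.
    env_db_dir = env_default('DB_DIRECTORY')
    parser.add_argument('--db_dir', type=str, required=env_db_dir is None, default=env_db_dir,
                        help="Path of the directory containing the database.")


def contribute_writer_settings(parser):
    # Same pattern for --daemon_url, which only the writer service needs.
    env_daemon_url = env_default('DAEMON_URL')
    parser.add_argument('--daemon_url', required=env_daemon_url is None, default=env_daemon_url,
                        help="URL for rpc from lbrycrd or lbcd.")


if __name__ == '__main__':
    # Each service entry point composes only the setting groups it needs.
    parser = argparse.ArgumentParser(prog='scribe')
    contribute_common_settings(parser)
    contribute_writer_settings(parser)
    print(parser.parse_args())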

View file

@@ -11,7 +11,8 @@ def main():
     parser = argparse.ArgumentParser(
         prog='scribe'
     )
-    Env.contribute_to_arg_parser(parser)
+    Env.contribute_common_settings_to_arg_parser(parser)
+    Env.contribute_writer_settings_to_arg_parser(parser)
     args = parser.parse_args()
     try:

View file

@@ -36,7 +36,7 @@ NAMESPACE = f"{PROMETHEUS_NAMESPACE}_db"
 class HubDB:
     DB_VERSIONS = [7, 8]
-    def __init__(self, coin, db_dir: str, cache_MB: int = 512, reorg_limit: int = 200,
+    def __init__(self, coin, db_dir: str, reorg_limit: int = 200,
                  cache_all_claim_txos: bool = False, cache_all_tx_hashes: bool = False,
                  secondary_name: str = '', max_open_files: int = 64, blocking_channel_ids: List[str] = None,
                  filtering_channel_ids: List[str] = None, executor: ThreadPoolExecutor = None):
@@ -45,7 +45,6 @@ class HubDB:
         self._executor = executor
         self._db_dir = db_dir
-        self._cache_MB = cache_MB
         self._reorg_limit = reorg_limit
         self._cache_all_claim_txos = cache_all_claim_txos
         self._cache_all_tx_hashes = cache_all_tx_hashes
@@ -820,8 +819,7 @@ class HubDB:
         )
         db_path = os.path.join(self._db_dir, 'lbry-rocksdb')
         self.prefix_db = PrefixDB(
-            db_path, cache_mb=self._cache_MB,
-            reorg_limit=self._reorg_limit, max_open_files=self._db_max_open_files,
+            db_path, reorg_limit=self._reorg_limit, max_open_files=self._db_max_open_files,
             unsafe_prefixes={DBStatePrefixRow.prefix, MempoolTXPrefixRow.prefix, HashXMempoolStatusPrefixRow.prefix},
             secondary_path=secondary_path
         )

View file

@@ -1694,7 +1694,7 @@ class HashXMempoolStatusPrefixRow(PrefixRow):
 class PrefixDB(BasePrefixDB):
-    def __init__(self, path: str, cache_mb: int = 128, reorg_limit: int = 200, max_open_files: int = 64,
+    def __init__(self, path: str, reorg_limit: int = 200, max_open_files: int = 64,
                  secondary_path: str = '', unsafe_prefixes: Optional[typing.Set[bytes]] = None):
         super().__init__(path, max_open_files=max_open_files, secondary_path=secondary_path,
                          max_undo_depth=reorg_limit, unsafe_prefixes=unsafe_prefixes)

View file

@@ -11,8 +11,8 @@ def main():
     parser = argparse.ArgumentParser(
         prog='scribe-elastic-sync'
     )
-    Env.contribute_to_arg_parser(parser)
-    parser.add_argument('--reindex', type=bool, default=False)
+    Env.contribute_common_settings_to_arg_parser(parser)
+    Env.contribute_elastic_sync_settings_to_arg_parser(parser)
     args = parser.parse_args()
     try:

View file

@@ -30,11 +30,9 @@ class Env:
     class Error(Exception):
         pass
-    def __init__(self, db_dir=None, daemon_url=None, host=None, elastic_host=None,
-                 elastic_port=None, loop_policy=None, max_query_workers=None,
-                 chain=None, es_index_prefix=None, cache_MB=None, reorg_limit=None, tcp_port=None,
-                 udp_port=None, ssl_port=None, ssl_certfile=None, ssl_keyfile=None,
-                 prometheus_port=None, max_subscriptions=None, banner_file=None, anon_logs=None, log_sessions=None,
+    def __init__(self, db_dir=None, daemon_url=None, host=None, elastic_host=None, elastic_port=None,
+                 max_query_workers=None, chain=None, es_index_prefix=None, reorg_limit=None,
+                 tcp_port=None, udp_port=None, prometheus_port=None, banner_file=None,
                  allow_lan_udp=None, cache_all_tx_hashes=None, cache_all_claim_txos=None, country=None,
                  payment_address=None, donation_address=None, max_send=None, max_receive=None, max_sessions=None,
                  session_timeout=None, drop_client=None, description=None, daily_fee=None,
@@ -45,16 +43,12 @@ class Env:
         self.db_dir = db_dir if db_dir is not None else self.required('DB_DIRECTORY')
         self.daemon_url = daemon_url if daemon_url is not None else self.required('DAEMON_URL')
         self.db_max_open_files = db_max_open_files
         self.host = host if host is not None else self.default('HOST', 'localhost')
         self.elastic_host = elastic_host if elastic_host is not None else self.default('ELASTIC_HOST', 'localhost')
         self.elastic_port = elastic_port if elastic_port is not None else self.integer('ELASTIC_PORT', 9200)
         self.elastic_notifier_host = elastic_notifier_host if elastic_notifier_host is not None else self.default('ELASTIC_NOTIFIER_HOST', 'localhost')
         self.elastic_notifier_port = elastic_notifier_port if elastic_notifier_port is not None else self.integer('ELASTIC_NOTIFIER_PORT', 19080)
-        self.loop_policy = self.set_event_loop_policy(
-            loop_policy if loop_policy is not None else self.default('EVENT_LOOP_POLICY', None)
-        )
         self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
         self.max_query_workers = max_query_workers if max_query_workers is not None else self.integer('MAX_QUERY_WORKERS', 4)
         if chain == 'mainnet':
@@ -64,21 +58,12 @@ class Env:
         else:
             self.coin = LBCRegTest
         self.es_index_prefix = es_index_prefix if es_index_prefix is not None else self.default('ES_INDEX_PREFIX', '')
-        self.cache_MB = cache_MB if cache_MB is not None else self.integer('CACHE_MB', 1024)
         self.reorg_limit = reorg_limit if reorg_limit is not None else self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
         # Server stuff
         self.tcp_port = tcp_port if tcp_port is not None else self.integer('TCP_PORT', None)
         self.udp_port = udp_port if udp_port is not None else self.integer('UDP_PORT', self.tcp_port)
-        self.ssl_port = ssl_port if ssl_port is not None else self.integer('SSL_PORT', None)
-        if self.ssl_port:
-            self.ssl_certfile = ssl_certfile if ssl_certfile is not None else self.required('SSL_CERTFILE')
-            self.ssl_keyfile = ssl_keyfile if ssl_keyfile is not None else self.required('SSL_KEYFILE')
         self.prometheus_port = prometheus_port if prometheus_port is not None else self.integer('PROMETHEUS_PORT', 0)
-        self.max_subscriptions = max_subscriptions if max_subscriptions is not None else self.integer('MAX_SUBSCRIPTIONS', 10000)
         self.banner_file = banner_file if banner_file is not None else self.default('BANNER_FILE', None)
-        # self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
-        self.anon_logs = anon_logs if anon_logs is not None else self.boolean('ANON_LOGS', False)
-        self.log_sessions = log_sessions if log_sessions is not None else self.integer('LOG_SESSIONS', 3600)
         self.allow_lan_udp = allow_lan_udp if allow_lan_udp is not None else self.boolean('ALLOW_LAN_UDP', False)
         self.cache_all_tx_hashes = cache_all_tx_hashes if cache_all_tx_hashes is not None else self.boolean('CACHE_ALL_TX_HASHES', False)
         self.cache_all_claim_txos = cache_all_claim_txos if cache_all_claim_txos is not None else self.boolean('CACHE_ALL_CLAIM_TXOS', False)
@@ -90,28 +75,17 @@ class Env:
             self.peer_hubs = [p.strip("") for p in peer_hubs.split(",")]
         else:
             self.peer_hubs = self.extract_peer_hubs()
-        # self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
-        # self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
         # The electrum client takes the empty string as unspecified
         self.payment_address = payment_address if payment_address is not None else self.default('PAYMENT_ADDRESS', '')
         self.donation_address = donation_address if donation_address is not None else self.default('DONATION_ADDRESS', '')
         # Server limits to help prevent DoS
         self.max_send = max_send if max_send is not None else self.integer('MAX_SEND', 1000000)
         self.max_receive = max_receive if max_receive is not None else self.integer('MAX_RECEIVE', 1000000)
-        # self.max_subs = self.integer('MAX_SUBS', 250000)
         self.max_sessions = max_sessions if max_sessions is not None else self.sane_max_sessions()
-        # self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
         self.session_timeout = session_timeout if session_timeout is not None else self.integer('SESSION_TIMEOUT', 600)
         self.drop_client = drop_client if drop_client is not None else self.custom("DROP_CLIENT", None, re.compile)
         self.description = description if description is not None else self.default('DESCRIPTION', '')
         self.daily_fee = daily_fee if daily_fee is not None else self.string_amount('DAILY_FEE', '0')
-        # Identities
-        clearnet_identity = self.clearnet_identity()
-        tor_identity = self.tor_identity(clearnet_identity)
-        self.identities = [identity
-                           for identity in (clearnet_identity, tor_identity)
-                           if identity is not None]
         self.database_query_timeout = database_query_timeout if database_query_timeout is not None else \
             (float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)
@ -169,19 +143,6 @@ class Env:
if bad: if bad:
raise cls.Error(f'remove obsolete os.environment variables {bad}') raise cls.Error(f'remove obsolete os.environment variables {bad}')
@classmethod
def set_event_loop_policy(cls, policy_name: str = None):
if not policy_name or policy_name == 'default':
import asyncio
return asyncio.get_event_loop_policy()
elif policy_name == 'uvloop':
import uvloop
import asyncio
loop_policy = uvloop.EventLoopPolicy()
asyncio.set_event_loop_policy(loop_policy)
return loop_policy
raise cls.Error(f'unknown event loop policy "{policy_name}"')
def cs_host(self): def cs_host(self):
"""Returns the 'host' argument to pass to asyncio's create_server """Returns the 'host' argument to pass to asyncio's create_server
call. The result can be a single host name string, a list of call. The result can be a single host name string, a list of
@@ -213,67 +174,6 @@ class Env:
                             f'because your open file limit is {nofile_limit:,d}')
         return value
-    def clearnet_identity(self):
-        host = self.default('REPORT_HOST', None)
-        if host is None:
-            return None
-        try:
-            ip = ip_address(host)
-        except ValueError:
-            bad = (not is_valid_hostname(host)
-                   or host.lower() == 'localhost')
-        else:
-            bad = (ip.is_multicast or ip.is_unspecified
-                   or (ip.is_private and self.peer_announce))
-        if bad:
-            raise self.Error(f'"{host}" is not a valid REPORT_HOST')
-        tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
-        ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
-        if tcp_port == ssl_port:
-            raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
-                             f'both resolve to {tcp_port}')
-        return NetIdentity(
-            host,
-            tcp_port,
-            ssl_port,
-            ''
-        )
-    def tor_identity(self, clearnet):
-        host = self.default('REPORT_HOST_TOR', None)
-        if host is None:
-            return None
-        if not host.endswith('.onion'):
-            raise self.Error(f'tor host "{host}" must end with ".onion"')
-        def port(port_kind):
-            """Returns the clearnet identity port, if any and not zero,
-            otherwise the listening port."""
-            result = 0
-            if clearnet:
-                result = getattr(clearnet, port_kind)
-            return result or getattr(self, port_kind)
-        tcp_port = self.integer('REPORT_TCP_PORT_TOR',
-                                port('tcp_port')) or None
-        ssl_port = self.integer('REPORT_SSL_PORT_TOR',
-                                port('ssl_port')) or None
-        if tcp_port == ssl_port:
-            raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
-                             f'both resolve to {tcp_port}')
-        return NetIdentity(
-            host,
-            tcp_port,
-            ssl_port,
-            '_tor',
-        )
-    def hosts_dict(self):
-        return {identity.host: {'tcp_port': identity.tcp_port,
-                                'ssl_port': identity.ssl_port}
-                for identity in self.identities}
     def peer_discovery_enum(self):
         pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
         if pd in ('off', ''):
@@ -290,80 +190,95 @@ class Env:
         return [hub.strip() for hub in peer_hubs.split(',')]
     @classmethod
-    def contribute_to_arg_parser(cls, parser):
-        parser.add_argument('--db_dir', type=str, help='path of the directory containing lbry-leveldb',
-                            default=cls.default('DB_DIRECTORY', None))
-        parser.add_argument('--daemon_url',
-                            help='URL for rpc from lbrycrd, <rpcuser>:<rpcpassword>@<lbrycrd rpc ip><lbrycrd rpc port>',
-                            default=cls.default('DAEMON_URL', None))
-        parser.add_argument('--db_max_open_files', type=int, default=64,
-                            help='number of files rocksdb can have open at a time')
-        parser.add_argument('--host', type=str, default=cls.default('HOST', 'localhost'),
-                            help='Interface for hub server to listen on')
-        parser.add_argument('--tcp_port', type=int, default=cls.integer('TCP_PORT', 50001),
-                            help='TCP port to listen on for hub server')
-        parser.add_argument('--udp_port', type=int, default=cls.integer('UDP_PORT', 50001),
-                            help='UDP port to listen on for hub server')
-        parser.add_argument('--ssl_port', default=cls.integer('SSL_PORT', None), type=int,
-                            help='SSL port to listen on for hub server')
-        parser.add_argument('--ssl_certfile', default=cls.default('SSL_CERTFILE', None), type=str,
-                            help='Path to SSL cert file')
-        parser.add_argument('--ssl_keyfile', default=cls.default('SSL_KEYFILE', None), type=str,
-                            help='Path to SSL key file')
+    def contribute_common_settings_to_arg_parser(cls, parser):
+        """
+        Settings used by all services
+        """
+        env_db_dir = cls.default('DB_DIRECTORY', None)
+        parser.add_argument('--db_dir', type=str, required=env_db_dir is None,
+                            help="Path of the directory containing lbry-rocksdb. ", default=env_db_dir)
         parser.add_argument('--reorg_limit', default=cls.integer('REORG_LIMIT', 200), type=int, help='Max reorg depth')
+        parser.add_argument('--chain', type=str, default=cls.default('NET', 'mainnet'),
+                            help="Which chain to use, default is mainnet, others are used for testing",
+                            choices=['mainnet', 'regtest', 'testnet'])
+        parser.add_argument('--max_query_workers', type=int, default=cls.integer('MAX_QUERY_WORKERS', 4),
+                            help="Size of the thread pool. Can be set in env with 'MAX_QUERY_WORKERS'")
+        parser.add_argument('--cache_all_tx_hashes', action='store_true',
+                            help="Load all tx hashes into memory. This will make address subscriptions and sync, "
+                                 "resolve, transaction fetching, and block sync all faster at the expense of higher "
+                                 "memory usage (at least 10GB more). Can be set in env with 'CACHE_ALL_TX_HASHES'.",
+                            default=cls.boolean('CACHE_ALL_TX_HASHES', False))
+        parser.add_argument('--cache_all_claim_txos', action='store_true',
+                            help="Load all claim txos into memory. This will make address subscriptions and sync, "
+                                 "resolve, transaction fetching, and block sync all faster at the expense of higher "
+                                 "memory usage. Can be set in env with 'CACHE_ALL_CLAIM_TXOS'.",
+                            default=cls.boolean('CACHE_ALL_CLAIM_TXOS', False))
+        parser.add_argument('--prometheus_port', type=int, default=cls.integer('PROMETHEUS_PORT', 0),
+                            help="Port for prometheus metrics to listen on, disabled by default. "
+                                 "Can be set in env with 'PROMETHEUS_PORT'.")
+    @classmethod
+    def contribute_writer_settings_to_arg_parser(cls, parser):
+        env_daemon_url = cls.default('DAEMON_URL', None)
+        parser.add_argument('--daemon_url', required=env_daemon_url is None,
+                            help="URL for rpc from lbrycrd or lbcd, "
+                                 "<rpcuser>:<rpcpassword>@<lbrycrd rpc ip><lbrycrd rpc port>.",
+                            default=env_daemon_url)
+        parser.add_argument('--db_max_open_files', type=int, default=64,
+                            help='This setting translates into the max_open_files option given to rocksdb. '
+                                 'A higher number will use more memory. Defaults to 64.')
+    @classmethod
+    def contribute_server_settings_to_arg_parser(cls, parser):
+        env_daemon_url = cls.default('DAEMON_URL', None)
+        parser.add_argument('--daemon_url', required=env_daemon_url is None,
+                            help="URL for rpc from lbrycrd or lbcd, "
+                                 "<rpcuser>:<rpcpassword>@<lbrycrd rpc ip><lbrycrd rpc port>.",
+                            default=env_daemon_url)
+        parser.add_argument('--reindex', default=False, help="Drop and rebuild the elasticsearch index.",
+                            action='store_true')
+        parser.add_argument('--host', type=str, default=cls.default('HOST', 'localhost'),
+                            help="Interface for hub server to listen on, use 0.0.0.0 to listen on the external "
+                                 "interface. Can be set in env with 'HOST'")
+        parser.add_argument('--tcp_port', type=int, default=cls.integer('TCP_PORT', 50001),
+                            help="Electrum TCP port to listen on for hub server. Can be set in env with 'TCP_PORT'")
+        parser.add_argument('--udp_port', type=int, default=cls.integer('UDP_PORT', 50001),
+                            help="'UDP port to listen on for hub server. Can be set in env with 'UDP_PORT'")
         parser.add_argument('--elastic_host', default=cls.default('ELASTIC_HOST', 'localhost'), type=str,
-                            help='elasticsearch host, defaults to localhost')
+                            help="Hostname or ip address of the elasticsearch instance to connect to. "
+                                 "Can be set in env with 'ELASTIC_HOST'")
         parser.add_argument('--elastic_port', default=cls.integer('ELASTIC_PORT', 9200), type=int,
-                            help='elasticsearch port, defaults to 9200')
+                            help="Elasticsearch port to connect to. Can be set in env with 'ELASTIC_PORT'")
         parser.add_argument('--elastic_notifier_host', default=cls.default('ELASTIC_NOTIFIER_HOST', 'localhost'),
                             type=str, help='elasticsearch sync notifier host, defaults to localhost')
         parser.add_argument('--elastic_notifier_port', default=cls.integer('ELASTIC_NOTIFIER_PORT', 19080), type=int,
                             help='elasticsearch sync notifier port')
         parser.add_argument('--es_index_prefix', default=cls.default('ES_INDEX_PREFIX', ''), type=str)
-        parser.add_argument('--loop_policy', default=cls.default('EVENT_LOOP_POLICY', 'default'), type=str,
-                            choices=['default', 'uvloop'])
-        parser.add_argument('--max_query_workers', type=int, default=cls.integer('MAX_QUERY_WORKERS', 4),
-                            help='number of threads used by the request handler to read the database')
-        parser.add_argument('--cache_MB', type=int, default=cls.integer('CACHE_MB', 1024),
-                            help='size of the leveldb lru cache, in megabytes')
-        parser.add_argument('--cache_all_tx_hashes', type=bool,
-                            help='Load all tx hashes into memory. This will make address subscriptions and sync, '
-                                 'resolve, transaction fetching, and block sync all faster at the expense of higher '
-                                 'memory usage')
-        parser.add_argument('--cache_all_claim_txos', type=bool,
-                            help='Load all claim txos into memory. This will make address subscriptions and sync, '
-                                 'resolve, transaction fetching, and block sync all faster at the expense of higher '
-                                 'memory usage')
-        parser.add_argument('--prometheus_port', type=int, default=cls.integer('PROMETHEUS_PORT', 0),
-                            help='port for hub prometheus metrics to listen on, disabled by default')
-        parser.add_argument('--max_subscriptions', type=int, default=cls.integer('MAX_SUBSCRIPTIONS', 10000),
-                            help='max subscriptions per connection')
-        parser.add_argument('--banner_file', type=str, default=cls.default('BANNER_FILE', None),
-                            help='path to file containing banner text')
-        parser.add_argument('--anon_logs', type=bool, default=cls.boolean('ANON_LOGS', False),
-                            help="don't log ip addresses")
-        parser.add_argument('--allow_lan_udp', type=bool, default=cls.boolean('ALLOW_LAN_UDP', False),
-                            help='reply to hub UDP ping messages from LAN ip addresses')
-        parser.add_argument('--country', type=str, default=cls.default('COUNTRY', 'US'), help='')
-        parser.add_argument('--max_send', type=int, default=cls.default('MAX_SEND', 1000000), help='')
-        parser.add_argument('--max_receive', type=int, default=cls.default('MAX_RECEIVE', 1000000), help='')
-        parser.add_argument('--max_sessions', type=int, default=cls.default('MAX_SESSIONS', 1000), help='')
-        parser.add_argument('--session_timeout', type=int, default=cls.default('SESSION_TIMEOUT', 600), help='')
-        parser.add_argument('--drop_client', type=str, default=cls.default('DROP_CLIENT', None), help='')
-        parser.add_argument('--description', type=str, default=cls.default('DESCRIPTION', ''), help='')
-        parser.add_argument('--daily_fee', type=float, default=cls.default('DAILY_FEE', 0.0), help='')
-        parser.add_argument('--payment_address', type=str, default=cls.default('PAYMENT_ADDRESS', ''), help='')
-        parser.add_argument('--donation_address', type=str, default=cls.default('DONATION_ADDRESS', ''), help='')
-        parser.add_argument('--chain', type=str, default=cls.default('NET', 'mainnet'),
-                            help="Which chain to use, default is mainnet", choices=['mainnet', 'regtest', 'testnet'])
-        parser.add_argument('--query_timeout_ms', type=int, default=cls.integer('QUERY_TIMEOUT_MS', 10000),
-                            help="elasticsearch query timeout")
-        parser.add_argument('--blocking_channel_ids', nargs='*', help='',
+    @classmethod
+    def contribute_elastic_sync_settings_to_arg_parser(cls, parser):
+        parser.add_argument('--elastic_host', default=cls.default('ELASTIC_HOST', 'localhost'), type=str,
+                            help="Hostname or ip address of the elasticsearch instance to connect to. "
+                                 "Can be set in env with 'ELASTIC_HOST'")
+        parser.add_argument('--elastic_port', default=cls.integer('ELASTIC_PORT', 9200), type=int,
+                            help="Elasticsearch port to connect to. Can be set in env with 'ELASTIC_PORT'")
+        parser.add_argument('--elastic_notifier_host', default=cls.default('ELASTIC_NOTIFIER_HOST', 'localhost'),
+                            type=str, help='elasticsearch sync notifier host, defaults to localhost')
+        parser.add_argument('--elastic_notifier_port', default=cls.integer('ELASTIC_NOTIFIER_PORT', 19080), type=int,
+                            help='elasticsearch sync notifier port')
+        parser.add_argument('--es_index_prefix', default=cls.default('ES_INDEX_PREFIX', ''), type=str)
+        parser.add_argument('--query_timeout_ms', type=int, default=cls.integer('QUERY_TIMEOUT_MS', 10000),
+                            help="Elasticsearch query timeout, in ms. Can be set in env with 'QUERY_TIMEOUT_MS'")
+        parser.add_argument('--blocking_channel_ids', nargs='*',
+                            help="Space separated list of channel claim ids used for blocking. "
+                                 "Claims that are reposted by these channels can't be resolved "
+                                 "or returned in search results. Can be set in env with 'BLOCKING_CHANNEL_IDS'",
                             default=cls.default('BLOCKING_CHANNEL_IDS', '').split(' '))
-        parser.add_argument('--filtering_channel_ids', nargs='*', help='',
+        parser.add_argument('--filtering_channel_ids', nargs='*',
+                            help="Space separated list of channel claim ids used for blocking. "
+                                 "Claims that are reposted by these channels aren't returned in search results. "
+                                 "Can be set in env with 'FILTERING_CHANNEL_IDS'",
                             default=cls.default('FILTERING_CHANNEL_IDS', '').split(' '))
     @classmethod
@@ -371,13 +286,10 @@ class Env:
         return cls(
             db_dir=args.db_dir, daemon_url=args.daemon_url, db_max_open_files=args.db_max_open_files,
             host=args.host, elastic_host=args.elastic_host, elastic_port=args.elastic_port,
-            loop_policy=args.loop_policy, max_query_workers=args.max_query_workers,
-            chain=args.chain, es_index_prefix=args.es_index_prefix,
-            cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
-            udp_port=args.udp_port, ssl_port=args.ssl_port, ssl_certfile=args.ssl_certfile,
-            ssl_keyfile=args.ssl_keyfile, prometheus_port=args.prometheus_port,
-            max_subscriptions=args.max_subscriptions, banner_file=args.banner_file, anon_logs=args.anon_logs,
-            log_sessions=None, allow_lan_udp=args.allow_lan_udp,
+            max_query_workers=args.max_query_workers, chain=args.chain, es_index_prefix=args.es_index_prefix,
+            reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
+            udp_port=args.udp_port, prometheus_port=args.prometheus_port,
+            banner_file=args.banner_file, allow_lan_udp=args.allow_lan_udp,
             cache_all_tx_hashes=args.cache_all_tx_hashes, cache_all_claim_txos=args.cache_all_claim_txos,
             country=args.country, payment_address=args.payment_address, donation_address=args.donation_address,
             max_send=args.max_send, max_receive=args.max_receive, max_sessions=args.max_sessions,
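With --db_dir and --daemon_url now marked required only when their environment variables are absent, argparse rejects a missing flag up front instead of the value falling through as None. A small illustration of that behavior with a hypothetical minimal parser (not the real Env class):

import argparse
import os


def build_parser():
    # required flips depending on whether the environment already supplies the value.
    env_daemon_url = os.environ.get('DAEMON_URL')
    parser = argparse.ArgumentParser(prog='scribe-demo')
    parser.add_argument('--daemon_url', required=env_daemon_url is None, default=env_daemon_url)
    return parser


# Without DAEMON_URL in the environment, omitting --daemon_url is rejected by argparse.
os.environ.pop('DAEMON_URL', None)
try:
    build_parser().parse_args([])
except SystemExit:
    print("missing --daemon_url rejected")

# With DAEMON_URL exported, the flag becomes optional and the env value is the default.
os.environ['DAEMON_URL'] = 'rpcuser:rpcpass@127.0.0.1:9245'
print(build_parser().parse_args([]).daemon_url)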

View file

@@ -11,7 +11,8 @@ def main():
     parser = argparse.ArgumentParser(
         prog='scribe-hub'
     )
-    Env.contribute_to_arg_parser(parser)
+    Env.contribute_common_settings_to_arg_parser(parser)
+    Env.contribute_server_settings_to_arg_parser(parser)
     args = parser.parse_args()
     try:

View file

@@ -230,10 +230,6 @@ class SessionManager:
         host = env.cs_host()
         if env.tcp_port is not None:
             await self._start_server('TCP', host, env.tcp_port)
-        if env.ssl_port is not None:
-            sslc = ssl.SSLContext(ssl.PROTOCOL_TLS)
-            sslc.load_cert_chain(env.ssl_certfile, keyfile=env.ssl_keyfile)
-            await self._start_server('SSL', host, env.ssl_port, ssl=sslc)
     async def _close_servers(self, kinds):
         """Close the servers of the given kinds (TCP etc.)."""
@@ -698,7 +694,6 @@ class LBRYElectrumX(asyncio.Protocol):
         self.kind = kind  # 'RPC', 'TCP' etc.
         self.coin = self.env.coin
-        self.anon_logs = self.env.anon_logs
         self.txs_sent = 0
         self.log_me = False
         self.daemon_request = self.session_manager.daemon_request
@@ -785,19 +780,6 @@ class LBRYElectrumX(asyncio.Protocol):
     def default_framer(self):
         return NewlineFramer(self.env.max_receive)
-    def peer_address_str(self, *, for_log=True):
-        """Returns the peer's IP address and port as a human-readable
-        string, respecting anon logs if the output is for a log."""
-        if for_log and self.anon_logs:
-            return 'xx.xx.xx.xx:xx'
-        if not self._address:
-            return 'unknown'
-        ip_addr_str, port = self._address[:2]
-        if ':' in ip_addr_str:
-            return f'[{ip_addr_str}]:{port}'
-        else:
-            return f'{ip_addr_str}:{port}'
     def toggle_logging(self):
         self.log_me = not self.log_me
@@ -1037,7 +1019,7 @@ class LBRYElectrumX(asyncio.Protocol):
             await self._send_message(message)
             return True
         except asyncio.TimeoutError:
-            self.logger.info("timeout sending address notification to %s", self.peer_address_str(for_log=True))
+            self.logger.info(f"timeout sending address notification to {self._address[0]}:{self._address[1]}")
             self.abort()
             return False
@@ -1048,7 +1030,7 @@ class LBRYElectrumX(asyncio.Protocol):
             await self._send_message(message)
             return True
         except asyncio.TimeoutError:
-            self.logger.info("timeout sending address notification to %s", self.peer_address_str(for_log=True))
+            self.logger.info(f"timeout sending address notification to {self._address[0]}:{self._address[1]}")
             self.abort()
             return False
@@ -1078,7 +1060,7 @@ class LBRYElectrumX(asyncio.Protocol):
         """Return the server features dictionary."""
         min_str, max_str = cls.protocol_min_max_strings()
         cls.cached_server_features.update({
-            'hosts': env.hosts_dict(),
+            'hosts': {},
             'pruning': None,
             'server_version': cls.version,
             'protocol_min': min_str,

View file

@@ -28,7 +28,7 @@ class BlockchainService:
         self.lock = asyncio.Lock()
         self.last_state: typing.Optional[DBState] = None
         self.db = HubDB(
-            env.coin, env.db_dir, env.cache_MB, env.reorg_limit, env.cache_all_claim_txos, env.cache_all_tx_hashes,
+            env.coin, env.db_dir, env.reorg_limit, env.cache_all_claim_txos, env.cache_all_tx_hashes,
             secondary_name=secondary_name, max_open_files=-1, blocking_channel_ids=env.blocking_channel_ids,
             filtering_channel_ids=env.filtering_channel_ids, executor=self._executor
         )

View file

@@ -107,7 +107,7 @@ class TestRevertableOpStack(unittest.TestCase):
 class TestRevertablePrefixDB(unittest.TestCase):
     def setUp(self):
         self.tmp_dir = tempfile.mkdtemp()
-        self.db = PrefixDB(self.tmp_dir, cache_mb=1, max_open_files=32)
+        self.db = PrefixDB(self.tmp_dir, max_open_files=32)
     def tearDown(self) -> None:
         self.db.close()