update default ADDRESS_HISTORY_CACHE_SIZE

Jack Robison 2022-06-21 14:24:42 -04:00
parent 028143ec7e
commit 0094237b97
2 changed files with 4 additions and 4 deletions


@@ -51,7 +51,7 @@ class ServerEnv(Env):
         self.database_query_timeout = (database_query_timeout / 1000.0) if database_query_timeout is not None else \
             (float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)
         self.hashX_history_cache_size = address_history_cache_size if address_history_cache_size is not None \
-            else self.integer('ADDRESS_HISTORY_CACHE_SIZE', 1000)
+            else self.integer('ADDRESS_HISTORY_CACHE_SIZE', 4096)
         self.daemon_ca_path = daemon_ca_path if daemon_ca_path else None
 
     @classmethod
@@ -102,7 +102,7 @@ class ServerEnv(Env):
         parser.add_argument('--query_timeout_ms', type=int, default=cls.integer('QUERY_TIMEOUT_MS', 10000),
                             help="Elasticsearch query timeout, in ms. Can be set in env with 'QUERY_TIMEOUT_MS'")
         parser.add_argument('--address_history_cache_size', type=int,
-                            default=cls.integer('ADDRESS_HISTORY_CACHE_SIZE', 1000),
+                            default=cls.integer('ADDRESS_HISTORY_CACHE_SIZE', 4096),
                             help="Size of the lru cache of address histories. "
                                  "Can be set in the env with 'ADDRESS_HISTORY_CACHE_SIZE'")


@@ -13,7 +13,7 @@ class BlockchainEnv(Env):
         self.db_max_open_files = db_max_open_files
         self.daemon_url = daemon_url if daemon_url is not None else self.required('DAEMON_URL')
         self.hashX_history_cache_size = hashX_history_cache_size if hashX_history_cache_size is not None \
-            else self.integer('ADDRESS_HISTORY_CACHE_SIZE', 1000)
+            else self.integer('ADDRESS_HISTORY_CACHE_SIZE', 4096)
         self.rebuild_address_status_from_height = rebuild_address_status_from_height \
             if isinstance(rebuild_address_status_from_height, int) else -1
         self.daemon_ca_path = daemon_ca_path if daemon_ca_path else None
@@ -32,7 +32,7 @@ class BlockchainEnv(Env):
                             help='This setting translates into the max_open_files option given to rocksdb. '
                                  'A higher number will use more memory. Defaults to 64.')
         parser.add_argument('--address_history_cache_size', type=int,
-                            default=cls.integer('ADDRESS_HISTORY_CACHE_SIZE', 2 ** 13),
+                            default=cls.integer('ADDRESS_HISTORY_CACHE_SIZE', 4096),
                             help="LRU cache size for address histories, used when processing new blocks "
                                  "and when processing mempool updates. Can be set in env with "
                                  "'ADDRESS_HISTORY_CACHE_SIZE'")