Merge #16042: test: Bump MAX_NODES to 12

fa47330397 test: Speed up cache creation (MarcoFalke)
fa6ad7a5ec test: Bump MAX_NODES to 12 (MarcoFalke)

Pull request description:

  When testing a combination of settings that affect the datadir (e.g. prune, blockfilter, ...) we may need a lot of datadirs.
  Bump the maximum number of nodes proactively from 8 to 12, so that caches get populated with 12 node dirs, as opposed to 8.

  Also, add an assert that the list of deterministic keys is exactly as long as the maximum number of nodes (and not longer than that).

  Also, create the cache faster.
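
  In rough terms, the speed-up replaces "start MAX_NODES bitcoinds and cache all of their datadirs" with "start one bitcoind, cache its datadir once, and copy that directory for every node the test actually requests". A minimal standalone sketch of that build-once, copy-many pattern (the paths and placeholder file here are invented for illustration; the real implementation is in the diff below):

    import os
    import shutil

    CACHE_DIR = "/tmp/bitcoin_test_cache/node0"   # hypothetical cache location
    NUM_NODES = 4                                 # nodes requested by a test

    if not os.path.isdir(CACHE_DIR):
        os.makedirs(CACHE_DIR)
        # In the framework, a single disposable bitcoind would now mine the
        # 199-block regtest chain into this directory.
        with open(os.path.join(CACHE_DIR, "placeholder.dat"), "w") as f:
            f.write("stand-in for the cached chainstate\n")

    for i in range(NUM_NODES):
        node_dir = "/tmp/bitcoin_test_run/node{}".format(i)
        shutil.rmtree(node_dir, ignore_errors=True)   # start from a clean copy
        shutil.copytree(CACHE_DIR, node_dir)          # every node reuses the cache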

ACKs for commit fa4733:
  laanwj:
    utACK fa47330397

Tree-SHA512: 9803c765ed52d344102f5a3bce57b05d88a7429dcb05ed66ed6c881fda8d87c2834d02d21b95fe9f39c0efe3b8527e13cf94f006588cde22e8c2cd50b2d517a6
Commit 63b9efa73d by MarcoFalke, 2019-05-24 07:00:54 -04:00
GPG key ID: D2EA4850E7528B25 (no known key found for this signature in database)
3 changed files with 40 additions and 49 deletions

test/functional/test_framework/test_framework.py

@@ -29,7 +29,6 @@ from .util import (
     disconnect_nodes,
     get_datadir_path,
     initialize_datadir,
-    p2p_port,
     sync_blocks,
     sync_mempools,
 )
@@ -468,35 +467,23 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
     def _initialize_chain(self):
         """Initialize a pre-mined blockchain for use by the test.

-        Create a cache of a 199-block-long chain (with wallet) for MAX_NODES
+        Create a cache of a 199-block-long chain
         Afterward, create num_nodes copies from the cache."""

+        CACHE_NODE_ID = 0  # Use node 0 to create the cache for all other nodes
+        cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
         assert self.num_nodes <= MAX_NODES
-        create_cache = False
-        for i in range(MAX_NODES):
-            if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
-                create_cache = True
-                break

-        if create_cache:
-            self.log.debug("Creating data directories from cached datadir")
+        if not os.path.isdir(cache_node_dir):
+            self.log.debug("Creating cache directory {}".format(cache_node_dir))

-            # find and delete old cache directories if any exist
-            for i in range(MAX_NODES):
-                if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
-                    shutil.rmtree(get_datadir_path(self.options.cachedir, i))
-
-            # Create cache directories, run bitcoinds:
-            for i in range(MAX_NODES):
-                datadir = initialize_datadir(self.options.cachedir, i)
-                args = [self.options.bitcoind, "-datadir=" + datadir, '-disablewallet']
-                if i > 0:
-                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
-                self.nodes.append(TestNode(
-                    i,
-                    get_datadir_path(self.options.cachedir, i),
+            initialize_datadir(self.options.cachedir, CACHE_NODE_ID)
+            self.nodes.append(
+                TestNode(
+                    CACHE_NODE_ID,
+                    cache_node_dir,
                     extra_conf=["bind=127.0.0.1"],
-                    extra_args=[],
+                    extra_args=['-disablewallet'],
                     rpchost=None,
                     timewait=self.rpc_timeout,
                     bitcoind=self.options.bitcoind,
@@ -504,12 +491,10 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
                     coverage_dir=None,
                     cwd=self.options.tmpdir,
                 ))
-                self.nodes[i].args = args
-                self.start_node(i)
+            self.start_node(CACHE_NODE_ID)

             # Wait for RPC connections to be ready
-            for node in self.nodes:
-                node.wait_for_rpc_connection()
+            self.nodes[CACHE_NODE_ID].wait_for_rpc_connection()

             # Create a 199-block-long chain; each of the 4 first nodes
             # gets 25 mature blocks and 25 immature.
@@ -518,29 +503,29 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
             # This is needed so that we are out of IBD when the test starts,
             # see the tip age check in IsInitialBlockDownload().
             for i in range(8):
-                self.nodes[0].generatetoaddress(25 if i != 7 else 24, self.nodes[i % 4].get_deterministic_priv_key().address)
-            self.sync_blocks()
+                self.nodes[CACHE_NODE_ID].generatetoaddress(
+                    nblocks=25 if i != 7 else 24,
+                    address=TestNode.PRIV_KEYS[i % 4].address,
+                )

-            for n in self.nodes:
-                assert_equal(n.getblockchaininfo()["blocks"], 199)
+            assert_equal(self.nodes[CACHE_NODE_ID].getblockchaininfo()["blocks"], 199)

-            # Shut them down, and clean up cache directories:
+            # Shut it down, and clean up cache directories:
             self.stop_nodes()
             self.nodes = []

-            def cache_path(n, *paths):
-                return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
+            def cache_path(*paths):
+                return os.path.join(cache_node_dir, "regtest", *paths)

-            for i in range(MAX_NODES):
-                os.rmdir(cache_path(i, 'wallets'))  # Remove empty wallets dir
-                for entry in os.listdir(cache_path(i)):
-                    if entry not in ['chainstate', 'blocks']:
-                        os.remove(cache_path(i, entry))
+            os.rmdir(cache_path('wallets'))  # Remove empty wallets dir
+            for entry in os.listdir(cache_path()):
+                if entry not in ['chainstate', 'blocks']:  # Only keep chainstate and blocks folder
+                    os.remove(cache_path(entry))

         for i in range(self.num_nodes):
-            from_dir = get_datadir_path(self.options.cachedir, i)
+            self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
             to_dir = get_datadir_path(self.options.tmpdir, i)
-            shutil.copytree(from_dir, to_dir)
+            shutil.copytree(cache_node_dir, to_dir)
             initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in bitcoin.conf

     def _initialize_chain_clean(self):

test/functional/test_framework/test_node.py

@@ -23,6 +23,7 @@ import sys
 from .authproxy import JSONRPCException
 from .util import (
+    MAX_NODES,
     append_config,
     delete_cookie_file,
     get_rpc_proxy,
@@ -110,10 +111,8 @@ class TestNode():
         self.p2ps = []

-    def get_deterministic_priv_key(self):
-        """Return a deterministic priv key in base58, that only depends on the node's index"""
-        AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
-        PRIV_KEYS = [
+    AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
+    PRIV_KEYS = [
             # address , privkey
             AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
             AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
@@ -124,8 +123,15 @@ class TestNode():
             AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
             AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
             AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
-        ]
-        return PRIV_KEYS[self.index]
+            AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
+            AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
+            AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
+    ]
+
+    def get_deterministic_priv_key(self):
+        """Return a deterministic priv key in base58, that only depends on the node's index"""
+        assert len(self.PRIV_KEYS) == MAX_NODES
+        return self.PRIV_KEYS[self.index]

     def get_mem_rss_kilobytes(self):
         """Get the memory usage (RSS) per `ps`.

test/functional/test_framework/util.py

@@ -228,7 +228,7 @@ def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=N
 ############################################
 # The maximum number of nodes a single test can spawn
-MAX_NODES = 8
+MAX_NODES = 12
 # Don't assign rpc or p2p ports lower than this
 PORT_MIN = 11000
 # The number of ports to "reserve" for p2p and rpc, each
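
For reference, MAX_NODES also feeds the port helpers in the same util.py (p2p_port / rpc_port), which is why the constant sits next to PORT_MIN and the "reserve" comment: each test process gets a block of ports sized by these values. The sketch below only approximates that arithmetic; the PORT_RANGE value, the port_seed parameter, and the exact expression are assumptions, not lines taken from this diff:

    # Approximate sketch of deriving per-node p2p/rpc ports from the constants
    # above; the formula and PORT_RANGE are assumptions, not the verbatim helpers.
    MAX_NODES = 12
    PORT_MIN = 11000
    PORT_RANGE = 5000  # assumed size of the reserved block for p2p and rpc, each

    def p2p_port(n, port_seed=1):
        # port_seed stands in for the per-process seed the framework uses so
        # parallel test runs do not collide on ports.
        assert n <= MAX_NODES
        return PORT_MIN + n + (MAX_NODES * port_seed) % (PORT_RANGE - 1 - MAX_NODES)

    def rpc_port(n, port_seed=1):
        return PORT_MIN + PORT_RANGE + n + (MAX_NODES * port_seed) % (PORT_RANGE - 1 - MAX_NODES)

    print(p2p_port(0), rpc_port(0))  # 11012 16012 with the defaults above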