#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.

This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.

For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.

"""

import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging

# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
except UnicodeDecodeError:
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "

if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
    if os.name == 'nt':
        import ctypes
        kernel32 = ctypes.windll.kernel32
        ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
        STD_OUTPUT_HANDLE = -11
        STD_ERROR_HANDLE = -12
        # Enable ANSI color control on stdout
        stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
        stdout_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
        kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        # Enable ANSI color control on stderr
        stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
        stderr_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
        kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    # Primitive formatting on supported terminals via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
    GREEN = ('\033[0m', '\033[0;32m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')

TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
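# Note: test scripts exit with 77 to signal that they were skipped; this mirrors
# the exit-code convention automake uses for skipped checks.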

EXTENDED_SCRIPTS = [
    # These tests are not run by default.
    # Longest test should go first, to favor running tests in parallel
    'feature_pruning.py',
    'feature_dbcrash.py',
]

BASE_SCRIPTS = [
    # Scripts that are run by default.
    # Longest test should go first, to favor running tests in parallel
    'wallet_hd.py',
    'wallet_backup.py',
    # vv Tests less than 5m vv
    'mining_getblocktemplate_longpoll.py',
    'feature_maxuploadtarget.py',
    'feature_block.py',
    'rpc_fundrawtransaction.py',
    'p2p_compactblocks.py',
    'feature_segwit.py',
    # vv Tests less than 2m vv
    'wallet_basic.py',
    'wallet_labels.py',
    'p2p_segwit.py',
    'p2p_timeouts.py',
    'wallet_dump.py',
    'wallet_listtransactions.py',
    # vv Tests less than 60s vv
    'p2p_sendheaders.py',
    'wallet_zapwallettxes.py',
    'wallet_importmulti.py',
    'mempool_limit.py',
    'rpc_txoutproof.py',
    'wallet_listreceivedby.py',
    'wallet_abandonconflict.py',
    'feature_csv_activation.py',
    'rpc_rawtransaction.py',
    'wallet_address_types.py',
    'feature_bip68_sequence.py',
    'p2p_feefilter.py',
    'feature_reindex.py',
    'feature_abortnode.py',
    # vv Tests less than 30s vv
    'wallet_keypool_topup.py',
    'feature_fee_estimation.py',
    'interface_zmq.py',
    'interface_bitcoin_cli.py',
    'mempool_resurrect.py',
    'wallet_txn_doublespend.py --mineblock',
    'tool_wallet.py',
    'wallet_txn_clone.py',
    'wallet_txn_clone.py --segwit',
    'rpc_getchaintips.py',
    'rpc_misc.py',
    'interface_rest.py',
    'mempool_spend_coinbase.py',
    'wallet_avoidreuse.py',
    'mempool_reorg.py',
    'mempool_persist.py',
    'wallet_multiwallet.py',
    'wallet_multiwallet.py --usecli',
    'wallet_createwallet.py',
    'wallet_createwallet.py --usecli',
    'interface_http.py',
    'interface_rpc.py',
    'rpc_psbt.py',
    'rpc_users.py',
    'feature_proxy.py',
    'rpc_signrawtransaction.py',
    'wallet_groups.py',
    'p2p_disconnect_ban.py',
    'rpc_decodescript.py',
    'rpc_blockchain.py',
    'rpc_deprecated.py',
    'wallet_disable.py',
    'rpc_net.py',
    'wallet_keypool.py',
    'p2p_mempool.py',
    'p2p_blocksonly.py',
    'mining_prioritisetransaction.py',
    'p2p_invalid_locator.py',
    'p2p_invalid_block.py',
    'p2p_invalid_messages.py',
    'p2p_invalid_tx.py',
    'feature_assumevalid.py',
    'example_test.py',
    'wallet_txn_doublespend.py',
    'wallet_txn_clone.py --mineblock',
    'feature_notifications.py',
    'rpc_getblockfilter.py',
    'rpc_invalidateblock.py',
    'feature_rbf.py',
    'mempool_packages.py',
    'mempool_package_onemore.py',
    'rpc_createmultisig.py',
    'feature_versionbits_warning.py',
    'rpc_preciousblock.py',
    'wallet_importprunedfunds.py',
    'p2p_leak_tx.py',
    'rpc_signmessage.py',
    'wallet_balance.py',
    'feature_nulldummy.py',
    'mempool_accept.py',
    'wallet_import_rescan.py',
    'wallet_import_with_label.py',
    'rpc_bind.py --ipv4',
    'rpc_bind.py --ipv6',
    'rpc_bind.py --nonloopback',
    'mining_basic.py',
    'wallet_bumpfee.py',
    'wallet_bumpfee_totalfee_deprecation.py',
    'rpc_named_arguments.py',
    'wallet_listsinceblock.py',
    'p2p_leak.py',
    'wallet_encryption.py',
    'feature_dersig.py',
    'feature_cltv.py',
    'rpc_uptime.py',
    'wallet_resendwallettransactions.py',
    'wallet_fallbackfee.py',
    'feature_minchainwork.py',
    'rpc_getblockstats.py',
    'wallet_create_tx.py',
    'p2p_fingerprint.py',
    'feature_uacomment.py',
    'wallet_coinbase_category.py',
    'feature_filelock.py',
    'p2p_unrequested_blocks.py',
    'feature_includeconf.py',
    'rpc_deriveaddresses.py',
    'rpc_deriveaddresses.py --usecli',
    'rpc_scantxoutset.py',
    'feature_logging.py',
    'p2p_node_network_limited.py',
    'feature_blocksdir.py',
    'feature_config_args.py',
    'rpc_help.py',
    'feature_help.py',
    'feature_shutdown.py',
    # Don't append tests at the end to avoid merge conflicts
    # Put them in a random line within the section that fits their approximate run-time
]

# Place EXTENDED_SCRIPTS first since it has the longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS

NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]

def main():
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
    Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
    parser.add_argument('--filter', help='filter scripts to run by regular expression')
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile, encoding="utf8"))

    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/test_runner_₿_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
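    # The non-ASCII characters in the directory name double as a check that the nodes
    # under test tolerate unicode datadir paths (an assumption about intent, but harmless).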
    os.makedirs(tmpdir)
    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if not enable_bitcoind:
        print("No functional tests to run.")
        print("Rerun ./configure with --with-daemon and then make")
        sys.exit(0)

    # Build list of tests
    test_list = []
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept names with or without a .py extension.
        # Specified tests can contain wildcards, but in that case the supplied
        # paths should be coherent, e.g. the same path as that provided to call
        # test_runner.py. Examples:
        #   `test/functional/test_runner.py test/functional/wallet*`
        #   `test/functional/test_runner.py ./test/functional/wallet*`
        #   `test_runner.py wallet*`
        # but not:
        #   `test/functional/test_runner.py wallet*`
        # Multiple wildcards can be passed:
        #   `test_runner.py tool* mempool*`
        for test in tests:
            script = test.split("/")[-1]
            script = script + ".py" if ".py" not in script else script
            if script in ALL_SCRIPTS:
                test_list.append(script)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
    elif args.extended:
        # Include extended tests
        test_list += ALL_SCRIPTS
    else:
        # Run base tests only
        test_list += BASE_SCRIPTS

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
        for exclude_test in exclude_tests:
            # Remove <test_name>.py and <test_name>.py --arg from the test list
            exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
            for exclude_item in exclude_list:
                test_list.remove(exclude_item)
            if not exclude_list:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))

    if args.filter:
        test_list = list(filter(re.compile(args.filter).search, test_list))

    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
        sys.exit(0)

    check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)

    run_tests(
        test_list=test_list,
        src_dir=config["environment"]["SRCDIR"],
        build_dir=config["environment"]["BUILDDIR"],
        tmpdir=tmpdir,
        jobs=args.jobs,
        enable_coverage=args.coverage,
        args=passon_args,
        combined_logs_len=args.combinedlogslen,
        failfast=args.failfast,
        runs_ci=args.ci,
    )

def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci):
    args = args or []

    # Warn if bitcoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "bitcoind"]) is not None:
            print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))

    tests_dir = src_dir + '/test/functional/'

    flags = ['--cachedir={}'.format(cache_dir)] + args

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache
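        # (create_cache.py pre-builds the shared regtest chain that each test's datadirs
        # are copied from, so the parallel jobs below don't all have to build it themselves)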
        try:
            subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise

    # Run Tests
    job_queue = TestHandler(
        num_tests_parallel=jobs,
        tests_dir=tests_dir,
        tmpdir=tmpdir,
        test_list=test_list,
        flags=flags,
        timeout_duration=40 * 60 if runs_ci else float('inf'),  # in seconds
    )

    start_time = time.time()
    test_results = []

    max_len_name = len(max(test_list, key=len))
    test_count = len(test_list)
    for i in range(test_count):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)
        done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
        if test_result.status == "Passed":
            logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("%s skipped" % (done_str))
        else:
            print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
                if BOLD[0]:
                    combined_logs_args += ['--color']
                combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))

        if failfast:
            logging.debug("Early exiting after test failure")
            break

    print_results(test_results, max_len_name, (int(time.time() - start_time)))

    if coverage:
        coverage_passed = coverage.report_rpc_coverage()

        logging.debug("Cleaning up coverage data")
        coverage.cleanup()
    else:
        coverage_passed = True

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(map(lambda test_result: test_result.was_successful, test_results)) and coverage_passed

    # This will be a no-op unless failfast is True in which case there may be dangling
    # processes which need to be killed.
    job_queue.kill_and_join()

    sys.exit(not all_passed)

def print_results(test_results, max_len_name, runtime):
    results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS", "DURATION") + BOLD[0]

    test_results.sort(key=TestResult.sort_key)
    all_passed = True
    time_sum = 0

    for test_result in test_results:
        all_passed = all_passed and test_result.was_successful
        time_sum += test_result.time
        test_result.padding = max_len_name
        results += str(test_result)

    status = TICK + "Passed" if all_passed else CROSS + "Failed"
    if not all_passed:
        results += RED[1]
    results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
    if not all_passed:
        results += RED[0]
    results += "Runtime: %s s\n" % (runtime)
    print(results)

class TestHandler:
    """
    Trigger the test scripts passed in via the list.
    """

    def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
        assert num_tests_parallel >= 1
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.timeout_duration = timeout_duration
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        self.jobs = []

    def get_next(self):
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            test = self.test_list.pop(0)
            portseed = len(self.test_list)
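            # The queue shrinks by one on every pop, so each test launched in this run gets
            # a distinct seed, and hence a distinct p2p/rpc port range, from the framework.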
            portseed_arg = ["--portseed={}".format(portseed)]
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
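            # Each job's output is buffered in memory (spilling to disk past 64 KiB) and only
            # read back after the subprocess has finished.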
            test_argv = test.split()
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
            tmpdir_arg = ["--tmpdir={}".format(testdir)]
            self.jobs.append((test,
                              time.time(),
                              subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              testdir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')

        # Print remaining running jobs when all jobs have been started.
        if not self.test_list:
            print("Remaining jobs: [{}]".format(", ".join(j[0] for j in self.jobs)))

        dot_count = 0
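        # Poll the running jobs twice a second, printing one progress dot per poll; the dots
        # are erased (see clearline below) once a job finishes so the result line starts at
        # column 0.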
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for job in self.jobs:
                (name, start_time, proc, testdir, log_out, log_err) = job
                if int(time.time() - start_time) > self.timeout_duration:
                    # Timeout individual tests if timeout is specified (to stop
                    # tests hanging and not providing useful output).
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(job)
                    clearline = '\r' + (' ' * dot_count) + '\r'
                    print(clearline, end='', flush=True)
                    dot_count = 0
                    return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
            print('.', end='', flush=True)
            dot_count += 1

    def kill_and_join(self):
        """Send SIGKILL to all jobs and block until all have ended."""
        procs = [i[2] for i in self.jobs]

        for proc in procs:
            proc.kill()

        for proc in procs:
            proc.wait()

class TestResult():
    def __init__(self, name, status, time):
        self.name = name
        self.status = status
        self.time = time
        self.padding = 0

    def sort_key(self):
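        # Sort passed tests first, then skipped, then failed, so any failures end up at the
        # bottom of the printed summary.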
        if self.status == "Passed":
            return 0, self.name.lower()
        elif self.status == "Failed":
            return 2, self.name.lower()
        elif self.status == "Skipped":
            return 1, self.name.lower()

    def __repr__(self):
        if self.status == "Passed":
            color = GREEN
            glyph = TICK
        elif self.status == "Failed":
            color = RED
            glyph = CROSS
        elif self.status == "Skipped":
            color = GREY
            glyph = CIRCLE

        return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]

    @property
    def was_successful(self):
        return self.status != "Failed"

def check_script_prefixes():
    """Check that test scripts start with one of the allowed name prefixes."""
    good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
    bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]

    if bad_script_names:
        print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
        print("%s" % ("\n".join(sorted(bad_script_names))))
        raise AssertionError("Some tests are not following naming convention!")

def check_script_list(*, src_dir, fail_on_warn):
    """Check scripts directory.

    Check that there are no scripts in the functional tests directory which are
    not being run by test_runner.py."""
    script_dir = src_dir + '/test/functional/'
    python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
    missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
    if len(missed_tests) != 0:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if fail_on_warn:
            # On CI this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)

class RPCCoverage():
    """
    Coverage reporting utilities for test_runner.

    Coverage calculation works by having each test script subprocess write
    coverage files into a particular directory. These files contain the RPC
    commands invoked during testing, as well as a complete listing of RPC
    commands per `bitcoin-cli help` (`rpc_interface.txt`).

    After all tests complete, the commands run are combined and diff'd against
    the complete list to calculate uncovered RPC commands.

    See also: test/functional/test_framework/coverage.py

    """
    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir
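        # The flag is appended to every test script's command line (see run_tests); the test
        # framework then writes per-run 'coverage.*' files into self.dir, which
        # report_rpc_coverage() reads back afterwards.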

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()

        if uncovered:
            print("Uncovered RPC commands:")
            print("".join(("  - %s\n" % command) for command in sorted(uncovered)))
            return False
        else:
            print("All RPC commands covered.")
            return True

    def cleanup(self):
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test_framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'

        coverage_ref_filename = os.path.join(self.dir, reference_filename)
        coverage_filenames = set()
        all_cmds = set()
        covered_cmds = set()

        if not os.path.isfile(coverage_ref_filename):
            raise RuntimeError("No coverage reference found")

        with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
            all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])

        for root, _, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith(coverage_file_prefix):
                    coverage_filenames.add(os.path.join(root, filename))

        for filename in coverage_filenames:
            with open(filename, 'r', encoding="utf8") as coverage_file:
                covered_cmds.update([line.strip() for line in coverage_file.readlines()])

        return all_cmds - covered_cmds


if __name__ == '__main__':
    main()