Merge #9780: Suppress noisy output from qa tests in Travis

8c7288c Print out the final 1000 lines of test_framework.log if test fails (John Newbery)
6d780b1 Update travis config to run rpc-tests.py in quiet mode (John Newbery)
55992f1 Add --quiet option to suppress rpc-tests.py output (John Newbery)

Tree-SHA512: ab080458a07a9346d3b3cbc8ab59b73cea3d4010b1cb0206bb5fade0aaac7562c623475d0a02993f001b22ae9d1ba68e2d0d1a3645cea7e79cc1045b42e2ce3a
commit c412fd805d

3 changed files with 29 additions and 13 deletions
@@ -70,7 +70,7 @@ script:
     - make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && make $GOAL V=1 ; false )
     - export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/depends/$HOST/lib
     - if [ "$RUN_TESTS" = "true" ]; then make $MAKEJOBS check VERBOSE=1; fi
-    - if [ "$TRAVIS_EVENT_TYPE" = "cron" ]; then extended="--extended --exclude pruning"; fi
+    - if [ "$TRAVIS_EVENT_TYPE" = "cron" ]; then extended="--extended --quiet --exclude pruning"; fi
     - if [ "$RUN_TESTS" = "true" ]; then test/functional/test_runner.py --coverage ${extended}; fi
 after_script:
     - echo $TRAVIS_COMMIT_RANGE
@@ -4,6 +4,7 @@
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Base class for RPC testing."""
 
+from collections import deque
 import logging
 import optparse
 import os
@@ -177,12 +178,17 @@ class BitcoinTestFramework(object):
             # Dump the end of the debug logs, to aid in debugging rare
             # travis failures.
             import glob
-            filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
+            filenames = [self.options.tmpdir + "/test_framework.log"]
+            filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
             MAX_LINES_TO_PRINT = 1000
-            for f in filenames:
-                print("From" , f, ":")
-                from collections import deque
-                print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
+            for fn in filenames:
+                try:
+                    with open(fn, 'r') as f:
+                        print("From" , fn, ":")
+                        print("".join(deque(f, MAX_LINES_TO_PRINT)))
+                except OSError:
+                    print("Opening file %s failed." % fn)
+                    traceback.print_exc()
         if success:
             self.log.info("Tests successful")
             sys.exit(self.TEST_EXIT_PASSED)
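The hunk above reuses collections.deque to keep only the tail of each log and wraps the open in a try block so a missing file no longer aborts the dump. A minimal, standalone sketch of that pattern (the path passed in at the bottom is an illustrative placeholder, not taken from the patch):

from collections import deque

MAX_LINES_TO_PRINT = 1000  # same cap as in the patch

def tail(filename, max_lines=MAX_LINES_TO_PRINT):
    """Return the last max_lines lines of filename as one string."""
    try:
        with open(filename, 'r') as f:
            # A deque with a maxlen discards older lines as it consumes the
            # file, so only the final max_lines lines are held in memory.
            return "".join(deque(f, max_lines))
    except OSError:
        return "Opening file %s failed." % filename

if __name__ == '__main__':
    print(tail("/tmp/test_framework.log"))  # hypothetical path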
@@ -23,6 +23,7 @@ import sys
 import subprocess
 import tempfile
 import re
+import logging
 
 TEST_EXIT_PASSED = 0
 TEST_EXIT_SKIPPED = 77
@@ -141,6 +142,7 @@ def main():
     parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
     parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
     parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
+    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
     parser.add_argument('--nozmq', action='store_true', help='do not run the zmq tests')
     args, unknown_args = parser.parse_known_args()
 
@@ -152,6 +154,10 @@ def main():
     config = configparser.ConfigParser()
     config.read_file(open(os.path.dirname(__file__) + "/config.ini"))
 
+    # Set up logging
+    logging_level = logging.INFO if args.quiet else logging.DEBUG
+    logging.basicConfig(format='%(message)s', level=logging_level)
+
     enable_wallet = config["components"].getboolean("ENABLE_WALLET")
     enable_utils = config["components"].getboolean("ENABLE_UTILS")
     enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
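For context on the level switch above: with --quiet the runner configures logging at INFO, so the logging.debug calls used below for per-test progress and coverage housekeeping are dropped, while plain print output for failures is unaffected. A minimal sketch of that behaviour, with a hard-coded flag standing in for args.quiet:

import logging

quiet = True  # stands in for args.quiet

# Same setup as the patch: INFO when quiet, DEBUG otherwise.
logging_level = logging.INFO if quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)

logging.debug("per-test chatter")            # suppressed when quiet
logging.info("results summary")              # shown in both modes
print("failure details still use print()")   # independent of the logging level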
@@ -233,7 +239,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
     if enable_coverage:
         coverage = RPCCoverage()
         flags.append(coverage.flag)
-        print("Initializing coverage directory at %s\n" % coverage.dir)
+        logging.debug("Initializing coverage directory at %s" % coverage.dir)
     else:
         coverage = None
 
@@ -249,16 +255,20 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
     job_queue = TestHandler(jobs, tests_dir, test_list, flags)
 
     max_len_name = len(max(test_list, key=len))
-    results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
+    results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
     for _ in range(len(test_list)):
         (name, stdout, stderr, status, duration) = job_queue.get_next()
         all_passed = all_passed and status != "Failed"
         time_sum += duration
 
-        print('\n' + BOLD[1] + name + BOLD[0] + ":")
-        print('' if status == "Passed" else stdout + '\n', end='')
-        print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
-        print("Status: %s%s%s, Duration: %s s\n" % (BOLD[1], status, BOLD[0], duration))
+        if status == "Passed":
+            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], name, BOLD[0], duration))
+        elif status == "Skipped":
+            logging.debug("\n%s%s%s skipped" % (BOLD[1], name, BOLD[0]))
+        else:
+            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], name, BOLD[0], duration))
+            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
+            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
 
         results += "%s | %s | %s s\n" % (name.ljust(max_len_name), status.ljust(7), duration)
 
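The reporting loop above routes passed and skipped tests through logging.debug and keeps the full stdout/stderr dump only for failures, which is what makes --quiet runs terse. A reduced sketch of that routing, with made-up result tuples standing in for job_queue.get_next():

import logging

logging.basicConfig(format='%(message)s', level=logging.INFO)  # quiet mode

# Hypothetical (name, stdout, stderr, status, duration) results.
results = [
    ("wallet_basic", "all good", "", "Passed", 12),
    ("feature_pruning", "boom", "assertion failed", "Failed", 30),
]

for name, stdout, stderr, status, duration in results:
    if status == "Passed":
        logging.debug("%s passed, Duration: %s s" % (name, duration))
    elif status == "Skipped":
        logging.debug("%s skipped" % name)
    else:
        # Failures are always printed in full, regardless of the log level.
        print("%s failed, Duration: %s s" % (name, duration))
        print("stdout:\n" + stdout)
        print("stderr:\n" + stderr)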
@@ -269,7 +279,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
     if coverage:
         coverage.report_rpc_coverage()
 
-        print("Cleaning up coverage data")
+        logging.debug("Cleaning up coverage data")
         coverage.cleanup()
 
     sys.exit(not all_passed)