Merge bctest.py into bitcoin-util-test.py
bctest.py is only used as an import by bitcoin-util-test.py. There's no value in keeping it as a separate module, so let's merge them into a single module to keep building and packaging simpler. bitcoin-util-test is importable as a module, so if any future modules really want to import the code from bctest.py, they can import bitcoin-util-test and call the bctest functions by name.
This commit is contained in:
parent
95836c5eba
commit
8ad5bdef78
4 changed files with 141 additions and 155 deletions
|
@ -223,7 +223,6 @@ dist_noinst_SCRIPTS = autogen.sh
|
||||||
EXTRA_DIST = $(top_srcdir)/share/genbuild.sh test/functional/test_runner.py test/functional $(DIST_CONTRIB) $(DIST_DOCS) $(WINDOWS_PACKAGING) $(OSX_PACKAGING) $(BIN_CHECKS)
|
EXTRA_DIST = $(top_srcdir)/share/genbuild.sh test/functional/test_runner.py test/functional $(DIST_CONTRIB) $(DIST_DOCS) $(WINDOWS_PACKAGING) $(OSX_PACKAGING) $(BIN_CHECKS)
|
||||||
|
|
||||||
EXTRA_DIST += \
|
EXTRA_DIST += \
|
||||||
test/util/bctest.py \
|
|
||||||
test/util/bitcoin-util-test.py \
|
test/util/bitcoin-util-test.py \
|
||||||
test/util/data/bitcoin-util-test.json \
|
test/util/data/bitcoin-util-test.json \
|
||||||
test/util/data/blanktxv1.hex \
|
test/util/data/blanktxv1.hex \
|
||||||
|
|
|
@ -1165,7 +1165,6 @@ AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/spl
|
||||||
AC_CONFIG_FILES([doc/Doxyfile])
|
AC_CONFIG_FILES([doc/Doxyfile])
|
||||||
AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py])
|
AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py])
|
||||||
AC_CONFIG_LINKS([test/util/bitcoin-util-test.py:test/util/bitcoin-util-test.py])
|
AC_CONFIG_LINKS([test/util/bitcoin-util-test.py:test/util/bitcoin-util-test.py])
|
||||||
AC_CONFIG_LINKS([test/util/bctest.py:test/util/bctest.py])
|
|
||||||
|
|
||||||
dnl boost's m4 checks do something really nasty: they export these vars. As a
|
dnl boost's m4 checks do something really nasty: they export these vars. As a
|
||||||
dnl result, they leak into secp256k1's configure and crazy things happen.
|
dnl result, they leak into secp256k1's configure and crazy things happen.
|
||||||
|
|
|
@ -1,139 +0,0 @@
|
||||||
# Copyright 2014 BitPay Inc.
|
|
||||||
# Copyright 2016 The Bitcoin Core developers
|
|
||||||
# Distributed under the MIT software license, see the accompanying
|
|
||||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
|
||||||
from __future__ import division,print_function,unicode_literals
|
|
||||||
import subprocess
|
|
||||||
import os
|
|
||||||
import json
|
|
||||||
import sys
|
|
||||||
import binascii
|
|
||||||
import difflib
|
|
||||||
import logging
|
|
||||||
import pprint
|
|
||||||
|
|
||||||
def parse_output(a, fmt):
|
|
||||||
"""Parse the output according to specified format.
|
|
||||||
|
|
||||||
Raise an error if the output can't be parsed."""
|
|
||||||
if fmt == 'json': # json: compare parsed data
|
|
||||||
return json.loads(a)
|
|
||||||
elif fmt == 'hex': # hex: parse and compare binary data
|
|
||||||
return binascii.a2b_hex(a.strip())
|
|
||||||
else:
|
|
||||||
raise NotImplementedError("Don't know how to compare %s" % fmt)
|
|
||||||
|
|
||||||
def bctest(testDir, testObj, buildenv):
|
|
||||||
"""Runs a single test, comparing output and RC to expected output and RC.
|
|
||||||
|
|
||||||
Raises an error if input can't be read, executable fails, or output/RC
|
|
||||||
are not as expected. Error is caught by bctester() and reported.
|
|
||||||
"""
|
|
||||||
# Get the exec names and arguments
|
|
||||||
execprog = buildenv.BUILDDIR + "/src/" + testObj['exec'] + buildenv.exeext
|
|
||||||
execargs = testObj['args']
|
|
||||||
execrun = [execprog] + execargs
|
|
||||||
|
|
||||||
# Read the input data (if there is any)
|
|
||||||
stdinCfg = None
|
|
||||||
inputData = None
|
|
||||||
if "input" in testObj:
|
|
||||||
filename = testDir + "/" + testObj['input']
|
|
||||||
inputData = open(filename).read()
|
|
||||||
stdinCfg = subprocess.PIPE
|
|
||||||
|
|
||||||
# Read the expected output data (if there is any)
|
|
||||||
outputFn = None
|
|
||||||
outputData = None
|
|
||||||
if "output_cmp" in testObj:
|
|
||||||
outputFn = testObj['output_cmp']
|
|
||||||
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
|
|
||||||
try:
|
|
||||||
outputData = open(testDir + "/" + outputFn).read()
|
|
||||||
except:
|
|
||||||
logging.error("Output file " + outputFn + " can not be opened")
|
|
||||||
raise
|
|
||||||
if not outputData:
|
|
||||||
logging.error("Output data missing for " + outputFn)
|
|
||||||
raise Exception
|
|
||||||
|
|
||||||
# Run the test
|
|
||||||
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE,universal_newlines=True)
|
|
||||||
try:
|
|
||||||
outs = proc.communicate(input=inputData)
|
|
||||||
except OSError:
|
|
||||||
logging.error("OSError, Failed to execute " + execprog)
|
|
||||||
raise
|
|
||||||
|
|
||||||
if outputData:
|
|
||||||
data_mismatch, formatting_mismatch = False, False
|
|
||||||
# Parse command output and expected output
|
|
||||||
try:
|
|
||||||
a_parsed = parse_output(outs[0], outputType)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error('Error parsing command output as %s: %s' % (outputType,e))
|
|
||||||
raise
|
|
||||||
try:
|
|
||||||
b_parsed = parse_output(outputData, outputType)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error('Error parsing expected output %s as %s: %s' % (outputFn,outputType,e))
|
|
||||||
raise
|
|
||||||
# Compare data
|
|
||||||
if a_parsed != b_parsed:
|
|
||||||
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
|
|
||||||
data_mismatch = True
|
|
||||||
# Compare formatting
|
|
||||||
if outs[0] != outputData:
|
|
||||||
error_message = "Output formatting mismatch for " + outputFn + ":\n"
|
|
||||||
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
|
|
||||||
outs[0].splitlines(True),
|
|
||||||
fromfile=outputFn,
|
|
||||||
tofile="returned"))
|
|
||||||
logging.error(error_message)
|
|
||||||
formatting_mismatch = True
|
|
||||||
|
|
||||||
assert not data_mismatch and not formatting_mismatch
|
|
||||||
|
|
||||||
# Compare the return code to the expected return code
|
|
||||||
wantRC = 0
|
|
||||||
if "return_code" in testObj:
|
|
||||||
wantRC = testObj['return_code']
|
|
||||||
if proc.returncode != wantRC:
|
|
||||||
logging.error("Return code mismatch for " + outputFn)
|
|
||||||
raise Exception
|
|
||||||
|
|
||||||
if "error_txt" in testObj:
|
|
||||||
want_error = testObj["error_txt"]
|
|
||||||
# Compare error text
|
|
||||||
# TODO: ideally, we'd compare the strings exactly and also assert
|
|
||||||
# That stderr is empty if no errors are expected. However, bitcoin-tx
|
|
||||||
# emits DISPLAY errors when running as a windows application on
|
|
||||||
# linux through wine. Just assert that the expected error text appears
|
|
||||||
# somewhere in stderr.
|
|
||||||
if want_error not in outs[1]:
|
|
||||||
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
|
|
||||||
raise Exception
|
|
||||||
|
|
||||||
def bctester(testDir, input_basename, buildenv):
|
|
||||||
""" Loads and parses the input file, runs all tests and reports results"""
|
|
||||||
input_filename = testDir + "/" + input_basename
|
|
||||||
raw_data = open(input_filename).read()
|
|
||||||
input_data = json.loads(raw_data)
|
|
||||||
|
|
||||||
failed_testcases = []
|
|
||||||
|
|
||||||
for testObj in input_data:
|
|
||||||
try:
|
|
||||||
bctest(testDir, testObj, buildenv)
|
|
||||||
logging.info("PASSED: " + testObj["description"])
|
|
||||||
except:
|
|
||||||
logging.info("FAILED: " + testObj["description"])
|
|
||||||
failed_testcases.append(testObj["description"])
|
|
||||||
|
|
||||||
if failed_testcases:
|
|
||||||
error_message = "FAILED_TESTCASES:\n"
|
|
||||||
error_message += pprint.pformat(failed_testcases, width=400)
|
|
||||||
logging.error(error_message)
|
|
||||||
sys.exit(1)
|
|
||||||
else:
|
|
||||||
sys.exit(0)
|
|
|
@ -1,6 +1,6 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright 2014 BitPay Inc.
|
# Copyright 2014 BitPay Inc.
|
||||||
# Copyright 2016 The Bitcoin Core developers
|
# Copyright 2016-2017 The Bitcoin Core developers
|
||||||
# Distributed under the MIT software license, see the accompanying
|
# Distributed under the MIT software license, see the accompanying
|
||||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||||
"""Test framework for bitcoin utils.
|
"""Test framework for bitcoin utils.
|
||||||
|
@ -9,23 +9,21 @@ Runs automatically during `make check`.
|
||||||
|
|
||||||
Can also be run manually."""
|
Can also be run manually."""
|
||||||
|
|
||||||
import configparser
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import argparse
|
import argparse
|
||||||
|
import binascii
|
||||||
|
import configparser
|
||||||
|
import difflib
|
||||||
|
import json
|
||||||
import logging
|
import logging
|
||||||
|
import os
|
||||||
|
import pprint
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
|
||||||
if __name__ == '__main__':
|
def main():
|
||||||
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
|
||||||
import bctest
|
|
||||||
|
|
||||||
config = configparser.ConfigParser()
|
config = configparser.ConfigParser()
|
||||||
config.read_file(open(os.path.dirname(__file__) + "/../config.ini"))
|
config.read_file(open(os.path.dirname(__file__) + "/../config.ini"))
|
||||||
|
|
||||||
buildenv = argparse.Namespace(exeext=config["environment"]["EXEEXT"],
|
|
||||||
SRCDIR=config["environment"]["SRCDIR"],
|
|
||||||
BUILDDIR=config["environment"]["BUILDDIR"])
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description=__doc__)
|
parser = argparse.ArgumentParser(description=__doc__)
|
||||||
parser.add_argument('-v', '--verbose', action='store_true')
|
parser.add_argument('-v', '--verbose', action='store_true')
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
@ -39,4 +37,133 @@ if __name__ == '__main__':
|
||||||
# Add the format/level to the logger
|
# Add the format/level to the logger
|
||||||
logging.basicConfig(format=formatter, level=level)
|
logging.basicConfig(format=formatter, level=level)
|
||||||
|
|
||||||
bctest.bctester(buildenv.SRCDIR + "/test/util/data", "bitcoin-util-test.json", buildenv)
|
bctester(config["environment"]["SRCDIR"] + "/test/util/data", "bitcoin-util-test.json", config["environment"])
|
||||||
|
|
||||||
|
def bctester(testDir, input_basename, buildenv):
    """Loads and parses the input file, runs all tests and reports results.

    Args:
        testDir: directory containing the test JSON file and data files.
        input_basename: name of the JSON file listing the test cases.
        buildenv: mapping providing 'BUILDDIR' and 'EXEEXT', passed through
            to bctest() unchanged.

    Exits the process with status 1 if any test case failed, 0 otherwise.
    """
    input_filename = testDir + "/" + input_basename
    # Use a context manager so the file handle is closed instead of leaked.
    with open(input_filename) as input_file:
        input_data = json.load(input_file)

    failed_testcases = []

    for testObj in input_data:
        try:
            bctest(testDir, testObj, buildenv)
            logging.info("PASSED: " + testObj["description"])
        except Exception:
            # bctest() signals failure by raising.  A bare `except:` here would
            # also swallow KeyboardInterrupt/SystemExit, so catch Exception only
            # (this still covers AssertionError and all failure paths in bctest).
            logging.info("FAILED: " + testObj["description"])
            failed_testcases.append(testObj["description"])

    if failed_testcases:
        error_message = "FAILED_TESTCASES:\n"
        error_message += pprint.pformat(failed_testcases, width=400)
        logging.error(error_message)
        sys.exit(1)
    else:
        sys.exit(0)
|
def bctest(testDir, testObj, buildenv):
    """Runs a single test, comparing output and RC to expected output and RC.

    Raises an error if input can't be read, executable fails, or output/RC
    are not as expected. Error is caught by bctester() and reported.

    Args:
        testDir: directory containing test input / expected-output files.
        testObj: dict describing one test case ('exec', 'args', and optional
            'input', 'output_cmp', 'return_code', 'error_txt' keys).
        buildenv: mapping providing 'BUILDDIR' and 'EXEEXT'.
    """
    # Get the exec names and arguments
    execprog = buildenv["BUILDDIR"] + "/src/" + testObj['exec'] + buildenv["EXEEXT"]
    execargs = testObj['args']
    execrun = [execprog] + execargs

    # Read the input data (if there is any)
    stdinCfg = None
    inputData = None
    if "input" in testObj:
        filename = testDir + "/" + testObj['input']
        # Context manager: close the handle instead of leaking it.
        with open(filename) as input_file:
            inputData = input_file.read()
        stdinCfg = subprocess.PIPE

    # Read the expected output data (if there is any)
    outputFn = None
    outputData = None
    if "output_cmp" in testObj:
        outputFn = testObj['output_cmp']
        outputType = os.path.splitext(outputFn)[1][1:]  # output type from file extension (determines how to compare)
        try:
            with open(testDir + "/" + outputFn) as output_file:
                outputData = output_file.read()
        except OSError:
            # Narrowed from a bare `except:` — only file open/read errors
            # belong here, not KeyboardInterrupt and friends.
            logging.error("Output file " + outputFn + " can not be opened")
            raise
        if not outputData:
            logging.error("Output data missing for " + outputFn)
            raise Exception("Output data missing for " + outputFn)

    # Run the test
    proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    try:
        outs = proc.communicate(input=inputData)
    except OSError:
        logging.error("OSError, Failed to execute " + execprog)
        raise

    if outputData:
        data_mismatch, formatting_mismatch = False, False
        # Parse command output and expected output
        try:
            a_parsed = parse_output(outs[0], outputType)
        except Exception as e:
            logging.error('Error parsing command output as %s: %s' % (outputType, e))
            raise
        try:
            b_parsed = parse_output(outputData, outputType)
        except Exception as e:
            logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
            raise
        # Compare data
        if a_parsed != b_parsed:
            logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
            data_mismatch = True
        # Compare formatting
        if outs[0] != outputData:
            error_message = "Output formatting mismatch for " + outputFn + ":\n"
            error_message += "".join(difflib.context_diff(outputData.splitlines(True),
                                                          outs[0].splitlines(True),
                                                          fromfile=outputFn,
                                                          tofile="returned"))
            logging.error(error_message)
            formatting_mismatch = True

        # Raise explicitly rather than via `assert`, which is stripped
        # when Python runs with -O and would silently pass failing tests.
        if data_mismatch or formatting_mismatch:
            raise Exception("Output mismatch for " + outputFn)

    # Compare the return code to the expected return code
    wantRC = 0
    if "return_code" in testObj:
        wantRC = testObj['return_code']
    if proc.returncode != wantRC:
        # str() guards the case where no 'output_cmp' was given: outputFn is
        # None then, and "..." + None would raise TypeError instead of the
        # intended test-failure Exception.
        logging.error("Return code mismatch for " + str(outputFn))
        raise Exception("Return code mismatch for " + str(outputFn))

    if "error_txt" in testObj:
        want_error = testObj["error_txt"]
        # Compare error text
        # TODO: ideally, we'd compare the strings exactly and also assert
        # That stderr is empty if no errors are expected. However, bitcoin-tx
        # emits DISPLAY errors when running as a windows application on
        # linux through wine. Just assert that the expected error text appears
        # somewhere in stderr.
        if want_error not in outs[1]:
            logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
            raise Exception("Error text mismatch")
||||||
|
def parse_output(a, fmt):
    """Parse the output according to specified format.

    Raise an error if the output can't be parsed."""
    if fmt == 'json':
        # json: compare parsed data
        return json.loads(a)
    if fmt == 'hex':
        # hex: parse and compare binary data
        return binascii.a2b_hex(a.strip())
    raise NotImplementedError("Don't know how to compare %s" % fmt)
|
# Script entry point: run the tests only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
Loading…
Reference in a new issue