Improved microbenchmarking with multiple features.
* Inline performance-critical code
* Average runtime is specified per benchmark and used to calculate the iteration count
* Console printer: show median of multiple runs
* Plot printer: show box plot
* Filter benchmarks
* Specify a scaling factor
* Ignore src/test and src/bench in the command line check script
* Use a fixed number of iterations instead of a target runtime
* Replaced the runtime argument of the BENCHMARK macro with a number of iterations
* Added -? to bench_bitcoin
* The plotly.js URL, width, and height of the benchmark plot can be customized
* Fixed incorrect precision warning
Parent: 604e08c83c
Commit: 00721e69f8
15 changed files with 286 additions and 171 deletions
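Before the diff itself, here is a minimal sketch of how a benchmark is declared after this change. It is based on the usage comment in bench.h further below; CODE_TO_TIME and the count 5000 are the placeholder names from that comment rather than a real benchmark, and the include path is an assumption.

    #include <bench/bench.h> // assumed include path

    static void CODE_TO_TIME(benchmark::State& state)
    {
        // ... any setup needed ...
        while (state.KeepRunning()) {
            // ... the code being timed; this body runs num_iters times per evaluation ...
        }
        // ... any cleanup needed ...
    }

    // The second argument is the number of iterations that takes roughly one
    // second on a typical machine; it replaces the old target-runtime argument.
    BENCHMARK(CODE_TO_TIME, 5000);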
@@ -15,7 +15,7 @@ static void Sleep100ms(benchmark::State& state)
     }
 }
 
-BENCHMARK(Sleep100ms);
+BENCHMARK(Sleep100ms, 10);
 
 // Extremely fast-running benchmark:
 #include <math.h>
@@ -31,4 +31,4 @@ static void Trig(benchmark::State& state)
     }
 }
 
-BENCHMARK(Trig);
+BENCHMARK(Trig, 12 * 1000 * 1000);
@@ -54,6 +54,6 @@ static void Base58Decode(benchmark::State& state)
 }
 
 
-BENCHMARK(Base58Encode);
-BENCHMARK(Base58CheckEncode);
-BENCHMARK(Base58Decode);
+BENCHMARK(Base58Encode, 470 * 1000);
+BENCHMARK(Base58CheckEncode, 320 * 1000);
+BENCHMARK(Base58Decode, 800 * 1000);
@@ -8,98 +8,136 @@
 #include <assert.h>
 #include <iostream>
 #include <iomanip>
+#include <algorithm>
+#include <regex>
+#include <numeric>
 
-benchmark::BenchRunner::BenchmarkMap &benchmark::BenchRunner::benchmarks() {
-    static std::map<std::string, benchmark::BenchFunction> benchmarks_map;
+void benchmark::ConsolePrinter::header()
+{
+    std::cout << "# Benchmark, evals, iterations, total, min, max, median" << std::endl;
+}
+
+void benchmark::ConsolePrinter::result(const State& state)
+{
+    auto results = state.m_elapsed_results;
+    std::sort(results.begin(), results.end());
+
+    double total = state.m_num_iters * std::accumulate(results.begin(), results.end(), 0.0);
+
+    double front = 0;
+    double back = 0;
+    double median = 0;
+
+    if (!results.empty()) {
+        front = results.front();
+        back = results.back();
+
+        size_t mid = results.size() / 2;
+        median = results[mid];
+        if (0 == results.size() % 2) {
+            median = (results[mid] + results[mid + 1]) / 2;
+        }
+    }
+
+    std::cout << std::setprecision(6);
+    std::cout << state.m_name << ", " << state.m_num_evals << ", " << state.m_num_iters << ", " << total << ", " << front << ", " << back << ", " << median << std::endl;
+}
+
+void benchmark::ConsolePrinter::footer() {}
+benchmark::PlotlyPrinter::PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height)
+    : m_plotly_url(plotly_url), m_width(width), m_height(height)
+{
+}
+
+void benchmark::PlotlyPrinter::header()
+{
+    std::cout << "<html><head>"
+              << "<script src=\"" << m_plotly_url << "\"></script>"
+              << "</head><body><div id=\"myDiv\" style=\"width:" << m_width << "px; height:" << m_height << "px\"></div>"
+              << "<script> var data = ["
+              << std::endl;
+}
+
+void benchmark::PlotlyPrinter::result(const State& state)
+{
+    std::cout << "{ " << std::endl
+              << " name: '" << state.m_name << "', " << std::endl
+              << " y: [";
+
+    const char* prefix = "";
+    for (const auto& e : state.m_elapsed_results) {
+        std::cout << prefix << std::setprecision(6) << e;
+        prefix = ", ";
+    }
+    std::cout << "]," << std::endl
+              << " boxpoints: 'all', jitter: 0.3, pointpos: 0, type: 'box',"
+              << std::endl
+              << "}," << std::endl;
+}
+
+void benchmark::PlotlyPrinter::footer()
+{
+    std::cout << "]; var layout = { showlegend: false, yaxis: { rangemode: 'tozero', autorange: true } };"
+              << "Plotly.newPlot('myDiv', data, layout);"
+              << "</script></body></html>";
+}
+
+
+benchmark::BenchRunner::BenchmarkMap& benchmark::BenchRunner::benchmarks()
+{
+    static std::map<std::string, Bench> benchmarks_map;
     return benchmarks_map;
 }
 
-benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func)
+benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func, uint64_t num_iters_for_one_second)
 {
-    benchmarks().insert(std::make_pair(name, func));
+    benchmarks().insert(std::make_pair(name, Bench{func, num_iters_for_one_second}));
 }
 
-void
-benchmark::BenchRunner::RunAll(benchmark::duration elapsedTimeForOne)
+void benchmark::BenchRunner::RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only)
 {
     perf_init();
-    if (std::ratio_less_equal<benchmark::clock::period, std::micro>::value) {
+    if (!std::ratio_less_equal<benchmark::clock::period, std::micro>::value) {
         std::cerr << "WARNING: Clock precision is worse than microsecond - benchmarks may be less accurate!\n";
     }
-    std::cout << "#Benchmark" << "," << "count" << "," << "min(ns)" << "," << "max(ns)" << "," << "average(ns)" << ","
-              << "min_cycles" << "," << "max_cycles" << "," << "average_cycles" << "\n";
 
-    for (const auto &p: benchmarks()) {
-        State state(p.first, elapsedTimeForOne);
-        p.second(state);
+    std::regex reFilter(filter);
+    std::smatch baseMatch;
+    printer.header();
+
+    for (const auto& p : benchmarks()) {
+        if (!std::regex_match(p.first, baseMatch, reFilter)) {
+            continue;
+        }
+
+        uint64_t num_iters = static_cast<uint64_t>(p.second.num_iters_for_one_second * scaling);
+        if (0 == num_iters) {
+            num_iters = 1;
+        }
+        State state(p.first, num_evals, num_iters, printer);
+        if (!is_list_only) {
+            p.second.func(state);
+        }
+        printer.result(state);
     }
+
+    printer.footer();
+
     perf_fini();
 }
 
-bool benchmark::State::KeepRunning()
+bool benchmark::State::UpdateTimer(const benchmark::time_point current_time)
 {
-    if (count & countMask) {
-        ++count;
-        return true;
-    }
-    time_point now;
+    if (m_start_time != time_point()) {
+        std::chrono::duration<double> diff = current_time - m_start_time;
+        m_elapsed_results.push_back(diff.count() / m_num_iters);
 
-    uint64_t nowCycles;
-    if (count == 0) {
-        lastTime = beginTime = now = clock::now();
-        lastCycles = beginCycles = nowCycles = perf_cpucycles();
-    }
-    else {
-        now = clock::now();
-        auto elapsed = now - lastTime;
-        auto elapsedOne = elapsed / (countMask + 1);
-        if (elapsedOne < minTime) minTime = elapsedOne;
-        if (elapsedOne > maxTime) maxTime = elapsedOne;
-
-        // We only use relative values, so don't have to handle 64-bit wrap-around specially
-        nowCycles = perf_cpucycles();
-        uint64_t elapsedOneCycles = (nowCycles - lastCycles) / (countMask + 1);
-        if (elapsedOneCycles < minCycles) minCycles = elapsedOneCycles;
-        if (elapsedOneCycles > maxCycles) maxCycles = elapsedOneCycles;
-
-        if (elapsed*128 < maxElapsed) {
-            // If the execution was much too fast (1/128th of maxElapsed), increase the count mask by 8x and restart timing.
-            // The restart avoids including the overhead of this code in the measurement.
-            countMask = ((countMask<<3)|7) & ((1LL<<60)-1);
-            count = 0;
-            minTime = duration::max();
-            maxTime = duration::zero();
-            minCycles = std::numeric_limits<uint64_t>::max();
-            maxCycles = std::numeric_limits<uint64_t>::min();
-            return true;
-        }
-        if (elapsed*16 < maxElapsed) {
-            uint64_t newCountMask = ((countMask<<1)|1) & ((1LL<<60)-1);
-            if ((count & newCountMask)==0) {
-                countMask = newCountMask;
-            }
+        if (m_elapsed_results.size() == m_num_evals) {
+            return false;
         }
     }
-    lastTime = now;
-    lastCycles = nowCycles;
-    ++count;
-
-    if (now - beginTime < maxElapsed) return true; // Keep going
-
-    --count;
-
-    assert(count != 0 && "count == 0 => (now == 0 && beginTime == 0) => return above");
-
-    // Output results
-    // Duration casts are only necessary here because hardware with sub-nanosecond clocks
-    // will lose precision.
-    int64_t min_elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(minTime).count();
-    int64_t max_elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(maxTime).count();
-    int64_t avg_elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>((now-beginTime)/count).count();
-    int64_t averageCycles = (nowCycles-beginCycles)/count;
-    std::cout << std::fixed << std::setprecision(15) << name << "," << count << "," << min_elapsed << "," << max_elapsed << "," << avg_elapsed << ","
-              << minCycles << "," << maxCycles << "," << averageCycles << "\n";
-    std::cout.copyfmt(std::ios(nullptr));
-
-    return false;
+    m_num_iters_left = m_num_iters - 1;
+    return true;
 }
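To summarize the accounting performed by RunAll and UpdateTimer above: each benchmark runs num_iters = num_iters_for_one_second * scaling iterations per evaluation (at least one), this is repeated num_evals times, and every evaluation stores its average per-iteration time in m_elapsed_results for the printers. A standalone sketch of that arithmetic follows; it is not part of the patch and the function names are illustrative.

    #include <cstdint>
    #include <vector>

    // Iterations actually executed per evaluation, as derived in RunAll().
    uint64_t IterationsPerEvaluation(uint64_t num_iters_for_one_second, double scaling)
    {
        uint64_t num_iters = static_cast<uint64_t>(num_iters_for_one_second * scaling);
        return num_iters == 0 ? 1 : num_iters; // never run zero iterations
    }

    // Per-iteration time recorded by UpdateTimer() after each evaluation; the
    // console printer later sorts these values to report min, max and median.
    void RecordEvaluation(std::vector<double>& elapsed_results, double elapsed_seconds, uint64_t num_iters)
    {
        elapsed_results.push_back(elapsed_seconds / static_cast<double>(num_iters));
    }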
@@ -9,6 +9,7 @@
 #include <limits>
 #include <map>
 #include <string>
+#include <vector>
 #include <chrono>
 
 #include <boost/preprocessor/cat.hpp>
@@ -32,64 +33,110 @@ static void CODE_TO_TIME(benchmark::State& state)
     ... do any cleanup needed...
 }
 
-BENCHMARK(CODE_TO_TIME);
+// default to running benchmark for 5000 iterations
+BENCHMARK(CODE_TO_TIME, 5000);
 
 */
 
 namespace benchmark {
 // In case high_resolution_clock is steady, prefer that, otherwise use steady_clock.
 struct best_clock {
     using hi_res_clock = std::chrono::high_resolution_clock;
     using steady_clock = std::chrono::steady_clock;
     using type = std::conditional<hi_res_clock::is_steady, hi_res_clock, steady_clock>::type;
 };
 using clock = best_clock::type;
 using time_point = clock::time_point;
 using duration = clock::duration;
 
-class State {
-    std::string name;
-    duration maxElapsed;
-    time_point beginTime, lastTime;
-    duration minTime, maxTime;
-    uint64_t count;
-    uint64_t countMask;
-    uint64_t beginCycles;
-    uint64_t lastCycles;
-    uint64_t minCycles;
-    uint64_t maxCycles;
-public:
-    State(std::string _name, duration _maxElapsed) :
-        name(_name),
-        maxElapsed(_maxElapsed),
-        minTime(duration::max()),
-        maxTime(duration::zero()),
-        count(0),
-        countMask(1),
-        beginCycles(0),
-        lastCycles(0),
-        minCycles(std::numeric_limits<uint64_t>::max()),
-        maxCycles(std::numeric_limits<uint64_t>::min()) {
-    }
-    bool KeepRunning();
-};
-
-typedef std::function<void(State&)> BenchFunction;
+class Printer;
+
+class State
+{
+public:
+    std::string m_name;
+    uint64_t m_num_iters_left;
+    const uint64_t m_num_iters;
+    const uint64_t m_num_evals;
+    std::vector<double> m_elapsed_results;
+    time_point m_start_time;
 
-class BenchRunner
+    bool UpdateTimer(time_point finish_time);
+
+    State(std::string name, uint64_t num_evals, double num_iters, Printer& printer) : m_name(name), m_num_iters_left(0), m_num_iters(num_iters), m_num_evals(num_evals)
 {
-    typedef std::map<std::string, BenchFunction> BenchmarkMap;
-    static BenchmarkMap &benchmarks();
+    }
 
-public:
-    BenchRunner(std::string name, BenchFunction func);
+    inline bool KeepRunning()
+    {
+        if (m_num_iters_left--) {
+            return true;
+        }
 
-    static void RunAll(duration elapsedTimeForOne = std::chrono::seconds(1));
+        bool result = UpdateTimer(clock::now());
+        // measure again so runtime of UpdateTimer is not included
+        m_start_time = clock::now();
+        return result;
+    }
+};
+
+typedef std::function<void(State&)> BenchFunction;
+
+class BenchRunner
+{
+    struct Bench {
+        BenchFunction func;
+        uint64_t num_iters_for_one_second;
     };
+    typedef std::map<std::string, Bench> BenchmarkMap;
+    static BenchmarkMap& benchmarks();
+
+public:
+    BenchRunner(std::string name, BenchFunction func, uint64_t num_iters_for_one_second);
+
+    static void RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only);
+};
+
+// interface to output benchmark results.
+class Printer
+{
+public:
+    virtual ~Printer() {}
+    virtual void header() = 0;
+    virtual void result(const State& state) = 0;
+    virtual void footer() = 0;
+};
+
+// default printer to console, shows min, max, median.
+class ConsolePrinter : public Printer
+{
+public:
+    void header();
+    void result(const State& state);
+    void footer();
+};
+
+// creates box plot with plotly.js
+class PlotlyPrinter : public Printer
+{
+public:
+    PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height);
+    void header();
+    void result(const State& state);
+    void footer();
+
+private:
+    std::string m_plotly_url;
+    int64_t m_width;
+    int64_t m_height;
+};
 }
 
-// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo", foo);
-#define BENCHMARK(n) \
-    benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n);
+// BENCHMARK(foo, num_iters_for_one_second) expands to: benchmark::BenchRunner bench_11foo("foo", num_iterations);
+// Choose a num_iters_for_one_second that takes roughly 1 second. The goal is that all benchmarks should take approximately
+// the same time, and scaling factor can be used that the total time is appropriate for your system.
+#define BENCHMARK(n, num_iters_for_one_second) \
+    benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n, (num_iters_for_one_second));
 
 #endif // BITCOIN_BENCH_BENCH_H
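Because all output goes through the Printer interface declared above, other formats can be added without touching the runner. The following is a hypothetical example, not part of this commit, that emits one CSV row per evaluation; it assumes the header is available as <bench/bench.h>.

    #include <bench/bench.h> // assumed include path
    #include <iostream>

    // Hypothetical printer: one CSV row per evaluation, built only from the
    // Printer and State declarations shown in the header above.
    class CsvPrinter : public benchmark::Printer
    {
    public:
        void header() override { std::cout << "name,evaluation,iterations,seconds_per_iteration\n"; }
        void result(const benchmark::State& state) override
        {
            int eval = 0;
            for (double elapsed : state.m_elapsed_results) {
                std::cout << state.m_name << ',' << eval++ << ',' << state.m_num_iters << ',' << elapsed << '\n';
            }
        }
        void footer() override {}
    };

An instance of such a class could be passed to benchmark::BenchRunner::RunAll() in place of the ConsolePrinter, the same way bench_bitcoin.cpp selects the PlotlyPrinter in the next hunk.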
@@ -10,16 +10,62 @@
 #include <util.h>
 #include <random.h>
 
+#include <boost/lexical_cast.hpp>
+
+#include <memory>
+
+static const int64_t DEFAULT_BENCH_EVALUATIONS = 5;
+static const char* DEFAULT_BENCH_FILTER = ".*";
+static const char* DEFAULT_BENCH_SCALING = "1.0";
+static const char* DEFAULT_BENCH_PRINTER = "console";
+static const char* DEFAULT_PLOT_PLOTLYURL = "https://cdn.plot.ly/plotly-latest.min.js";
+static const int64_t DEFAULT_PLOT_WIDTH = 1024;
+static const int64_t DEFAULT_PLOT_HEIGHT = 768;
+
 int
 main(int argc, char** argv)
 {
+    gArgs.ParseParameters(argc, argv);
+
+    if (gArgs.IsArgSet("-?") || gArgs.IsArgSet("-h") || gArgs.IsArgSet("-help")) {
+        std::cout << HelpMessageGroup(_("Options:"))
+                  << HelpMessageOpt("-?", _("Print this help message and exit"))
+                  << HelpMessageOpt("-list", _("List benchmarks without executing them. Can be combined with -scaling and -filter"))
+                  << HelpMessageOpt("-evals=<n>", strprintf(_("Number of measurement evaluations to perform. (default: %u)"), DEFAULT_BENCH_EVALUATIONS))
+                  << HelpMessageOpt("-filter=<regex>", strprintf(_("Regular expression filter to select benchmark by name (default: %s)"), DEFAULT_BENCH_FILTER))
+                  << HelpMessageOpt("-scaling=<n>", strprintf(_("Scaling factor for benchmark's runtime (default: %u)"), DEFAULT_BENCH_SCALING))
+                  << HelpMessageOpt("-printer=(console|plot)", strprintf(_("Choose printer format. console: print data to console. plot: Print results as HTML graph (default: %s)"), DEFAULT_BENCH_PRINTER))
+                  << HelpMessageOpt("-plot-plotlyurl=<uri>", strprintf(_("URL to use for plotly.js (default: %s)"), DEFAULT_PLOT_PLOTLYURL))
+                  << HelpMessageOpt("-plot-width=<x>", strprintf(_("Plot width in pixel (default: %u)"), DEFAULT_PLOT_WIDTH))
+                  << HelpMessageOpt("-plot-height=<x>", strprintf(_("Plot height in pixel (default: %u)"), DEFAULT_PLOT_HEIGHT));
+
+        return 0;
+    }
+
     SHA256AutoDetect();
     RandomInit();
     ECC_Start();
     SetupEnvironment();
     fPrintToDebugLog = false; // don't want to write to debug.log file
 
-    benchmark::BenchRunner::RunAll();
+    int64_t evaluations = gArgs.GetArg("-evals", DEFAULT_BENCH_EVALUATIONS);
+    std::string regex_filter = gArgs.GetArg("-filter", DEFAULT_BENCH_FILTER);
+    std::string scaling_str = gArgs.GetArg("-scaling", DEFAULT_BENCH_SCALING);
+    bool is_list_only = gArgs.GetBoolArg("-list", false);
+
+    double scaling_factor = boost::lexical_cast<double>(scaling_str);
+
+
+    std::unique_ptr<benchmark::Printer> printer(new benchmark::ConsolePrinter());
+    std::string printer_arg = gArgs.GetArg("-printer", DEFAULT_BENCH_PRINTER);
+    if ("plot" == printer_arg) {
+        printer.reset(new benchmark::PlotlyPrinter(
+            gArgs.GetArg("-plot-plotlyurl", DEFAULT_PLOT_PLOTLYURL),
+            gArgs.GetArg("-plot-width", DEFAULT_PLOT_WIDTH),
+            gArgs.GetArg("-plot-height", DEFAULT_PLOT_HEIGHT)));
+    }
+
+    benchmark::BenchRunner::RunAll(*printer, evaluations, scaling_factor, regex_filter, is_list_only);
+
     ECC_Stop();
 }
@@ -84,4 +84,4 @@ static void CCoinsCaching(benchmark::State& state)
     }
 }
 
-BENCHMARK(CCoinsCaching);
+BENCHMARK(CCoinsCaching, 170 * 1000);
@@ -52,5 +52,5 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state)
     }
 }
 
-BENCHMARK(DeserializeBlockTest);
-BENCHMARK(DeserializeAndCheckBlockTest);
+BENCHMARK(DeserializeBlockTest, 130);
+BENCHMARK(DeserializeAndCheckBlockTest, 160);
@@ -99,5 +99,5 @@ static void CCheckQueueSpeedPrevectorJob(benchmark::State& state)
     tg.interrupt_all();
     tg.join_all();
 }
-BENCHMARK(CCheckQueueSpeed);
-BENCHMARK(CCheckQueueSpeedPrevectorJob);
+BENCHMARK(CCheckQueueSpeed, 29000);
+BENCHMARK(CCheckQueueSpeedPrevectorJob, 1400);
@@ -56,4 +56,4 @@ static void CoinSelection(benchmark::State& state)
     }
 }
 
-BENCHMARK(CoinSelection);
+BENCHMARK(CoinSelection, 650);
@@ -46,9 +46,9 @@ static void SHA256_32b(benchmark::State& state)
 {
     std::vector<uint8_t> in(32,0);
     while (state.KeepRunning()) {
-        for (int i = 0; i < 1000000; i++) {
-            CSHA256().Write(in.data(), in.size()).Finalize(in.data());
-        }
+        CSHA256()
+            .Write(in.data(), in.size())
+            .Finalize(in.data());
     }
 }
 
@@ -63,10 +63,9 @@ static void SHA512(benchmark::State& state)
 static void SipHash_32b(benchmark::State& state)
 {
     uint256 x;
+    uint64_t k1 = 0;
     while (state.KeepRunning()) {
-        for (int i = 0; i < 1000000; i++) {
-            *((uint64_t*)x.begin()) = SipHashUint256(0, i, x);
-        }
+        *((uint64_t*)x.begin()) = SipHashUint256(0, ++k1, x);
     }
 }
 
@@ -75,9 +74,7 @@ static void FastRandom_32bit(benchmark::State& state)
     FastRandomContext rng(true);
     uint32_t x = 0;
    while (state.KeepRunning()) {
-        for (int i = 0; i < 1000000; i++) {
-            x += rng.rand32();
-        }
+        x += rng.rand32();
     }
 }
 
@@ -86,18 +83,16 @@ static void FastRandom_1bit(benchmark::State& state)
     FastRandomContext rng(true);
     uint32_t x = 0;
     while (state.KeepRunning()) {
-        for (int i = 0; i < 1000000; i++) {
-            x += rng.randbool();
-        }
+        x += rng.randbool();
     }
 }
 
-BENCHMARK(RIPEMD160);
-BENCHMARK(SHA1);
-BENCHMARK(SHA256);
-BENCHMARK(SHA512);
+BENCHMARK(RIPEMD160, 440);
+BENCHMARK(SHA1, 570);
+BENCHMARK(SHA256, 340);
+BENCHMARK(SHA512, 330);
 
-BENCHMARK(SHA256_32b);
-BENCHMARK(SipHash_32b);
-BENCHMARK(FastRandom_32bit);
-BENCHMARK(FastRandom_1bit);
+BENCHMARK(SHA256_32b, 4700 * 1000);
+BENCHMARK(SipHash_32b, 40 * 1000 * 1000);
+BENCHMARK(FastRandom_32bit, 110 * 1000 * 1000);
+BENCHMARK(FastRandom_1bit, 440 * 1000 * 1000);
@@ -43,5 +43,4 @@ static void BenchLockedPool(benchmark::State& state)
     addr.clear();
 }
 
-BENCHMARK(BenchLockedPool);
-
+BENCHMARK(BenchLockedPool, 530);
@@ -111,4 +111,4 @@ static void MempoolEviction(benchmark::State& state)
     }
 }
 
-BENCHMARK(MempoolEviction);
+BENCHMARK(MempoolEviction, 41000);
@@ -32,5 +32,5 @@ static void PrevectorClear(benchmark::State& state)
     }
 }
 
-BENCHMARK(PrevectorDestructor);
-BENCHMARK(PrevectorClear);
+BENCHMARK(PrevectorDestructor, 5700);
+BENCHMARK(PrevectorClear, 5600);
@@ -12,8 +12,6 @@ static void RollingBloom(benchmark::State& state)
     CRollingBloomFilter filter(120000, 0.000001);
     std::vector<unsigned char> data(32);
     uint32_t count = 0;
-    uint32_t nEntriesPerGeneration = (120000 + 1) / 2;
-    uint32_t countnow = 0;
     uint64_t match = 0;
     while (state.KeepRunning()) {
         count++;
@@ -21,16 +19,8 @@ static void RollingBloom(benchmark::State& state)
         data[1] = count >> 8;
         data[2] = count >> 16;
         data[3] = count >> 24;
-        if (countnow == nEntriesPerGeneration) {
-            auto b = benchmark::clock::now();
-            filter.insert(data);
-            auto total = std::chrono::duration_cast<std::chrono::nanoseconds>(benchmark::clock::now() - b).count();
-            std::cout << "RollingBloom-refresh,1," << total << "," << total << "," << total << "\n";
-            countnow = 0;
-        } else {
-            filter.insert(data);
-        }
-        countnow++;
+        filter.insert(data);
         data[0] = count >> 24;
         data[1] = count >> 16;
         data[2] = count >> 8;
@@ -39,4 +29,4 @@ static void RollingBloom(benchmark::State& state)
     }
 }
 
-BENCHMARK(RollingBloom);
+BENCHMARK(RollingBloom, 1500 * 1000);
@@ -105,4 +105,4 @@ static void VerifyScriptBench(benchmark::State& state)
     }
 }
 
-BENCHMARK(VerifyScriptBench);
+BENCHMARK(VerifyScriptBench, 6300);