author     Miodrag Milanovic <mmicko@gmail.com>   2016-05-12 08:42:02 +0200
committer  Miodrag Milanovic <mmicko@gmail.com>   2016-05-12 08:42:02 +0200
commit     2fe208917976a66a315c83672cc2242ffb2ac88a (patch)
tree       461cb81c293cd0a429db12a2ffe055a4b9400861 /3rdparty/benchmark
parent     f7f6569add35e5588fcdf686af4b4d9ad5396b11 (diff)
Update BGFX, BX, Benchmark and RapidJSON (nw)
Diffstat (limited to '3rdparty/benchmark')
-rw-r--r--  3rdparty/benchmark/.travis.yml                        |   2
-rw-r--r--  3rdparty/benchmark/AUTHORS                            |   1
-rw-r--r--  3rdparty/benchmark/CMakeLists.txt                     |  26
-rw-r--r--  3rdparty/benchmark/CONTRIBUTORS                       |   3
-rw-r--r--  3rdparty/benchmark/README.md                          | 134
-rw-r--r--  3rdparty/benchmark/cmake/posix_regex.cpp              |   4
-rw-r--r--  3rdparty/benchmark/include/benchmark/benchmark_api.h  |  40
-rw-r--r--  3rdparty/benchmark/include/benchmark/reporter.h       |  10
-rw-r--r--  3rdparty/benchmark/src/benchmark.cc                   |  93
-rw-r--r--  3rdparty/benchmark/src/console_reporter.cc            |  42
-rw-r--r--  3rdparty/benchmark/src/csv_reporter.cc                |   9
-rw-r--r--  3rdparty/benchmark/src/json_reporter.cc               |  11
-rw-r--r--  3rdparty/benchmark/src/reporter.cc                    |  12
-rw-r--r--  3rdparty/benchmark/test/benchmark_test.cc             |  28
-rw-r--r--  3rdparty/benchmark/test/options_test.cc               |  16
15 files changed, 354 insertions(+), 77 deletions(-)
diff --git a/3rdparty/benchmark/.travis.yml b/3rdparty/benchmark/.travis.yml
index 8b138ce134d..bf26395bcbc 100644
--- a/3rdparty/benchmark/.travis.yml
+++ b/3rdparty/benchmark/.travis.yml
@@ -39,3 +39,5 @@ after_success:
- if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .;
fi
+
+sudo: required
diff --git a/3rdparty/benchmark/AUTHORS b/3rdparty/benchmark/AUTHORS
index 5a4b35535e2..9da43c73b47 100644
--- a/3rdparty/benchmark/AUTHORS
+++ b/3rdparty/benchmark/AUTHORS
@@ -17,6 +17,7 @@ Evgeny Safronov <division494@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Google Inc.
JianXiong Zhou <zhoujianxiong2@gmail.com>
+Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
diff --git a/3rdparty/benchmark/CMakeLists.txt b/3rdparty/benchmark/CMakeLists.txt
index 2c722526216..f340fb35040 100644
--- a/3rdparty/benchmark/CMakeLists.txt
+++ b/3rdparty/benchmark/CMakeLists.txt
@@ -33,15 +33,29 @@ include(CXXFeatureCheck)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
# Turn compiler warnings up to 11
- add_cxx_compiler_flag(-W4)
+ string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
- set(CMAKE_CXX_FLAGS_RELEASE "/GL")
- set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "/LTCG")
- set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "/LTCG")
- set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/LTCG")
+ set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
+ set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG")
+ set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG")
+ set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG")
+
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL")
+ string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}")
+ set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
+ string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}")
+ set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
+ string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}")
+ set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
+
+ set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL")
+ set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG")
+ set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG")
+ set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG")
endif()
else()
# Try and enable C++11. Don't use C++14 because it doesn't work in some
@@ -57,6 +71,8 @@ else()
add_cxx_compiler_flag(-Wextra)
add_cxx_compiler_flag(-Wshadow)
add_cxx_compiler_flag(-Werror RELEASE)
+ add_cxx_compiler_flag(-Werror RELWITHDEBINFO)
+ add_cxx_compiler_flag(-Werror MINSIZEREL)
add_cxx_compiler_flag(-pedantic)
add_cxx_compiler_flag(-pedantic-errors)
add_cxx_compiler_flag(-Wshorten-64-to-32)
diff --git a/3rdparty/benchmark/CONTRIBUTORS b/3rdparty/benchmark/CONTRIBUTORS
index ed55bcf2767..67ecb280e0b 100644
--- a/3rdparty/benchmark/CONTRIBUTORS
+++ b/3rdparty/benchmark/CONTRIBUTORS
@@ -23,6 +23,7 @@
# Please keep the list sorted.
Arne Beer <arne@twobeer.de>
+Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
@@ -31,7 +32,9 @@ Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
JianXiong Zhou <zhoujianxiong2@gmail.com>
+Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
+Kai Wolf <kai.wolf@gmail.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
diff --git a/3rdparty/benchmark/README.md b/3rdparty/benchmark/README.md
index 21ae478b893..051b3011801 100644
--- a/3rdparty/benchmark/README.md
+++ b/3rdparty/benchmark/README.md
@@ -1,5 +1,4 @@
-benchmark
-=========
+# benchmark
[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
@@ -10,10 +9,9 @@ Discussion group: https://groups.google.com/d/forum/benchmark-discuss
IRC channel: https://freenode.net #googlebenchmark
-Example usage
--------------
-Define a function that executes the code to be measured a
-specified number of times:
+## Example usage
+### Basic usage
+Define a function that executes the code to be measured.
```c++
static void BM_StringCreation(benchmark::State& state) {
@@ -34,15 +32,16 @@ BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```
-Sometimes a family of microbenchmarks can be implemented with
-just one routine that takes an extra argument to specify which
-one of the family of benchmarks to run. For example, the following
-code defines a family of microbenchmarks for measuring the speed
-of `memcpy()` calls of different lengths:
+### Passing arguments
+Sometimes a family of benchmarks can be implemented with just one routine that
+takes an extra argument to specify which one of the family of benchmarks to
+run. For example, the following code defines a family of benchmarks for
+measuring the speed of `memcpy()` calls of different lengths:
```c++
static void BM_memcpy(benchmark::State& state) {
- char* src = new char[state.range_x()]; char* dst = new char[state.range_x()];
+ char* src = new char[state.range_x()];
+ char* dst = new char[state.range_x()];
memset(src, 'x', state.range_x());
while (state.KeepRunning())
memcpy(dst, src, state.range_x());
@@ -54,18 +53,17 @@ static void BM_memcpy(benchmark::State& state) {
BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
```
-The preceding code is quite repetitive, and can be replaced with the
-following short-hand. The following invocation will pick a few
-appropriate arguments in the specified range and will generate a
-microbenchmark for each such argument.
+The preceding code is quite repetitive, and can be replaced with the following
+short-hand. The following invocation will pick a few appropriate arguments in
+the specified range and will generate a benchmark for each such argument.
```c++
BENCHMARK(BM_memcpy)->Range(8, 8<<10);
```
-You might have a microbenchmark that depends on two inputs. For
-example, the following code defines a family of microbenchmarks for
-measuring the speed of set insertion.
+You might have a benchmark that depends on two inputs. For example, the
+following code defines a family of benchmarks for measuring the speed of set
+insertion.
```c++
static void BM_SetInsert(benchmark::State& state) {
@@ -88,19 +86,18 @@ BENCHMARK(BM_SetInsert)
->ArgPair(8<<10, 512);
```
-The preceding code is quite repetitive, and can be replaced with
-the following short-hand. The following macro will pick a few
-appropriate arguments in the product of the two specified ranges
-and will generate a microbenchmark for each such pair.
+The preceding code is quite repetitive, and can be replaced with the following
+short-hand. The following macro will pick a few appropriate arguments in the
+product of the two specified ranges and will generate a benchmark for each such
+pair.
```c++
BENCHMARK(BM_SetInsert)->RangePair(1<<10, 8<<10, 1, 512);
```
-For more complex patterns of inputs, passing a custom function
-to Apply allows programmatic specification of an
-arbitrary set of arguments to run the microbenchmark on.
-The following example enumerates a dense range on one parameter,
+For more complex patterns of inputs, passing a custom function to `Apply` allows
+programmatic specification of an arbitrary set of arguments on which to run the
+benchmark. The following example enumerates a dense range on one parameter,
and a sparse range on the second.
```c++
@@ -112,9 +109,10 @@ static void CustomArguments(benchmark::internal::Benchmark* b) {
BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
```
-Templated microbenchmarks work the same way:
-Produce then consume 'size' messages 'iters' times
-Measures throughput in the absence of multiprogramming.
+### Templated benchmarks
+Templated benchmarks work the same way: This example produces and consumes
+messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
+absence of multiprogramming.
```c++
template <class Q> int BM_Sequential(benchmark::State& state) {
@@ -145,11 +143,12 @@ Three macros are provided for adding benchmark templates.
#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
```
+### Multithreaded benchmarks
In a multithreaded test (benchmark invoked by multiple threads simultaneously),
it is guaranteed that none of the threads will start until all have called
-KeepRunning, and all will have finished before KeepRunning returns false. As
-such, any global setup or teardown you want to do can be
-wrapped in a check against the thread index:
+`KeepRunning`, and all will have finished before KeepRunning returns false. As
+such, any global setup or teardown can be wrapped in a check against the thread
+index:
```c++
static void BM_MultiThreaded(benchmark::State& state) {
@@ -176,6 +175,46 @@ BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
Without `UseRealTime`, CPU time is used by default.
+
+## Manual timing
+For benchmarking something for which neither CPU time nor real-time are
+correct or accurate enough, completely manual timing is supported using
+the `UseManualTime` function.
+
+When `UseManualTime` is used, the benchmarked code must call
+`SetIterationTime` once per iteration of the `KeepRunning` loop to
+report the manually measured time.
+
+An example use case for this is benchmarking GPU execution (e.g. OpenCL
+or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
+be accurately measured using CPU time or real-time. Instead, they can be
+measured accurately using a dedicated API, and these measurement results
+can be reported back with `SetIterationTime`.
+
+```c++
+static void BM_ManualTiming(benchmark::State& state) {
+ int microseconds = state.range_x();
+ std::chrono::duration<double, std::micro> sleep_duration {
+ static_cast<double>(microseconds)
+ };
+
+ while (state.KeepRunning()) {
+ auto start = std::chrono::high_resolution_clock::now();
+ // Simulate some useful workload with a sleep
+ std::this_thread::sleep_for(sleep_duration);
+ auto end = std::chrono::high_resolution_clock::now();
+
+ auto elapsed_seconds =
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ end - start);
+
+ state.SetIterationTime(elapsed_seconds.count());
+ }
+}
+BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
+```
+
+### Preventing optimisation
To prevent a value or expression from being optimized away by the compiler
the `benchmark::DoNotOptimize(...)` function can be used.
@@ -190,8 +229,24 @@ static void BM_test(benchmark::State& state) {
}
```
-Benchmark Fixtures
-------------------
+### Set time unit manually
+If a benchmark runs a few milliseconds it may be hard to visually compare the
+measured times, since the output data is given in nanoseconds per default. In
+order to manually set the time unit, you can specify it manually:
+
+```c++
+BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
+```
+
+## Controlling number of iterations
+In all cases, the number of iterations for which the benchmark is run is
+governed by the amount of time the benchmark takes. Concretely, the number of
+iterations is at least one, not more than 1e9, until CPU time is greater than
+the minimum time, or the wallclock time is 5x minimum time. The minimum time is
+set as a flag `--benchmark_min_time` or per benchmark by calling `MinTime` on
+the registered benchmark object.
+
+## Fixtures
Fixture tests are created by
first defining a type that derives from ::benchmark::Fixture and then
creating/registering the tests using the following macros:
@@ -221,8 +276,7 @@ BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
/* BarTest is now registered */
```
-Output Formats
---------------
+## Output Formats
The library supports multiple output formats. Use the
`--benchmark_format=<tabular|json>` flag to set the format type. `tabular` is
the default format.
@@ -290,8 +344,7 @@ name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
```
-Debug vs Release
-----------------
+## Debug vs Release
By default, benchmark builds as a debug library. You will see a warning in the output when this is the case. To build it as a release library instead, use:
```
@@ -304,6 +357,5 @@ To enable link-time optimisation, use
cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
```
-Linking against the library
----------------------------
+## Linking against the library
When using gcc, it is necessary to link against pthread to avoid runtime exceptions. This is due to how gcc implements std::thread. See [issue #67](https://github.com/google/benchmark/issues/67) for more details.
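
[Editor's note] The README changes above document several per-benchmark controls (`Unit`, `MinTime`, `UseManualTime`). A minimal sketch tying `MinTime` and `Unit` together, assuming a hypothetical workload (`BM_Example` is not part of this commit):

```c++
#include "benchmark/benchmark_api.h"

// Hypothetical placeholder workload.
static void BM_Example(benchmark::State& state) {
  int x = 0;
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(x += 1);  // keep the loop body from being elided
  }
}
// Run until at least 2 seconds accumulate and report in microseconds,
// per the "Controlling number of iterations" and "Set time unit
// manually" sections above.
BENCHMARK(BM_Example)->MinTime(2.0)->Unit(benchmark::kMicrosecond);

BENCHMARK_MAIN()
```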
diff --git a/3rdparty/benchmark/cmake/posix_regex.cpp b/3rdparty/benchmark/cmake/posix_regex.cpp
index a31af80481a..466dc62560a 100644
--- a/3rdparty/benchmark/cmake/posix_regex.cpp
+++ b/3rdparty/benchmark/cmake/posix_regex.cpp
@@ -7,6 +7,8 @@ int main() {
if (ec != 0) {
return ec;
}
- return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
+ int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
+ regfree(&re);
+ return ret;
}
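
[Editor's note] The fix above pairs `regcomp` with `regfree` so the compiled pattern is no longer leaked. Not part of this commit, but a small RAII guard would make the same guarantee hold on every exit path; a sketch (the pattern and test string are illustrative):

```c++
#include <regex.h>
#include <string>

// Sketch only: frees the pattern automatically, but only if it compiled,
// since regfree() after a failed regcomp() is undefined.
struct RegexGuard {
  regex_t re;
  bool compiled = false;
  ~RegexGuard() { if (compiled) regfree(&re); }
};

int main() {
  std::string str = "test0159";
  RegexGuard g;
  int ec = regcomp(&g.re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
  if (ec != 0) {
    return ec;  // nothing to free; compilation failed
  }
  g.compiled = true;
  return regexec(&g.re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
}
```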
diff --git a/3rdparty/benchmark/include/benchmark/benchmark_api.h b/3rdparty/benchmark/include/benchmark/benchmark_api.h
index 7a42025a457..ef7eca5313d 100644
--- a/3rdparty/benchmark/include/benchmark/benchmark_api.h
+++ b/3rdparty/benchmark/include/benchmark/benchmark_api.h
@@ -137,6 +137,13 @@ static void BM_MultiThreaded(benchmark::State& state) {
}
}
BENCHMARK(BM_MultiThreaded)->Threads(4);
+
+
+If a benchmark runs a few milliseconds it may be hard to visually compare the
+measured times, since the output data is given in nanoseconds per default. In
+order to manually set the time unit, you can specify it manually:
+
+BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
*/
#ifndef BENCHMARK_BENCHMARK_API_H_
@@ -216,6 +223,13 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
}
#endif
+// TimeUnit is passed to a benchmark in order to specify the order of magnitude
+// for the measured time.
+enum TimeUnit {
+ kNanosecond,
+ kMicrosecond,
+ kMillisecond
+};
// State is passed to a running Benchmark and contains state for the
// benchmark to use.
@@ -269,6 +283,17 @@ public:
// within each benchmark iteration, if possible.
void ResumeTiming();
+ // REQUIRES: called exactly once per iteration of the KeepRunning loop.
+ // Set the manually measured time for this benchmark iteration, which
+ // is used instead of automatically measured time if UseManualTime() was
+ // specified.
+ //
+ // For threaded benchmarks the SetIterationTime() function acts
+ // like a barrier. I.e., the ith call by a particular thread to this
+ // function will block until all threads have made their ith call.
+ // The time will be set by the last thread to call this function.
+ void SetIterationTime(double seconds);
+
// Set the number of bytes processed by the current benchmark
// execution. This routine is typically called once at the end of a
// throughput oriented benchmark. If this routine is called with a
@@ -305,10 +330,10 @@ public:
// If this routine is called, the specified label is printed at the
// end of the benchmark report line for the currently executing
// benchmark. Example:
- // static void BM_Compress(int iters) {
+ // static void BM_Compress(benchmark::State& state) {
// ...
// double compress = input_size / output_size;
- // benchmark::SetLabel(StringPrintf("compress:%.1f%%", 100.0*compression));
+ // state.SetLabel(StringPrintf("compress:%.1f%%", 100.0*compression));
// }
// Produces output that looks like:
// BM_Compress 50 50 14115038 compress:27.3%
@@ -390,6 +415,9 @@ public:
// REQUIRES: The function passed to the constructor must accept an arg1.
Benchmark* Arg(int x);
+ // Run this benchmark with the given time unit for the generated output report
+ Benchmark* Unit(TimeUnit unit);
+
// Run this benchmark once for a number of values picked from the
// range [start..limit]. (start and limit are always picked.)
// REQUIRES: The function passed to the constructor must accept an arg1.
@@ -427,6 +455,13 @@ public:
// called, the cpu time used by the benchmark will be used.
Benchmark* UseRealTime();
+ // If a benchmark must measure time manually (e.g. if GPU execution time is being
+ // measured), call this method. If called, each benchmark iteration should call
+ // SetIterationTime(seconds) to report the measured time, which will be used
+ // to control how many iterations are run, and in the printing of items/second
+ // or MB/second values.
+ Benchmark* UseManualTime();
+
// Support for running multiple copies of the same benchmark concurrently
// in multiple threads. This may be useful when measuring the scaling
// of some piece of code.
@@ -534,6 +569,7 @@ protected:
// Old-style macros
#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a))
#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->ArgPair((a1), (a2))
+#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t))
#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi))
#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \
BENCHMARK(n)->RangePair((l1), (h1), (l2), (h2))
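
[Editor's note] The header comments above specify that in threaded benchmarks `SetIterationTime` acts as a per-iteration barrier, with the last caller's value recorded. A sketch of a threaded manual-timing benchmark under those rules (the workload is hypothetical and elided):

```c++
#include <chrono>
#include "benchmark/benchmark_api.h"

static void BM_ManualMT(benchmark::State& state) {
  while (state.KeepRunning()) {
    auto start = std::chrono::high_resolution_clock::now();
    // ... per-thread slice of the measured work goes here ...
    auto end = std::chrono::high_resolution_clock::now();
    // Every thread must call this exactly once per iteration; the ith
    // calls block until all threads arrive, and the last thread to call
    // sets the recorded time.
    state.SetIterationTime(
        std::chrono::duration_cast<std::chrono::duration<double>>(
            end - start).count());
  }
}
BENCHMARK(BM_ManualMT)->Threads(4)->UseManualTime();
```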
diff --git a/3rdparty/benchmark/include/benchmark/reporter.h b/3rdparty/benchmark/include/benchmark/reporter.h
index d23ab6574d7..aaf5fbff2d2 100644
--- a/3rdparty/benchmark/include/benchmark/reporter.h
+++ b/3rdparty/benchmark/include/benchmark/reporter.h
@@ -22,6 +22,8 @@
namespace benchmark {
+typedef std::pair<const char*,double> TimeUnitMultiplier;
+
// Interface for custom benchmark result printers.
// By default, benchmark reports are printed to stdout. However an application
// can control the destination of the reports by calling
@@ -41,6 +43,7 @@ class BenchmarkReporter {
struct Run {
Run() :
iterations(1),
+ time_unit(kNanosecond),
real_accumulated_time(0),
cpu_accumulated_time(0),
bytes_per_second(0),
@@ -50,6 +53,7 @@ class BenchmarkReporter {
std::string benchmark_name;
std::string report_label; // Empty if not set by benchmark.
int64_t iterations;
+ TimeUnit time_unit;
double real_accumulated_time;
double cpu_accumulated_time;
@@ -81,7 +85,8 @@ class BenchmarkReporter {
virtual ~BenchmarkReporter();
protected:
- static void ComputeStats(std::vector<Run> const& reports, Run* mean, Run* stddev);
+ static void ComputeStats(std::vector<Run> const& reports, Run* mean, Run* stddev);
+ static TimeUnitMultiplier GetTimeUnitAndMultiplier(TimeUnit unit);
};
// Simple reporter that outputs benchmark data to the console. This is the
@@ -90,7 +95,8 @@ class ConsoleReporter : public BenchmarkReporter {
public:
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
-protected:
+
+ protected:
virtual void PrintRunData(const Run& report);
size_t name_field_width_;
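
[Editor's note] The new protected `GetTimeUnitAndMultiplier` hands reporter subclasses a (label, multiplier) pair for converting accumulated seconds into the run's time unit. A hedged sketch of how a derived reporter might consume it (`UnitAwareReporter` and `PrintRunData`'s body are illustrative, not part of this commit):

```c++
#include <cstdio>
#include <tuple>
#include "benchmark/reporter.h"

// Sketch: override the console reporter's per-run printing using the
// protected static helper declared above.
class UnitAwareReporter : public benchmark::ConsoleReporter {
 protected:
  virtual void PrintRunData(const Run& run) {
    const char* timeLabel;
    double multiplier;
    std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(run.time_unit);
    // Guard against the zero-iteration case handled by the stock reporters.
    double iters = run.iterations ? static_cast<double>(run.iterations) : 1.0;
    std::printf("%s: %.0f %s/iter\n", run.benchmark_name.c_str(),
                run.real_accumulated_time * multiplier / iters, timeLabel);
  }
};
```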
diff --git a/3rdparty/benchmark/src/benchmark.cc b/3rdparty/benchmark/src/benchmark.cc
index 08b180e37e6..1a836eb4abc 100644
--- a/3rdparty/benchmark/src/benchmark.cc
+++ b/3rdparty/benchmark/src/benchmark.cc
@@ -64,9 +64,9 @@ DEFINE_int32(benchmark_repetitions, 1,
"The number of runs of each benchmark. If greater than 1, the "
"mean and standard deviation of the runs will be reported.");
-DEFINE_string(benchmark_format, "tabular",
+DEFINE_string(benchmark_format, "console",
"The format to use for console output. Valid values are "
- "'tabular', 'json', or 'csv'.");
+ "'console', 'json', or 'csv'.");
DEFINE_bool(color_print, true, "Enables colorized logging.");
@@ -130,6 +130,7 @@ class TimerManager {
running_(false),
real_time_used_(0),
cpu_time_used_(0),
+ manual_time_used_(0),
num_finalized_(0),
phase_number_(0),
entered_(0) {
@@ -170,6 +171,21 @@ class TimerManager {
}
// Called by each thread
+ void SetIterationTime(double seconds) EXCLUDES(lock_) {
+ bool last_thread = false;
+ {
+ MutexLock ml(lock_);
+ last_thread = Barrier(ml);
+ if (last_thread) {
+ manual_time_used_ += seconds;
+ }
+ }
+ if (last_thread) {
+ phase_condition_.notify_all();
+ }
+ }
+
+ // Called by each thread
void Finalize() EXCLUDES(lock_) {
MutexLock l(lock_);
num_finalized_++;
@@ -194,6 +210,13 @@ class TimerManager {
return cpu_time_used_;
}
+ // REQUIRES: timer is not running
+ double manual_time_used() EXCLUDES(lock_) {
+ MutexLock l(lock_);
+ CHECK(!running_);
+ return manual_time_used_;
+ }
+
private:
Mutex lock_;
Condition phase_condition_;
@@ -207,6 +230,8 @@ class TimerManager {
// Accumulated time so far (does not contain current slice if running_)
double real_time_used_;
double cpu_time_used_;
+ // Manually set iteration time. User sets this with SetIterationTime(seconds).
+ double manual_time_used_;
// How many threads have called Finalize()
int num_finalized_;
@@ -261,7 +286,9 @@ struct Benchmark::Instance {
int arg1;
bool has_arg2;
int arg2;
+ TimeUnit time_unit;
bool use_real_time;
+ bool use_manual_time;
double min_time;
int threads; // Number of concurrent threads to use
bool multithreaded; // Is benchmark multi-threaded?
@@ -294,12 +321,14 @@ public:
~BenchmarkImp();
void Arg(int x);
+ void Unit(TimeUnit unit);
void Range(int start, int limit);
void DenseRange(int start, int limit);
void ArgPair(int start, int limit);
void RangePair(int lo1, int hi1, int lo2, int hi2);
void MinTime(double n);
void UseRealTime();
+ void UseManualTime();
void Threads(int t);
void ThreadRange(int min_threads, int max_threads);
void ThreadPerCpu();
@@ -313,8 +342,10 @@ private:
std::string name_;
int arg_count_;
std::vector< std::pair<int, int> > args_; // Args for all benchmark runs
+ TimeUnit time_unit_;
double min_time_;
bool use_real_time_;
+ bool use_manual_time_;
std::vector<int> thread_counts_;
BenchmarkImp& operator=(BenchmarkImp const&);
@@ -372,8 +403,10 @@ bool BenchmarkFamilies::FindBenchmarks(
instance.arg1 = args.first;
instance.has_arg2 = family->arg_count_ == 2;
instance.arg2 = args.second;
+ instance.time_unit = family->time_unit_;
instance.min_time = family->min_time_;
instance.use_real_time = family->use_real_time_;
+ instance.use_manual_time = family->use_manual_time_;
instance.threads = num_threads;
instance.multithreaded = !(family->thread_counts_.empty());
@@ -387,7 +420,9 @@ bool BenchmarkFamilies::FindBenchmarks(
if (!IsZero(family->min_time_)) {
instance.name += StringPrintF("/min_time:%0.3f", family->min_time_);
}
- if (family->use_real_time_) {
+ if (family->use_manual_time_) {
+ instance.name += "/manual_time";
+ } else if (family->use_real_time_) {
instance.name += "/real_time";
}
@@ -406,8 +441,9 @@ bool BenchmarkFamilies::FindBenchmarks(
}
BenchmarkImp::BenchmarkImp(const char* name)
- : name_(name), arg_count_(-1),
- min_time_(0.0), use_real_time_(false) {
+ : name_(name), arg_count_(-1), time_unit_(kNanosecond),
+ min_time_(0.0), use_real_time_(false),
+ use_manual_time_(false) {
}
BenchmarkImp::~BenchmarkImp() {
@@ -419,6 +455,10 @@ void BenchmarkImp::Arg(int x) {
args_.emplace_back(x, -1);
}
+void BenchmarkImp::Unit(TimeUnit unit) {
+ time_unit_ = unit;
+}
+
void BenchmarkImp::Range(int start, int limit) {
CHECK(arg_count_ == -1 || arg_count_ == 1);
arg_count_ = 1;
@@ -466,9 +506,15 @@ void BenchmarkImp::MinTime(double t) {
}
void BenchmarkImp::UseRealTime() {
+ CHECK(!use_manual_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
use_real_time_ = true;
}
+void BenchmarkImp::UseManualTime() {
+ CHECK(!use_real_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
+ use_manual_time_ = true;
+}
+
void BenchmarkImp::Threads(int t) {
CHECK_GT(t, 0);
thread_counts_.push_back(t);
@@ -531,6 +577,11 @@ Benchmark* Benchmark::Arg(int x) {
return this;
}
+Benchmark* Benchmark::Unit(TimeUnit unit) {
+ imp_->Unit(unit);
+ return this;
+}
+
Benchmark* Benchmark::Range(int start, int limit) {
imp_->Range(start, limit);
return this;
@@ -566,6 +617,11 @@ Benchmark* Benchmark::UseRealTime() {
return this;
}
+Benchmark* Benchmark::UseManualTime() {
+ imp_->UseManualTime();
+ return this;
+}
+
Benchmark* Benchmark::Threads(int t) {
imp_->Threads(t);
return this;
@@ -647,7 +703,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
thread.join();
}
for (std::size_t ti = 0; ti < pool.size(); ++ti) {
- pool[ti] = std::thread(&RunInThread, &b, iters, ti, &total);
+ pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti), &total);
}
} else {
// Run directly in this thread
@@ -658,6 +714,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
const double cpu_accumulated_time = timer_manager->cpu_time_used();
const double real_accumulated_time = timer_manager->real_time_used();
+ const double manual_accumulated_time = timer_manager->manual_time_used();
timer_manager.reset();
VLOG(2) << "Ran in " << cpu_accumulated_time << "/"
@@ -665,7 +722,9 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
// Base decisions off of real time if requested by this benchmark.
double seconds = cpu_accumulated_time;
- if (b.use_real_time) {
+ if (b.use_manual_time) {
+ seconds = manual_accumulated_time;
+ } else if (b.use_real_time) {
seconds = real_accumulated_time;
}
@@ -699,7 +758,12 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
report.report_label = label;
// Report the total iterations across all threads.
report.iterations = static_cast<int64_t>(iters) * b.threads;
- report.real_accumulated_time = real_accumulated_time;
+ report.time_unit = b.time_unit;
+ if (b.use_manual_time) {
+ report.real_accumulated_time = manual_accumulated_time;
+ } else {
+ report.real_accumulated_time = real_accumulated_time;
+ }
report.cpu_accumulated_time = cpu_accumulated_time;
report.bytes_per_second = bytes_per_second;
report.items_per_second = items_per_second;
@@ -760,6 +824,12 @@ void State::ResumeTiming() {
timer_manager->StartTimer();
}
+void State::SetIterationTime(double seconds)
+{
+ CHECK(running_benchmark);
+ timer_manager->SetIterationTime(seconds);
+}
+
void State::SetLabel(const char* label) {
CHECK(running_benchmark);
MutexLock l(GetBenchmarkLock());
@@ -814,7 +884,7 @@ void RunMatchingBenchmarks(const std::string& spec,
std::unique_ptr<BenchmarkReporter> GetDefaultReporter() {
typedef std::unique_ptr<BenchmarkReporter> PtrType;
- if (FLAGS_benchmark_format == "tabular") {
+ if (FLAGS_benchmark_format == "console") {
return PtrType(new ConsoleReporter);
} else if (FLAGS_benchmark_format == "json") {
return PtrType(new JSONReporter);
@@ -860,7 +930,7 @@ void PrintUsageAndExit() {
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
- " [--benchmark_format=<tabular|json|csv>]\n"
+ " [--benchmark_format=<console|json|csv>]\n"
" [--color_print={true|false}]\n"
" [--v=<verbosity>]\n");
exit(0);
@@ -891,7 +961,8 @@ void ParseCommandLineFlags(int* argc, char** argv) {
PrintUsageAndExit();
}
}
- if (FLAGS_benchmark_format != "tabular" &&
+
+ if (FLAGS_benchmark_format != "console" &&
FLAGS_benchmark_format != "json" &&
FLAGS_benchmark_format != "csv") {
PrintUsageAndExit();
diff --git a/3rdparty/benchmark/src/console_reporter.cc b/3rdparty/benchmark/src/console_reporter.cc
index 092936d5935..56bd3ced05b 100644
--- a/3rdparty/benchmark/src/console_reporter.cc
+++ b/3rdparty/benchmark/src/console_reporter.cc
@@ -18,6 +18,7 @@
#include <cstdio>
#include <iostream>
#include <string>
+#include <tuple>
#include <vector>
#include "check.h"
@@ -46,9 +47,9 @@ bool ConsoleReporter::ReportContext(const Context& context) {
"affected.\n";
#endif
- int output_width = fprintf(stdout, "%-*s %10s %10s %10s\n",
+ int output_width = fprintf(stdout, "%-*s %13s %13s %10s\n",
static_cast<int>(name_field_width_), "Benchmark",
- "Time(ns)", "CPU(ns)", "Iterations");
+ "Time", "CPU", "Iterations");
std::cout << std::string(output_width - 1, '-') << "\n";
return true;
@@ -92,25 +93,44 @@ void ConsoleReporter::PrintRunData(const Run& result) {
" items/s");
}
- double const multiplier = 1e9; // nano second multiplier
+ double multiplier;
+ const char* timeLabel;
+ std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(result.time_unit);
+
ColorPrintf(COLOR_GREEN, "%-*s ",
name_field_width_, result.benchmark_name.c_str());
+
if (result.iterations == 0) {
- ColorPrintf(COLOR_YELLOW, "%10.0f %10.0f ",
+ ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ",
result.real_accumulated_time * multiplier,
- result.cpu_accumulated_time * multiplier);
+ timeLabel,
+ result.cpu_accumulated_time * multiplier,
+ timeLabel);
} else {
- ColorPrintf(COLOR_YELLOW, "%10.0f %10.0f ",
+ ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ",
(result.real_accumulated_time * multiplier) /
(static_cast<double>(result.iterations)),
+ timeLabel,
(result.cpu_accumulated_time * multiplier) /
- (static_cast<double>(result.iterations)));
+ (static_cast<double>(result.iterations)),
+ timeLabel);
}
+
ColorPrintf(COLOR_CYAN, "%10lld", result.iterations);
- ColorPrintf(COLOR_DEFAULT, "%*s %*s %s\n",
- 13, rate.c_str(),
- 18, items.c_str(),
- result.report_label.c_str());
+
+ if (!rate.empty()) {
+ ColorPrintf(COLOR_DEFAULT, " %*s", 13, rate.c_str());
+ }
+
+ if (!items.empty()) {
+ ColorPrintf(COLOR_DEFAULT, " %*s", 18, items.c_str());
+ }
+
+ if (!result.report_label.empty()) {
+ ColorPrintf(COLOR_DEFAULT, " %s", result.report_label.c_str());
+ }
+
+ ColorPrintf(COLOR_DEFAULT, "\n");
}
} // end namespace benchmark
diff --git a/3rdparty/benchmark/src/csv_reporter.cc b/3rdparty/benchmark/src/csv_reporter.cc
index d78a9dfb267..3f67d1de1e6 100644
--- a/3rdparty/benchmark/src/csv_reporter.cc
+++ b/3rdparty/benchmark/src/csv_reporter.cc
@@ -17,6 +17,7 @@
#include <cstdint>
#include <iostream>
#include <string>
+#include <tuple>
#include <vector>
#include "string_util.h"
@@ -42,7 +43,7 @@ bool CSVReporter::ReportContext(const Context& context) {
std::cerr << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n";
#endif
- std::cout << "name,iterations,real_time,cpu_time,bytes_per_second,"
+ std::cout << "name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
"items_per_second,label\n";
return true;
}
@@ -66,7 +67,10 @@ void CSVReporter::ReportRuns(std::vector<Run> const& reports) {
}
void CSVReporter::PrintRunData(Run const& run) {
- double const multiplier = 1e9; // nano second multiplier
+ double multiplier;
+ const char* timeLabel;
+ std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(run.time_unit);
+
double cpu_time = run.cpu_accumulated_time * multiplier;
double real_time = run.real_accumulated_time * multiplier;
if (run.iterations != 0) {
@@ -83,6 +87,7 @@ void CSVReporter::PrintRunData(Run const& run) {
std::cout << run.iterations << ",";
std::cout << real_time << ",";
std::cout << cpu_time << ",";
+ std::cout << timeLabel << ",";
if (run.bytes_per_second > 0.0) {
std::cout << run.bytes_per_second;
diff --git a/3rdparty/benchmark/src/json_reporter.cc b/3rdparty/benchmark/src/json_reporter.cc
index def50ac49cf..7ed141fc179 100644
--- a/3rdparty/benchmark/src/json_reporter.cc
+++ b/3rdparty/benchmark/src/json_reporter.cc
@@ -17,6 +17,7 @@
#include <cstdint>
#include <iostream>
#include <string>
+#include <tuple>
#include <vector>
#include "string_util.h"
@@ -120,7 +121,10 @@ void JSONReporter::Finalize() {
}
void JSONReporter::PrintRunData(Run const& run) {
- double const multiplier = 1e9; // nano second multiplier
+ double multiplier;
+ const char* timeLabel;
+ std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(run.time_unit);
+
double cpu_time = run.cpu_accumulated_time * multiplier;
double real_time = run.real_accumulated_time * multiplier;
if (run.iterations != 0) {
@@ -140,7 +144,10 @@ void JSONReporter::PrintRunData(Run const& run) {
<< FormatKV("real_time", RoundDouble(real_time))
<< ",\n";
out << indent
- << FormatKV("cpu_time", RoundDouble(cpu_time));
+ << FormatKV("cpu_time", RoundDouble(cpu_time))
+ << ",\n";
+ out << indent
+ << FormatKV("time_unit", timeLabel);
if (run.bytes_per_second > 0.0) {
out << ",\n" << indent
<< FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
diff --git a/3rdparty/benchmark/src/reporter.cc b/3rdparty/benchmark/src/reporter.cc
index 4b47e3d556c..036546e7669 100644
--- a/3rdparty/benchmark/src/reporter.cc
+++ b/3rdparty/benchmark/src/reporter.cc
@@ -77,6 +77,18 @@ void BenchmarkReporter::ComputeStats(
stddev_data->items_per_second = items_per_second_stat.StdDev();
}
+TimeUnitMultiplier BenchmarkReporter::GetTimeUnitAndMultiplier(TimeUnit unit) {
+ switch (unit) {
+ case kMillisecond:
+ return std::make_pair("ms", 1e3);
+ case kMicrosecond:
+ return std::make_pair("us", 1e6);
+ case kNanosecond:
+ default:
+ return std::make_pair("ns", 1e9);
+ }
+}
+
void BenchmarkReporter::Finalize() {
}
diff --git a/3rdparty/benchmark/test/benchmark_test.cc b/3rdparty/benchmark/test/benchmark_test.cc
index 97abb68fdb8..252602a75f8 100644
--- a/3rdparty/benchmark/test/benchmark_test.cc
+++ b/3rdparty/benchmark/test/benchmark_test.cc
@@ -14,6 +14,8 @@
#include <sstream>
#include <string>
#include <vector>
+#include <chrono>
+#include <thread>
#if defined(__GNUC__)
# define BENCHMARK_NOINLINE __attribute__((noinline))
@@ -174,5 +176,31 @@ static void BM_ParallelMemset(benchmark::State& state) {
}
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
+static void BM_ManualTiming(benchmark::State& state) {
+ size_t slept_for = 0;
+ int microseconds = state.range_x();
+ std::chrono::duration<double, std::micro> sleep_duration {
+ static_cast<double>(microseconds)
+ };
+
+ while (state.KeepRunning()) {
+ auto start = std::chrono::high_resolution_clock::now();
+ // Simulate some useful workload with a sleep
+ std::this_thread::sleep_for(std::chrono::duration_cast<
+ std::chrono::nanoseconds>(sleep_duration));
+ auto end = std::chrono::high_resolution_clock::now();
+
+ auto elapsed =
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ end - start);
+
+ state.SetIterationTime(elapsed.count());
+ slept_for += microseconds;
+ }
+ state.SetItemsProcessed(slept_for);
+}
+BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
+BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
+
BENCHMARK_MAIN()
diff --git a/3rdparty/benchmark/test/options_test.cc b/3rdparty/benchmark/test/options_test.cc
index d4c682d4ece..4737caa2e4c 100644
--- a/3rdparty/benchmark/test/options_test.cc
+++ b/3rdparty/benchmark/test/options_test.cc
@@ -1,11 +1,27 @@
#include "benchmark/benchmark_api.h"
+#include <chrono>
+#include <thread>
+
void BM_basic(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
+
+void BM_basic_slow(benchmark::State& state) {
+ std::chrono::milliseconds sleep_duration(state.range_x());
+ while (state.KeepRunning()) {
+ std::this_thread::sleep_for(
+ std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration)
+ );
+ }
+}
+
BENCHMARK(BM_basic);
BENCHMARK(BM_basic)->Arg(42);
+BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond);
+BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond);
+BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
BENCHMARK(BM_basic)->Range(1, 8);
BENCHMARK(BM_basic)->DenseRange(10, 15);
BENCHMARK(BM_basic)->ArgPair(42, 42);