Diffstat (limited to '3rdparty/benchmark/test')
-rw-r--r-- | 3rdparty/benchmark/test/CMakeLists.txt    |  89
-rw-r--r-- | 3rdparty/benchmark/test/basic_test.cc     | 102
-rw-r--r-- | 3rdparty/benchmark/test/benchmark_test.cc | 154
-rw-r--r-- | 3rdparty/benchmark/test/cxx03_test.cc     |  31
-rw-r--r-- | 3rdparty/benchmark/test/filter_test.cc    |  91
-rw-r--r-- | 3rdparty/benchmark/test/fixture_test.cc   |  42
-rw-r--r-- | 3rdparty/benchmark/test/options_test.cc   |  26
7 files changed, 535 insertions, 0 deletions
diff --git a/3rdparty/benchmark/test/CMakeLists.txt b/3rdparty/benchmark/test/CMakeLists.txt
new file mode 100644
index 00000000000..7e4f4854710
--- /dev/null
+++ b/3rdparty/benchmark/test/CMakeLists.txt
@@ -0,0 +1,89 @@
+# Enable the tests
+
+find_package(Threads REQUIRED)
+
+set(CXX03_FLAGS "${CMAKE_CXX_FLAGS}")
+string(REPLACE "-std=c++11" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
+string(REPLACE "-std=c++0x" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
+
+macro(compile_benchmark_test name)
+  add_executable(${name} "${name}.cc")
+  target_link_libraries(${name} benchmark ${CMAKE_THREAD_LIBS_INIT})
+endmacro(compile_benchmark_test)
+
+# Demonstration executable
+compile_benchmark_test(benchmark_test)
+add_test(benchmark benchmark_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(filter_test)
+macro(add_filter_test name filter expect)
+  add_test(${name} filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
+endmacro(add_filter_test)
+
+add_filter_test(filter_simple "Foo" 3)
+add_filter_test(filter_suffix "BM_.*" 4)
+add_filter_test(filter_regex_all ".*" 5)
+add_filter_test(filter_regex_blank "" 5)
+add_filter_test(filter_regex_none "monkey" 0)
+add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
+add_filter_test(filter_regex_begin "^BM_.*" 4)
+add_filter_test(filter_regex_begin2 "^N" 1)
+add_filter_test(filter_regex_end ".*Ba$" 1)
+
+compile_benchmark_test(options_test)
+add_test(options_benchmarks options_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(basic_test)
+add_test(basic_benchmark basic_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(fixture_test)
+add_test(fixture_test fixture_test --benchmark_min_time=0.01)
+
+compile_benchmark_test(cxx03_test)
+set_target_properties(cxx03_test
+    PROPERTIES COMPILE_FLAGS "${CXX03_FLAGS}")
+add_test(cxx03 cxx03_test --benchmark_min_time=0.01)
+
+# Add the coverage command(s)
+if(CMAKE_BUILD_TYPE)
+  string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
+endif()
+if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
+  find_program(GCOV gcov)
+  find_program(LCOV lcov)
+  find_program(GENHTML genhtml)
+  find_program(CTEST ctest)
+  if (GCOV AND LCOV AND GENHTML AND CTEST AND HAVE_CXX_FLAG_COVERAGE)
+    add_custom_command(
+      OUTPUT ${CMAKE_BINARY_DIR}/lcov/index.html
+      COMMAND ${LCOV} -q -z -d .
+      COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o before.lcov -i
+      COMMAND ${CTEST} --force-new-ctest-process
+      COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o after.lcov
+      COMMAND ${LCOV} -q -a before.lcov -a after.lcov --output-file final.lcov
+      COMMAND ${LCOV} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov
+      COMMAND ${GENHTML} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_BINARY_DIR}" -t benchmark
+      DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test
+      WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+      COMMENT "Running LCOV"
+    )
+    add_custom_target(coverage
+      DEPENDS ${CMAKE_BINARY_DIR}/lcov/index.html
+      COMMENT "LCOV report at lcov/index.html"
+    )
+    message(STATUS "Coverage command added")
+  else()
+    if (HAVE_CXX_FLAG_COVERAGE)
+      set(CXX_FLAG_COVERAGE_MESSAGE supported)
+    else()
+      set(CXX_FLAG_COVERAGE_MESSAGE unavailable)
+    endif()
+    message(WARNING
+      "Coverage not available:\n"
+      "  gcov: ${GCOV}\n"
+      "  lcov: ${LCOV}\n"
+      "  genhtml: ${GENHTML}\n"
+      "  ctest: ${CTEST}\n"
+      "  --coverage flag: ${CXX_FLAG_COVERAGE_MESSAGE}")
+  endif()
+endif()
diff --git a/3rdparty/benchmark/test/basic_test.cc b/3rdparty/benchmark/test/basic_test.cc
new file mode 100644
index 00000000000..3435415447f
--- /dev/null
+++ b/3rdparty/benchmark/test/basic_test.cc
@@ -0,0 +1,102 @@
+
+#include "benchmark/benchmark_api.h"
+
+#define BASIC_BENCHMARK_TEST(x) \
+    BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
+
+void BM_empty(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+}
+BENCHMARK(BM_empty);
+BENCHMARK(BM_empty)->ThreadPerCpu();
+
+void BM_spin_empty(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    for (int x = 0; x < state.range_x(); ++x) {
+      benchmark::DoNotOptimize(x);
+    }
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_empty);
+BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
+
+void BM_spin_pause_before(benchmark::State& state) {
+  for (int i = 0; i < state.range_x(); ++i) {
+    benchmark::DoNotOptimize(i);
+  }
+  while(state.KeepRunning()) {
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_before);
+BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
+
+
+void BM_spin_pause_during(benchmark::State& state) {
+  while(state.KeepRunning()) {
+    state.PauseTiming();
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+    state.ResumeTiming();
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_during);
+BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
+
+void BM_pause_during(benchmark::State& state) {
+  while(state.KeepRunning()) {
+    state.PauseTiming();
+    state.ResumeTiming();
+  }
+}
+BENCHMARK(BM_pause_during);
+BENCHMARK(BM_pause_during)->ThreadPerCpu();
+BENCHMARK(BM_pause_during)->UseRealTime();
+BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
+
+void BM_spin_pause_after(benchmark::State& state) {
+  while(state.KeepRunning()) {
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+  }
+  for (int i = 0; i < state.range_x(); ++i) {
+    benchmark::DoNotOptimize(i);
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_after);
+BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
+
+
+void BM_spin_pause_before_and_after(benchmark::State& state) {
+  for (int i = 0; i < state.range_x(); ++i) {
+    benchmark::DoNotOptimize(i);
+  }
+  while(state.KeepRunning()) {
+    for (int i = 0; i < state.range_x(); ++i) {
+      benchmark::DoNotOptimize(i);
+    }
+  }
+  for (int i = 0; i < state.range_x(); ++i) {
+    benchmark::DoNotOptimize(i);
+  }
+}
+BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
+BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
+
+
+void BM_empty_stop_start(benchmark::State& state) {
+  while (state.KeepRunning()) { }
+}
+BENCHMARK(BM_empty_stop_start);
+BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
+
+BENCHMARK_MAIN()
diff --git a/3rdparty/benchmark/test/benchmark_test.cc b/3rdparty/benchmark/test/benchmark_test.cc
new file mode 100644
index 00000000000..2d268ce4121
--- /dev/null
+++ b/3rdparty/benchmark/test/benchmark_test.cc
@@ -0,0 +1,154 @@
+#include "benchmark/benchmark.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdint.h>
+
+#include <cstdlib>
+#include <iostream>
+#include <limits>
+#include <list>
+#include <map>
+#include <mutex>
+#include <set>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#if defined(__GNUC__)
+# define BENCHMARK_NOINLINE __attribute__((noinline))
+#else
+# define BENCHMARK_NOINLINE
+#endif
+
+namespace {
+
+int BENCHMARK_NOINLINE Factorial(uint32_t n) {
+  return (n == 1) ? 1 : n * Factorial(n - 1);
+}
+
+double CalculatePi(int depth) {
+  double pi = 0.0;
+  for (int i = 0; i < depth; ++i) {
+    double numerator = static_cast<double>(((i % 2) * 2) - 1);
+    double denominator = static_cast<double>((2 * i) - 1);
+    pi += numerator / denominator;
+  }
+  return (pi - 1.0) * 4;
+}
+
+std::set<int> ConstructRandomSet(int size) {
+  std::set<int> s;
+  for (int i = 0; i < size; ++i)
+    s.insert(i);
+  return s;
+}
+
+std::mutex test_vector_mu;
+std::vector<int>* test_vector = nullptr;
+
+}  // end namespace
+
+static void BM_Factorial(benchmark::State& state) {
+  int fac_42 = 0;
+  while (state.KeepRunning())
+    fac_42 = Factorial(8);
+  // Prevent compiler optimizations
+  std::stringstream ss;
+  ss << fac_42;
+  state.SetLabel(ss.str());
+}
+BENCHMARK(BM_Factorial);
+BENCHMARK(BM_Factorial)->UseRealTime();
+
+static void BM_CalculatePiRange(benchmark::State& state) {
+  double pi = 0.0;
+  while (state.KeepRunning())
+    pi = CalculatePi(state.range_x());
+  std::stringstream ss;
+  ss << pi;
+  state.SetLabel(ss.str());
+}
+BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
+
+static void BM_CalculatePi(benchmark::State& state) {
+  static const int depth = 1024;
+  while (state.KeepRunning()) {
+    benchmark::DoNotOptimize(CalculatePi(depth));
+  }
+}
+BENCHMARK(BM_CalculatePi)->Threads(8);
+BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
+BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
+
+static void BM_SetInsert(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    state.PauseTiming();
+    std::set<int> data = ConstructRandomSet(state.range_x());
+    state.ResumeTiming();
+    for (int j = 0; j < state.range_y(); ++j)
+      data.insert(rand());
+  }
+  state.SetItemsProcessed(state.iterations() * state.range_y());
+  state.SetBytesProcessed(state.iterations() * state.range_y() * sizeof(int));
+}
+BENCHMARK(BM_SetInsert)->RangePair(1<<10,8<<10, 1,10);
+
+template<typename Container, typename ValueType = typename Container::value_type>
+static void BM_Sequential(benchmark::State& state) {
+  ValueType v = 42;
+  while (state.KeepRunning()) {
+    Container c;
+    for (int i = state.range_x(); --i; )
+      c.push_back(v);
+  }
+  const size_t items_processed = state.iterations() * state.range_x();
+  state.SetItemsProcessed(items_processed);
+  state.SetBytesProcessed(items_processed * sizeof(v));
+}
+BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)->Range(1 << 0, 1 << 10);
+BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
+// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
+#if __cplusplus >= 201103L
+BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
+#endif
+
+static void BM_StringCompare(benchmark::State& state) {
+  std::string s1(state.range_x(), '-');
+  std::string s2(state.range_x(), '-');
+  while (state.KeepRunning())
+    benchmark::DoNotOptimize(s1.compare(s2));
+}
+BENCHMARK(BM_StringCompare)->Range(1, 1<<20);
+
+static void BM_SetupTeardown(benchmark::State& state) {
+  if (state.thread_index == 0) {
+    // No need to lock test_vector_mu here as this is running single-threaded.
+    test_vector = new std::vector<int>();
+  }
+  int i = 0;
+  while (state.KeepRunning()) {
+    std::lock_guard<std::mutex> l(test_vector_mu);
+    if (i%2 == 0)
+      test_vector->push_back(i);
+    else
+      test_vector->pop_back();
+    ++i;
+  }
+  if (state.thread_index == 0) {
+    delete test_vector;
+  }
+}
+BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
+
+static void BM_LongTest(benchmark::State& state) {
+  double tracker = 0.0;
+  while (state.KeepRunning()) {
+    for (int i = 0; i < state.range_x(); ++i)
+      benchmark::DoNotOptimize(tracker += i);
+  }
+}
+BENCHMARK(BM_LongTest)->Range(1<<16,1<<28);
+
+BENCHMARK_MAIN()
+
diff --git a/3rdparty/benchmark/test/cxx03_test.cc b/3rdparty/benchmark/test/cxx03_test.cc
new file mode 100644
index 00000000000..56779d66021
--- /dev/null
+++ b/3rdparty/benchmark/test/cxx03_test.cc
@@ -0,0 +1,31 @@
+
+#include <cstddef>
+
+#include "benchmark/benchmark.h"
+
+#if __cplusplus >= 201103L
+#error C++11 or greater detected. Should be C++03.
+#endif
+
+void BM_empty(benchmark::State& state) {
+  while (state.KeepRunning()) {
+    volatile std::size_t x = state.iterations();
+    ((void)x);
+  }
+}
+BENCHMARK(BM_empty);
+
+template <class T, class U>
+void BM_template2(benchmark::State& state) {
+  BM_empty(state);
+}
+BENCHMARK_TEMPLATE2(BM_template2, int, long);
+
+template <class T>
+void BM_template1(benchmark::State& state) {
+  BM_empty(state);
+}
+BENCHMARK_TEMPLATE(BM_template1, long);
+BENCHMARK_TEMPLATE1(BM_template1, int);
+
+BENCHMARK_MAIN()
diff --git a/3rdparty/benchmark/test/filter_test.cc b/3rdparty/benchmark/test/filter_test.cc
new file mode 100644
index 00000000000..2a278ff4a77
--- /dev/null
+++ b/3rdparty/benchmark/test/filter_test.cc
@@ -0,0 +1,91 @@
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+
+#include <iostream>
+#include <limits>
+#include <sstream>
+#include <string>
+
+namespace {
+
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+  virtual bool ReportContext(const Context& context) {
+    return ConsoleReporter::ReportContext(context);
+  };
+
+  virtual void ReportRuns(const std::vector<Run>& report) {
+    ++count_;
+    ConsoleReporter::ReportRuns(report);
+  };
+
+  TestReporter() : count_(0) {}
+
+  virtual ~TestReporter() {}
+
+  size_t GetCount() const {
+    return count_;
+  }
+
+ private:
+  mutable size_t count_;
+};
+
+}  // end namespace
+
+
+static void NoPrefix(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(NoPrefix);
+
+static void BM_Foo(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(BM_Foo);
+
+
+static void BM_Bar(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(BM_Bar);
+
+
+static void BM_FooBar(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(BM_FooBar);
+
+
+static void BM_FooBa(benchmark::State& state) {
+  while (state.KeepRunning()) {}
+}
+BENCHMARK(BM_FooBa);
+
+
+
+int main(int argc, char* argv[]) {
+  benchmark::Initialize(&argc, argv);
+
+  TestReporter test_reporter;
+  benchmark::RunSpecifiedBenchmarks(&test_reporter);
+
+  if (argc == 2) {
+    // Make sure we ran all of the tests
+    std::stringstream ss(argv[1]);
+    size_t expected;
+    ss >> expected;
+
+    const size_t count = test_reporter.GetCount();
+    if (count != expected) {
+      std::cerr << "ERROR: Expected " << expected << " tests to be ran but only "
+                << count << " completed" << std::endl;
+      return -1;
+    }
+  }
+  return 0;
+}
diff --git a/3rdparty/benchmark/test/fixture_test.cc b/3rdparty/benchmark/test/fixture_test.cc
new file mode 100644
index 00000000000..8aea6ef0601
--- /dev/null
+++ b/3rdparty/benchmark/test/fixture_test.cc
@@ -0,0 +1,42 @@
+
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+
+class MyFixture : public ::benchmark::Fixture
+{
+public:
+  void SetUp() {
+    data = new int(42);
+  }
+
+  void TearDown() {
+    assert(data != nullptr);
+    delete data;
+    data = nullptr;
+  }
+
+  ~MyFixture() {
+    assert(data == nullptr);
+  }
+
+  int* data;
+};
+
+
+BENCHMARK_F(MyFixture, Foo)(benchmark::State& st) {
+  assert(data != nullptr);
+  assert(*data == 42);
+  while (st.KeepRunning()) {
+  }
+}
+
+BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
+  while (st.KeepRunning()) {
+  }
+  st.SetItemsProcessed(st.range_x());
+}
+BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
+
+
+BENCHMARK_MAIN()
diff --git a/3rdparty/benchmark/test/options_test.cc b/3rdparty/benchmark/test/options_test.cc
new file mode 100644
index 00000000000..d4c682d4ece
--- /dev/null
+++ b/3rdparty/benchmark/test/options_test.cc
@@ -0,0 +1,26 @@
+#include "benchmark/benchmark_api.h"
+
+void BM_basic(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+}
+BENCHMARK(BM_basic);
+BENCHMARK(BM_basic)->Arg(42);
+BENCHMARK(BM_basic)->Range(1, 8);
+BENCHMARK(BM_basic)->DenseRange(10, 15);
+BENCHMARK(BM_basic)->ArgPair(42, 42);
+BENCHMARK(BM_basic)->RangePair(64, 512, 64, 512);
+BENCHMARK(BM_basic)->MinTime(0.7);
+BENCHMARK(BM_basic)->UseRealTime();
+BENCHMARK(BM_basic)->ThreadRange(2, 4);
+BENCHMARK(BM_basic)->ThreadPerCpu();
+
+void CustomArgs(benchmark::internal::Benchmark* b) {
+  for (int i = 0; i < 10; ++i) {
+    b->Arg(i);
+  }
+}
+
+BENCHMARK(BM_basic)->Apply(CustomArgs);
+
+BENCHMARK_MAIN()