diff --git a/src/benchmark_runner.cc b/src/benchmark_runner.cc
index 7bc6b6329e..d081aa869d 100644
--- a/src/benchmark_runner.cc
+++ b/src/benchmark_runner.cc
@@ -109,7 +109,7 @@ BenchmarkReporter::Run CreateRunReport(
 }
 
 // Execute one thread of benchmark b for the specified number of iterations.
-// Adds the stats collected for the thread into *total.
+// Adds the stats collected for the thread into manager->results.
 void RunInThread(const BenchmarkInstance* b, IterationCount iters,
                  int thread_id, ThreadManager* manager) {
   internal::ThreadTimer timer(
@@ -236,8 +236,10 @@ class BenchmarkRunner {
     VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
             << i.results.real_time_used << "\n";
 
-    // So for how long were we running?
-    i.iters = iters;
+    // By using KeepRunningBatch a benchmark can iterate more times than
+    // requested, so take the iteration count from i.results.
+    i.iters = i.results.iterations / b.threads;
+
     // Base decisions off of real time if requested by this benchmark.
     i.seconds = i.results.cpu_time_used;
     if (b.use_manual_time) {
diff --git a/test/basic_test.cc b/test/basic_test.cc
index 5f3dd1a3ee..33642211e2 100644
--- a/test/basic_test.cc
+++ b/test/basic_test.cc
@@ -108,15 +108,30 @@ void BM_KeepRunning(benchmark::State& state) {
 BENCHMARK(BM_KeepRunning);
 
 void BM_KeepRunningBatch(benchmark::State& state) {
-  // Choose a prime batch size to avoid evenly dividing max_iterations.
-  const benchmark::IterationCount batch_size = 101;
+  // Choose a batch size >1000 to skip the typical runs with iteration
+  // targets of 10, 100 and 1000. If these are not actually skipped the
+  // bug would be detectable as consecutive runs with the same iteration
+  // count. Below we assert that this does not happen.
+  const benchmark::IterationCount batch_size = 1009;
+
+  static benchmark::IterationCount prior_iter_count = 0;
   benchmark::IterationCount iter_count = 0;
   while (state.KeepRunningBatch(batch_size)) {
     iter_count += batch_size;
   }
   assert(state.iterations() == iter_count);
+
+  // Verify that the iteration count always increases across runs (see
+  // comment above).
+  assert(iter_count == batch_size            // max_iterations == 1
+         || iter_count > prior_iter_count);  // max_iterations > batch_size
+  prior_iter_count = iter_count;
 }
-BENCHMARK(BM_KeepRunningBatch);
+// Register with a fixed repetition count to establish the invariant that
+// the iteration count should always change across runs. This overrides
+// the --benchmark_repetitions command line flag, which would otherwise
+// cause this test to fail if set > 1.
+BENCHMARK(BM_KeepRunningBatch)->Repetitions(1);
 
 void BM_RangedFor(benchmark::State& state) {
   benchmark::IterationCount iter_count = 0;
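
For context, a minimal standalone sketch (not part of the patch) of why the runner can no longer report the requested iteration count directly: KeepRunningBatch() always admits a whole batch, so a benchmark can run up to batch_size - 1 iterations past its target, and only i.results.iterations reflects the work actually done. The benchmark name BM_BatchOvershoot is illustrative only; KeepRunningBatch, state.iterations(), benchmark::IterationCount and BENCHMARK are the library API already used in the diff above.

#include <benchmark/benchmark.h>

#include <cassert>

// Sketch: with an iteration target of, say, 1000 and a batch size of 1009,
// the first KeepRunningBatch() call admits the whole batch, so 1009
// iterations actually run. Reporting the requested count (the old
// `i.iters = iters;`) would understate the work performed.
static void BM_BatchOvershoot(benchmark::State& state) {
  const benchmark::IterationCount batch_size = 1009;
  benchmark::IterationCount iter_count = 0;
  while (state.KeepRunningBatch(batch_size)) {
    iter_count += batch_size;  // work is admitted a whole batch at a time
  }
  // state.iterations() reflects what actually ran, which may exceed the
  // runner's target by up to batch_size - 1 iterations.
  assert(state.iterations() == iter_count);
}
BENCHMARK(BM_BatchOvershoot);

BENCHMARK_MAIN();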