Searched refs:benchmark_names_and_iterations (Results 1 – 3 of 3) sorted by relevance
/external/toolchain-utils/crosperf/
results_report_unittest.py
  241: benchmark_names_and_iterations = [('bench1', 1)]
  243: results = BenchmarkResults(labels, benchmark_names_and_iterations,
  252: benchmark_names_and_iterations = [('bench1', 1)]
  254: results = BenchmarkResults(labels, benchmark_names_and_iterations,
  322: benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
  340: bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
  348: benchmark_names_and_iterations = [('bench1', 1)]
  352: bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
  362: benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
  376: bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
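Taken together, the unittest hits show the expected shape: benchmark_names_and_iterations is a list of (benchmark_name, iteration_count) tuples, which results_report.py:553 converts into a name-to-count dict. A minimal sketch of that fixture, with illustrative values copied from the hits above:

    # Shape of the test fixture, reconstructed from the unittest hits.
    # Each pair is (benchmark_name, iteration_count).
    benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]

    # results_report.py:553 stores the dict form as iter_counts,
    # a benchmark-name -> iteration-count lookup.
    iter_counts = dict(benchmark_names_and_iterations)
    assert iter_counts == {'bench1': 1, 'bench2': 2}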
results_report.py
  117: benchmark_names_and_iterations,  (argument)
  131: for bench_name, bench_iterations in benchmark_names_and_iterations:
  198: p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
  541: benchmark_names_and_iterations,  (argument)
  552: self.benchmark_names_and_iterations = benchmark_names_and_iterations
  553: self.iter_counts = dict(benchmark_names_and_iterations)
  560: benchmark_names_and_iterations = [(benchmark.name, benchmark.iterations)
  565: return BenchmarkResults(label_names, benchmark_names_and_iterations,
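The results_report.py hits outline a consistent pattern: the constructor (lines 541-553) stores the tuple list plus its dict form, line 131 iterates over the pairs, and lines 560-565 build the list from benchmark objects. A condensed stand-in assuming only what the hits show; the real constructor takes more arguments than modeled here, and the helper's name below is hypothetical:

    class BenchmarkResults:
      """Stand-in mirroring results_report.py:541-553; extra args omitted."""

      def __init__(self, label_names, benchmark_names_and_iterations):
        self.label_names = label_names
        # Line 552: keep the raw (name, iterations) pairs.
        self.benchmark_names_and_iterations = benchmark_names_and_iterations
        # Line 553: dict form for constant-time iteration-count lookups.
        self.iter_counts = dict(benchmark_names_and_iterations)

    def results_from_benchmarks(label_names, benchmarks):
      # Hypothetical helper in the style of lines 560-565: derive the
      # pairs from benchmark objects, then build a BenchmarkResults.
      benchmark_names_and_iterations = [(benchmark.name, benchmark.iterations)
                                        for benchmark in benchmarks]
      return BenchmarkResults(label_names, benchmark_names_and_iterations)

    # Line 131 consumes the pairs positionally:
    #   for bench_name, bench_iterations in benchmark_names_and_iterations:
    #     ...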
generate_report.py
  291: benchmark_names_and_iterations=benches,