Searched refs:benchmark_results (Results 1 – 4 of 4) sorted by relevance
/external/toolchain-utils/crosperf/
results_report.py
    165  def _GetTables(benchmark_results, columns, table_type):  [argument]
    166      iter_counts = benchmark_results.iter_counts
    167      result = benchmark_results.run_keyvals
    180      table = TableGenerator(runs, benchmark_results.label_names).GetTable()
    189  def _GetPerfTables(benchmark_results, columns, table_type):  [argument]
    190      p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
    191                           benchmark_results.label_names,
    192                           benchmark_results.read_perf_report)
    196      iterations = benchmark_results.iter_counts[benchmark]
    203      benchmark_results.label_names, [all …]
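All of the crosperf matches read from a single results container. Below is a minimal sketch of a stand-in, not the real crosperf class: the attribute names (iter_counts, run_keyvals, label_names, benchmark_names_and_iterations, read_perf_report) come from the matches above, while the field types are guesses from how _GetTables and _GetPerfTables consume them.

    from dataclasses import dataclass
    from typing import Callable, Dict, List, Tuple

    @dataclass
    class BenchmarkResultsSketch:
        """Stand-in for crosperf's results object; attribute names are taken
        from the matches above, everything else is an assumption."""
        # One name per image/label being compared (fed to TableGenerator).
        label_names: List[str]
        # (benchmark name, iteration count) pairs, as consumed by _PerfTable.
        benchmark_names_and_iterations: List[Tuple[str, int]]
        # benchmark -> per-label -> per-iteration {key: value} run results.
        run_keyvals: Dict[str, List[List[Dict[str, float]]]]
        # Hook used by _GetPerfTables to load perf report data (assumed shape).
        read_perf_report: Callable[..., dict] = lambda *a: {}

        @property
        def iter_counts(self) -> Dict[str, int]:
            # Matches the benchmark_results.iter_counts[benchmark] access.
            return dict(self.benchmark_names_and_iterations)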
results_report_unittest.py
    202  def _GetOutput(self, experiment=None, benchmark_results=None):  [argument]
    207      HTMLResultsReport(benchmark_results).GetReport()
    244      output = self._GetOutput(benchmark_results=results)
    255      output = self._GetOutput(benchmark_results=results)
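The unit-test matches drive HTMLResultsReport directly from such a results object. A hypothetical condensation of that pattern, assuming the crosperf sources are importable; the factory and the assertion are illustrative, not the real test:

    import unittest

    from results_report import HTMLResultsReport  # assumes crosperf on sys.path

    class HTMLReportPatternTest(unittest.TestCase):
        def _GetOutput(self, benchmark_results=None):
            # Same shape as the line-202 helper, minus the experiment branch.
            return HTMLResultsReport(benchmark_results).GetReport()

        def testLabelsAppearInReport(self):
            results = MakeFakeResults()  # hypothetical factory, not a crosperf helper
            output = self._GetOutput(benchmark_results=results)
            for label in results.label_names:
                self.assertIn(label, output)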
generate_report.py
    199  def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):  [argument]
    206      get_contents = lambda: report_ctor(benchmark_results).GetReport()
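In generate_report.py, RunActions hands each action a report constructor and defers rendering behind a closure (the line-206 match). A hedged sketch of that dispatch follows; only the RunActions signature and the get_contents lambda come from the matches, the action shape and writer helper are assumptions.

    import os

    def _write_file(path, contents, overwrite, verbose):
        # Hypothetical output helper; the real script has its own handling.
        if os.path.exists(path) and not overwrite:
            raise IOError('%s already exists; rerun with overwrite' % path)
        if verbose:
            print('Writing %s' % path)
        with open(path, 'w') as f:
            f.write(contents)

    def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):
        # Each action is assumed to pair a report constructor with a suffix.
        for report_ctor, suffix in actions:
            # Deferred: the report is only rendered when actually written.
            # (ctor=report_ctor pins the loop variable in the closure.)
            get_contents = lambda ctor=report_ctor: ctor(benchmark_results).GetReport()
            _write_file(output_prefix + suffix, get_contents(), overwrite, verbose)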
/external/gemmlowp/test/ |
benchmark.cc
    155  std::map<gemm_t, std::vector<double>> benchmark_results;  [in benchmark(), local]
    195  benchmark_results[gemm].emplace_back(gflops);  [in benchmark()]
    209  for (auto b : benchmark_results) {  [in benchmark()]
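On the C++ side, benchmark.cc accumulates repeated GFLOPS samples per GEMM kernel in a std::map<gemm_t, std::vector<double>> and then walks the map. A Python analogue of the same accumulate-then-report pattern is sketched below; the median summary is illustrative, not what gemmlowp actually prints.

    from collections import defaultdict
    from statistics import median

    # benchmark_results[gemm] collects one GFLOPS sample per repetition,
    # mirroring benchmark_results[gemm].emplace_back(gflops) on line 195.
    benchmark_results = defaultdict(list)

    def record(gemm, gflops):
        benchmark_results[gemm].append(gflops)

    def report():
        # Mirrors the `for (auto b : benchmark_results)` walk on line 209.
        for gemm, samples in sorted(benchmark_results.items()):
            print('%s: median %.2f GFLOPS over %d runs'
                  % (gemm, median(samples), len(samples)))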