Lines matching refs: benchmark_results

Each entry below is a cross-reference hit for the identifier benchmark_results: the left-hand number is the line in the referenced source file, followed by that line's code; the trailing 'argument' tag marks function definitions that receive benchmark_results as a parameter. The names involved (TableGenerator, _PerfTable, HTMLResultsReport, JSONResultsReport, BenchmarkResults.FromExperiment) point to a Python results-reporting module; the sketches interleaved below reconstruct each construct on that assumption.
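Before the per-function hits, it helps to pin down the object itself. Taken together, the references imply a BenchmarkResults value with at least label_names, iter_counts, run_keyvals, benchmark_names_and_iterations, and read_perf_report, plus a FromExperiment factory (line 592). A minimal stand-in, with field types guessed from how each attribute is used below (the real class almost certainly carries more):

  class BenchmarkResults(object):
    """Hypothetical stand-in for the data holder these refs revolve around."""

    def __init__(self, label_names, benchmark_names_and_iterations,
                 run_keyvals, read_perf_report=None):
      self.label_names = label_names          # e.g. ['vanilla', 'patched']
      self.benchmark_names_and_iterations = benchmark_names_and_iterations
      # Derived mapping used at lines 166 and 196: benchmark name -> iterations.
      self.iter_counts = dict(benchmark_names_and_iterations)
      self.run_keyvals = run_keyvals          # benchmark -> per-label run keyvals
      self.read_perf_report = read_perf_report  # callable consumed by _PerfTable

    @staticmethod
    def FromExperiment(experiment):
      # Line 592 proves this factory exists; how it unpacks the experiment
      # is not visible in the listing, so this body is illustrative only.
      return BenchmarkResults(experiment['labels'],
                              experiment['benchmarks'],
                              experiment['keyvals'])
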
165 def _GetTables(benchmark_results, columns, table_type): argument
166 iter_counts = benchmark_results.iter_counts
167 result = benchmark_results.run_keyvals
180 table = TableGenerator(runs, benchmark_results.label_names).GetTable()
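Lines 165-180 outline _GetTables: it pulls iter_counts and run_keyvals off the results object, then hands each benchmark's runs to a TableGenerator. A runnable sketch of that flow, with a deliberately tiny TableGenerator stand-in (the real one aggregates and formats keyvals, and the lines elided from the listing apply columns and table_type):

  from types import SimpleNamespace

  class TableGenerator(object):
    """Stub: the real class aggregates per-label runs into a comparison table."""

    def __init__(self, runs, label_names):
      self._runs = runs
      self._label_names = label_names

    def GetTable(self):
      return [list(self._label_names)] + [list(run) for run in self._runs]

  def _GetTables(benchmark_results, columns, table_type):
    # Lines 166-167: both inputs come straight off the results object.
    iter_counts = benchmark_results.iter_counts
    result = benchmark_results.run_keyvals
    tables = []
    # .items() here; the file is Python 2, per line 606's .iteritems().
    for benchmark, runs in sorted(result.items()):
      # Line 180: one table per benchmark, labelled with the experiment labels.
      table = TableGenerator(runs, benchmark_results.label_names).GetTable()
      tables.append((benchmark, iter_counts[benchmark], table))
    # columns and table_type feed formatting steps elided from the listing.
    return tables

  results = SimpleNamespace(
      label_names=['vanilla', 'patched'],
      iter_counts={'octane': 1},
      run_keyvals={'octane': [[{'score': 1.0}], [{'score': 1.1}]]})
  print(_GetTables(results, columns=None, table_type='full'))
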
189 def _GetPerfTables(benchmark_results, columns, table_type): argument
190 p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
191 benchmark_results.label_names,
192 benchmark_results.read_perf_report)
196 iterations = benchmark_results.iter_counts[benchmark]
203 benchmark_results.label_names,
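Lines 189-203 show the perf variant: _GetPerfTables first wraps the results in a _PerfTable, driven by benchmark_names_and_iterations, label_names, and the read_perf_report callable, then looks up iter_counts per benchmark. A sketch with a toy _PerfTable (the real one parses perf-report output, and line 203 suggests label_names is consulted again when tabulating each event):

  from types import SimpleNamespace

  class _PerfTable(object):
    """Stub: collects perf-report data per benchmark via the given reader."""

    def __init__(self, benchmark_names_and_iterations, label_names,
                 read_perf_report):
      self.perf_data = {}
      for name, iterations in benchmark_names_and_iterations:
        # One report per (label, iteration) pair; the real layout is richer.
        self.perf_data[name] = [read_perf_report(name, label, i)
                                for label in label_names
                                for i in range(iterations)]

  def _GetPerfTables(benchmark_results, columns, table_type):
    # Lines 190-192: the _PerfTable is built straight from the results object.
    p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
                         benchmark_results.label_names,
                         benchmark_results.read_perf_report)
    tables = []
    for benchmark, data in sorted(p_table.perf_data.items()):
      iterations = benchmark_results.iter_counts[benchmark]   # line 196
      tables.append((benchmark, iterations, data))
    return tables

  results = SimpleNamespace(
      benchmark_names_and_iterations=[('octane', 2)],
      label_names=['vanilla', 'patched'],
      iter_counts={'octane': 2},
      read_perf_report=lambda name, label, i: {'cycles': 100.0})
  print(_GetPerfTables(results, columns=None, table_type='full'))
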
223 self.benchmark_results = results
227 return get_tables(self.benchmark_results, columns, table_type)
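Lines 223-227 sit inside a report base class: the constructor stores the results, and a single dispatch point picks between the plain and perf table builders. The listing hides the class and method names; ResultsReport below matches the HTML/JSON subclasses that follow, while _GetTablesWithColumns is a guess, since only its body is visible:

  def _GetTables(benchmark_results, columns, table_type):
    return ['plain tables']       # stub; see the fuller sketch above

  def _GetPerfTables(benchmark_results, columns, table_type):
    return ['perf tables']        # stub; see the fuller sketch above

  class ResultsReport(object):
    """Base-class sketch: every report renders one BenchmarkResults."""

    def __init__(self, results):
      self.benchmark_results = results    # line 223

    def _GetTablesWithColumns(self, columns, table_type, perf):
      # Line 227: one call site serves both flavors; 'get_tables' is bound
      # to whichever module-level builder applies.
      get_tables = _GetPerfTables if perf else _GetTables
      return get_tables(self.benchmark_results, columns, table_type)

  report = ResultsReport(results=None)
  print(report._GetTablesWithColumns(columns=None, table_type='full', perf=True))
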
384 def __init__(self, benchmark_results, experiment=None): argument
385 super(HTMLResultsReport, self).__init__(benchmark_results)
394 label_names = self.benchmark_results.label_names
395 test_results = self.benchmark_results.run_keyvals
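Lines 384-395 belong to HTMLResultsReport: its constructor forwards the results to the base class and keeps an optional experiment handle, and the rendering code reads label_names and run_keyvals back off self.benchmark_results. The GetReport name and the HTML emitted below are illustrative only; the real renderer fills a full template:

  from types import SimpleNamespace

  class ResultsReport(object):          # minimal base, as sketched earlier
    def __init__(self, results):
      self.benchmark_results = results

  class HTMLResultsReport(ResultsReport):
    def __init__(self, benchmark_results, experiment=None):
      # Line 385: results handling is delegated to the base class.
      super(HTMLResultsReport, self).__init__(benchmark_results)
      self.experiment = experiment

    def GetReport(self):
      # Lines 394-395: rendering starts from these two attributes.
      label_names = self.benchmark_results.label_names
      test_results = self.benchmark_results.run_keyvals
      header = ''.join('<th>%s</th>' % name for name in label_names)
      rows = ''.join('<tr><td>%s</td></tr>' % test
                     for test in sorted(test_results))
      return '<table><tr>%s</tr>%s</table>' % (header, rows)

  results = SimpleNamespace(label_names=['vanilla', 'patched'],
                            run_keyvals={'octane': []})
  print(HTMLResultsReport(results).GetReport())
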
563 def __init__(self, benchmark_results, date=None, time=None, experiment=None, argument
569 super(JSONResultsReport, self).__init__(benchmark_results)
592 benchmark_results = BenchmarkResults.FromExperiment(experiment,
594 return JSONResultsReport(benchmark_results, date, time, experiment,
602 benchmark_results = self.benchmark_results
603 label_names = benchmark_results.label_names
606 for test, test_results in benchmark_results.run_keyvals.iteritems():
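Lines 563-606 cover JSONResultsReport. The constructor signature wraps in the source, so the listing truncates it after experiment=None; any further parameters are unknown here. Line 592 shows a FromExperiment factory that first converts the experiment into a BenchmarkResults, and the report body iterates run_keyvals with .iteritems(), which dates the file to Python 2 (.items() below is the Python 3 spelling). GetReportObject is an assumed name for the method at lines 602-606:

  class ResultsReport(object):          # minimal base, as sketched earlier
    def __init__(self, results):
      self.benchmark_results = results

  class BenchmarkResults(object):       # cut-down stand-in, as near the top
    def __init__(self, label_names, run_keyvals):
      self.label_names = label_names
      self.run_keyvals = run_keyvals

    @staticmethod
    def FromExperiment(experiment):
      return BenchmarkResults(experiment['labels'], experiment['keyvals'])

  class JSONResultsReport(ResultsReport):
    def __init__(self, benchmark_results, date=None, time=None,
                 experiment=None):
      super(JSONResultsReport, self).__init__(benchmark_results)  # line 569
      self.date, self.time, self.experiment = date, time, experiment

    @staticmethod
    def FromExperiment(experiment, date=None, time=None):
      # Lines 592-594: build the results object first, then wrap it.
      benchmark_results = BenchmarkResults.FromExperiment(experiment)
      return JSONResultsReport(benchmark_results, date, time, experiment)

    def GetReportObject(self):
      benchmark_results = self.benchmark_results        # line 602
      label_names = benchmark_results.label_names       # line 603
      report = []
      # Line 606 in the original: ....run_keyvals.iteritems()
      for test, test_results in benchmark_results.run_keyvals.items():
        report.append({'test': test,
                       'labels': label_names,
                       'results': test_results})
      return report

  experiment = {'labels': ['vanilla', 'patched'],
                'keyvals': {'octane': [[{'score': 1.0}], [{'score': 1.1}]]}}
  print(JSONResultsReport.FromExperiment(experiment).GetReportObject())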