Searched refs:benchmark_runs (Results 1 – 10 of 10) sorted by relevance
/external/toolchain-utils/crosperf/

results_organizer_unittest.py
  139  benchmark_runs = [None] * 8
  140  benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
  142  benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
  144  benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
  146  benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
  148  benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
  150  benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
  152  benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
  154  benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
  158  for b in benchmark_runs:
  [all …]

experiment_status.py
   18  self.num_total = len(self.experiment.benchmark_runs)
   90  for benchmark_run in self.experiment.benchmark_runs:
  117  def _GetNamesAndIterations(self, benchmark_runs):
  120  for benchmark_run in benchmark_runs:
  126  def _GetCompactNamesAndIterations(self, benchmark_runs):
  128  for benchmark_run in benchmark_runs:
  145  return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))

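Note: the experiment_status.py hits suggest the status text is built by formatting each run's name and iteration and joining the segments, as the format string at line 145 shows. A minimal sketch of such a helper, assuming the runs expose .name and .iteration attributes (an inference from the other hits, not the verified crosperf code):

    def get_names_and_iterations(benchmark_runs):
        # One '    name (iteration)' segment per run; attribute names are assumed.
        output_segs = ['    %s (%s)\n' % (br.name, br.iteration)
                       for br in benchmark_runs]
        # Same overall shape as the return statement at experiment_status.py:145.
        return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
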
experiment.py
  106  self.benchmark_runs = self._GenerateBenchmarkRuns()
  119  benchmark_runs = []
  129  benchmark_runs.append(
  135  return benchmark_runs
  144  for t in self.benchmark_runs:
  183  for run in self.benchmark_runs:
  190  for run in self.benchmark_runs:

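Note: in experiment.py the run list is built once by _GenerateBenchmarkRuns() (line 106) and only iterated afterwards. A sketch of that generation step under stated assumptions: the stand-in record type and the naming scheme are hypothetical, and the real BenchmarkRun constructor (see the unit-test calls above) takes more arguments.

    import collections

    # Stand-in record for illustration only; not the real crosperf class.
    BenchmarkRun = collections.namedtuple(
        'BenchmarkRun', ['name', 'benchmark', 'label', 'iteration'])


    def generate_benchmark_runs(benchmarks, labels, iterations):
        # One run per (benchmark, label, iteration) combination, mirroring the
        # build/append/return shape of the experiment.py hits (lines 119-135).
        benchmark_runs = []
        for benchmark in benchmarks:
            for label in labels:
                for i in range(1, iterations + 1):
                    name = '%s_%s_%d' % (benchmark, label, i)  # hypothetical naming
                    benchmark_runs.append(BenchmarkRun(name, benchmark, label, i))
        return benchmark_runs

For example, generate_benchmark_runs(['b1', 'b2'], ['l1', 'l2'], 2) produces eight runs covering the same benchmark/label/iteration combinations that the results_organizer_unittest.py setup above builds by hand.
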
results_organizer.py
  122  def _MakeOrganizeResultOutline(benchmark_runs, labels):
  136  for run in benchmark_runs:
  144  for run in benchmark_runs:
  154  def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
  168  result = _MakeOrganizeResultOutline(benchmark_runs, labels)
  175  for benchmark_run in benchmark_runs:

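Note: the results_organizer.py hits show OrganizeResults() first building an outline from the runs and labels, then filling it per run. A simplified sketch of that grouping step, assuming .benchmark, .label and .iteration attributes; the real function also records each run's measured key-values and supports the json_report mode seen in its signature.

    def organize_results(benchmark_runs):
        # Outline: benchmark -> {label -> [runs sorted by iteration]}.
        outline = {}
        for run in benchmark_runs:
            per_label = outline.setdefault(run.benchmark, {})
            per_label.setdefault(run.label, []).append(run)
        # Order each cell by iteration so report columns line up.
        for per_label in outline.values():
            for runs in per_label.values():
                runs.sort(key=lambda r: r.iteration)
        return outline
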
schedv2.py
  244  for br in self._experiment.benchmark_runs:
  276  n_benchmarkruns = len(self._experiment.benchmark_runs)
  281  BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
  295  benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
  296  benchmarkrun_segments.append(self._experiment.benchmark_runs[(

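Note: schedv2.py splits experiment.benchmark_runs into slices and hands each slice to a BenchmarkRunCacheReader (lines 281 and 295-296). A sketch of just the slicing, with the remainder folded into the last segment; the reader threads themselves are omitted.

    def split_into_segments(benchmark_runs, n_readers):
        # Divide the run list into n_readers contiguous slices; any remainder
        # lands in the final slice.  Assumes n_readers >= 1.
        per_reader = len(benchmark_runs) // n_readers
        segments = []
        for i in range(n_readers - 1):
            start = i * per_reader
            segments.append(benchmark_runs[start:start + per_reader])
        segments.append(benchmark_runs[(n_readers - 1) * per_reader:])
        return segments
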
experiment_runner_unittest.py
  297  for r in self.exp.benchmark_runs:
  327  for r in self.exp.benchmark_runs:
  385  for r in self.exp.benchmark_runs:
  406  bench_run = self.exp.benchmark_runs[5]
  409  self.assertEqual(len(self.exp.benchmark_runs), 6)
  432  for r in self.exp.benchmark_runs:

generate_report.py
   62  def CountBenchmarks(benchmark_runs):
   71  for name, results in benchmark_runs.iteritems()]

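Note: in generate_report.py the benchmark_runs argument is a dict rather than a list (the .iteritems() call at line 71 also marks this as Python 2 code). A sketch of a CountBenchmarks() along those lines; the {benchmark name: [result list per label]} shape and the max() over labels are assumptions, not the verified implementation.

    def count_benchmarks(benchmark_runs):
        # benchmark_runs: assumed to map benchmark name -> per-label result lists.
        # Report the longest per-label list as each benchmark's iteration count.
        return [(name, max(len(label_results) for label_results in results))
                for name, results in benchmark_runs.items()]
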
experiment_runner.py
  150  for br in experiment.benchmark_runs:
  220  for benchmark_run in experiment.benchmark_runs:
  277  for benchmark_run in experiment.benchmark_runs:

results_report_unittest.py
  116  num_runs = len(experiment.benchmark_runs) // num_configs
  142  experiment.benchmark_runs.extend(

results_report.py
  318  for benchmark_run in self.experiment.benchmark_runs:
  533  experiment.benchmark_runs, experiment.labels, json_report=for_json_report)