
Searched refs: benchmark_runs (Results 1 – 10 of 10), sorted by relevance

/external/toolchain-utils/crosperf/
results_organizer_unittest.py:139  benchmark_runs = [None] * 8
140 benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
142 benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
144 benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
146 benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
148 benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
150 benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
152 benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
154 benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
158 for b in benchmark_runs:
[all …]

experiment_status.py:18  self.num_total = len(self.experiment.benchmark_runs)
90 for benchmark_run in self.experiment.benchmark_runs:
117 def _GetNamesAndIterations(self, benchmark_runs):
120 for benchmark_run in benchmark_runs:
126 def _GetCompactNamesAndIterations(self, benchmark_runs):
128 for benchmark_run in benchmark_runs:
145 return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
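
The two helpers above build the status display by grouping run names and iteration numbers. A minimal sketch of that kind of grouping, assuming BenchmarkRun exposes .name and .iteration attributes (not the actual crosperf code); the final format string mirrors the one at line 145:

    from collections import defaultdict

    def compact_names_and_iterations(benchmark_runs):
        # Group iteration numbers under each run name so the status output
        # prints one line per benchmark instead of one line per run.
        grouped = defaultdict(list)
        for run in benchmark_runs:
            grouped[run.name].append(run.iteration)
        segments = ['  %s: %s\n' % (name, sorted(iters))
                    for name, iters in sorted(grouped.items())]
        return ' %s \n%s' % (len(benchmark_runs), ''.join(segments))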

experiment.py:106  self.benchmark_runs = self._GenerateBenchmarkRuns()
119 benchmark_runs = []
129 benchmark_runs.append(
135 return benchmark_runs
144 for t in self.benchmark_runs:
183 for run in self.benchmark_runs:
190 for run in self.benchmark_runs:
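
experiment.py materializes the full run list once (line 106) and every later phase simply iterates it. The unit-test fixture above (2 benchmarks × 2 labels × 2 iterations = 8 runs) suggests the list is the cross product of those three dimensions; a rough sketch of that pattern, with the tuple shape and the .iterations attribute assumed rather than taken from the real code:

    import itertools

    def generate_benchmark_runs(benchmarks, labels):
        # One entry per (benchmark, label, iteration), numbered 'b1'..'bN'
        # in the same order as the fixture in results_organizer_unittest.py.
        runs = []
        counter = itertools.count(1)
        for benchmark, label in itertools.product(benchmarks, labels):
            for iteration in range(1, benchmark.iterations + 1):
                runs.append(('b%d' % next(counter), benchmark, label, iteration))
        return runs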

results_organizer.py:122  def _MakeOrganizeResultOutline(benchmark_runs, labels):
136 for run in benchmark_runs:
144 for run in benchmark_runs:
154 def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
168 result = _MakeOrganizeResultOutline(benchmark_runs, labels)
175 for benchmark_run in benchmark_runs:
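
OrganizeResults first builds an empty outline from the runs and labels (line 168) and then fills it per run (line 175). A generic sketch of that two-pass pattern, with the nesting (benchmark name -> label name -> per-run results) and the .name/.label/.result attributes assumed, not taken from the real code:

    def organize_results(benchmark_runs, labels):
        # Pass 1: outline - an empty slot for every (benchmark, label) pair.
        outline = {}
        for run in benchmark_runs:
            outline.setdefault(run.name, {label.name: [] for label in labels})
        # Pass 2: fill - append each run's results under its benchmark/label.
        for run in benchmark_runs:
            outline[run.name][run.label.name].append(run.result)
        return outline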

schedv2.py:244  for br in self._experiment.benchmark_runs:
276 n_benchmarkruns = len(self._experiment.benchmark_runs)
281 BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
295 benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
296 benchmarkrun_segments.append(self._experiment.benchmark_runs[(
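
schedv2.py slices the run list into contiguous segments and hands each one to a BenchmarkRunCacheReader (lines 276-296). A minimal sketch of that slicing, with the helper name invented and the remainder folded into the last segment:

    def split_into_segments(benchmark_runs, n_readers):
        # Contiguous slices of roughly equal size; the last slice absorbs any
        # remainder, matching the start/end slicing visible above.
        per_segment = max(1, len(benchmark_runs) // n_readers)
        segments = []
        for i in range(n_readers):
            start = i * per_segment
            end = start + per_segment if i < n_readers - 1 else len(benchmark_runs)
            segments.append(benchmark_runs[start:end])
        return segments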

experiment_runner_unittest.py:297  for r in self.exp.benchmark_runs:
327 for r in self.exp.benchmark_runs:
385 for r in self.exp.benchmark_runs:
406 bench_run = self.exp.benchmark_runs[5]
409 self.assertEqual(len(self.exp.benchmark_runs), 6)
432 for r in self.exp.benchmark_runs:

generate_report.py:62  def CountBenchmarks(benchmark_runs):
71 for name, results in benchmark_runs.iteritems()]
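
From the two lines visible here, CountBenchmarks takes a dict mapping benchmark names to their per-run results (the .iteritems() call marks this as Python 2 code). A hedged sketch of that shape, ported to Python 3 and with the return value assumed to be (name, count) pairs:

    def count_benchmarks(benchmark_runs):
        # benchmark_runs: {benchmark_name: [per_run_result, ...]}
        return [(name, len(results))
                for name, results in benchmark_runs.items()]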

experiment_runner.py:150  for br in experiment.benchmark_runs:
220 for benchmark_run in experiment.benchmark_runs:
277 for benchmark_run in experiment.benchmark_runs:

results_report_unittest.py:116  num_runs = len(experiment.benchmark_runs) // num_configs
142 experiment.benchmark_runs.extend(

results_report.py:318  for benchmark_run in self.experiment.benchmark_runs:
533 experiment.benchmark_runs, experiment.labels, json_report=for_json_report)