/external/v8/tools/ |
D | perf-compare.py |
    172  def getBenchmark(self, benchmark_name): argument
    173  benchmark_object = self.benchmarks_.get(benchmark_name)
    175  benchmark_object = Benchmark(benchmark_name)
    176  self.benchmarks_[benchmark_name] = benchmark_object
    349  def StartBenchmark(self, benchmark_name): argument
    351  self.Print(" <td class=\"name-column\">%s</td>" % benchmark_name)
    425  benchmark_name = "/".join(trace["graphs"][1:])
    432  benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
    447  for benchmark_name in benchmark_suite_object.SortedTestKeys():
    448  benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
    [all …]
|
/external/tensorflow/tensorflow/tools/benchmark/ |
D | benchmark_model.cc |
    215  const string& benchmark_name, const string& postfix, in RecordBenchmarkEntry() argument
    219  stream << benchmark_name; in RecordBenchmarkEntry()
    381  string benchmark_name = ""; in Main() local
    412  Flag("benchmark_name", &benchmark_name, "benchmark name"), in Main()
    482  LOG(INFO) << "Benchmark name: [" << benchmark_name << "]"; in Main()
    643  if (!benchmark_name.empty() && !output_prefix.empty()) { in Main()
    653  RecordBenchmarkEntry(output_prefix, benchmark_name, "", no_stat_num_runs, in Main()
    657  RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-init", 1, in Main()
    662  RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-first-inference", in Main()
    668  output_prefix, benchmark_name, "meta-init-plus-first-inference", 1, in Main()
    [all …]
|
/external/google-fruit/extras/benchmark/ |
D | run_benchmarks.py |
    728  benchmark_name = benchmark_definition['name']
    730  … if (benchmark_name in {'boost_di_compile_time', 'boost_di_run_time', 'boost_di_executable_size'}
    734  if benchmark_name == 'new_delete_run_time':
    738  elif benchmark_name == 'fruit_single_file_compile_time':
    744  elif benchmark_name.startswith('fruit_'):
    753  }[benchmark_name]
    758  elif benchmark_name.startswith('boost_di_'):
    766  }[benchmark_name]
    770  elif benchmark_name.startswith('simple_di_'):
    790  }[benchmark_name]
    [all …]
|
/external/python/cpython3/Tools/importbench/ |
D | importbench.py |
    212  benchmark_name = benchmark.__doc__
    213  old_result = max(prev_results[benchmark_name])
    214  new_result = max(new_results[benchmark_name])
    218  print(benchmark_name, ':', result)
|
/external/libcxx/utils/google-benchmark/src/ |
D | statistics.cc |
    122  CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); in ComputeStats()
    150  data.run_name = reports[0].benchmark_name(); in ComputeStats()
|
D | complexity.cc |
    186  std::string run_name = reports[0].benchmark_name().substr( in ComputeBigO()
    187  0, reports[0].benchmark_name().find('/')); in ComputeBigO()
|
D | reporter.cc | 85 std::string BenchmarkReporter::Run::benchmark_name() const { in benchmark_name() function in benchmark::BenchmarkReporter::Run
|
D | csv_reporter.cc | 95 std::string name = run.benchmark_name(); in PrintRunData()
|
D | console_reporter.cc | 124 result.benchmark_name().c_str()); in PrintRunData()
|
D | json_reporter.cc | 170 out << indent << FormatKV("name", run.benchmark_name()) << ",\n"; in PrintRunData()
|
/external/google-benchmark/src/ |
D | statistics.cc |
    122  CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); in ComputeStats()
    150  data.run_name = reports[0].benchmark_name(); in ComputeStats()
|
D | complexity.cc |
    186  std::string run_name = reports[0].benchmark_name().substr( in ComputeBigO()
    187  0, reports[0].benchmark_name().find('/')); in ComputeBigO()
|
D | reporter.cc | 85 std::string BenchmarkReporter::Run::benchmark_name() const { in benchmark_name() function in benchmark::BenchmarkReporter::Run
|
D | csv_reporter.cc | 95 std::string name = run.benchmark_name(); in PrintRunData()
|
D | console_reporter.cc | 123 result.benchmark_name().c_str()); in PrintRunData()
|
D | json_reporter.cc | 170 out << indent << FormatKV("name", run.benchmark_name()) << ",\n"; in PrintRunData()
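
The reporter.cc, csv_reporter.cc, console_reporter.cc and json_reporter.cc hits above all read run names through the BenchmarkReporter::Run::benchmark_name() accessor. As a rough illustration of how that accessor is consumed from user code, the sketch below (not part of the indexed sources; it assumes a google-benchmark version, like the vendored ones here, in which benchmark_name() is a method on Run) registers a trivial benchmark and prints each run's name from a custom reporter.

    // Minimal sketch of a custom reporter built on run.benchmark_name();
    // illustrative only, not taken from the indexed sources.
    #include <cstdio>
    #include <vector>
    #include "benchmark/benchmark.h"

    class NameOnlyReporter : public benchmark::BenchmarkReporter {
     public:
      bool ReportContext(const Context&) override { return true; }
      void ReportRuns(const std::vector<Run>& runs) override {
        for (const Run& run : runs) {
          // Same accessor the csv/console/json reporters above call.
          std::printf("%s: %lld iterations\n", run.benchmark_name().c_str(),
                      static_cast<long long>(run.iterations));
        }
      }
    };

    static void BM_Noop(benchmark::State& state) {
      for (auto _ : state) {
      }
    }
    BENCHMARK(BM_Noop);

    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);
      NameOnlyReporter reporter;
      benchmark::RunSpecifiedBenchmarks(&reporter);  // route BM_Noop through the custom reporter
      return 0;
    }

Because the reporter is passed explicitly to RunSpecifiedBenchmarks, the usual BENCHMARK_MAIN() macro is replaced by a hand-written main().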
|
/external/toolchain-utils/crosperf/ |
D | results_report.py |
    136  def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration): argument
    141  if benchmark_name not in self.perf_data:
    142  self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
    143  ben_data = self.perf_data[benchmark_name]
    510  def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name, argument
    518  raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
|
D | experiment_factory.py |
    160  benchmark_name = benchmark_settings.name
    163  test_name = benchmark_name
    240  benchmark_name,
|
D | experiment_status.py |
    137  benchmark_name = benchmark_run.benchmark.name
    138  benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
|
/external/google-benchmark/test/ |
D | register_benchmark_test.cc |
    33  CHECK(name == run.benchmark_name()) << "expected " << name << " got " in CheckRun()
    34  << run.benchmark_name(); in CheckRun()
|
D | skip_with_error_test.cc |
    36  CHECK(name == run.benchmark_name()) in CheckRun()
    37  << "expected " << name << " got " << run.benchmark_name(); in CheckRun()
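
Both test hits above compare an expected name against run.benchmark_name() inside a CheckRun() helper. The sketch below mirrors that pattern outside the library's own test harness; the names RunNameCollector and BM_Example are hypothetical, and a plain assert() stands in for the library-internal CHECK macro the tests use.

    // Illustrative run-name check in the spirit of CheckRun(); not part of the
    // indexed tests.
    #include <cassert>
    #include <string>
    #include <vector>
    #include "benchmark/benchmark.h"

    class RunNameCollector : public benchmark::BenchmarkReporter {
     public:
      bool ReportContext(const Context&) override { return true; }
      void ReportRuns(const std::vector<Run>& runs) override {
        for (const Run& run : runs) names.push_back(run.benchmark_name());
      }
      std::vector<std::string> names;  // run names recorded for later checking
    };

    static void BM_Example(benchmark::State& state) {
      for (auto _ : state) {
      }
    }
    BENCHMARK(BM_Example);

    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);
      RunNameCollector collector;
      benchmark::RunSpecifiedBenchmarks(&collector);
      // assert() stands in for the internal CHECK used by CheckRun() above.
      assert(!collector.names.empty());
      assert(collector.names.front() == "BM_Example");
      return 0;
    }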
|
/external/libcxx/utils/google-benchmark/test/ |
D | register_benchmark_test.cc |
    33  CHECK(name == run.benchmark_name()) << "expected " << name << " got " in CheckRun()
    34  << run.benchmark_name(); in CheckRun()
|
D | skip_with_error_test.cc |
    36  CHECK(name == run.benchmark_name()) in CheckRun()
    37  << "expected " << name << " got " << run.benchmark_name(); in CheckRun()
|
/external/tensorflow/tensorflow/python/platform/ |
D | benchmark.py |
    341  benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
    352  full_benchmark_name = "%s.%s" % (benchmark_name, attr)
|
/external/tensorflow/tensorflow/compiler/aot/ |
D | tfcompile.bzl |
    338  benchmark_name = name + "_benchmark"
    339  benchmark_file = benchmark_name + ".cc"
    345  name = ("gen_" + benchmark_name),
    367  name = benchmark_name,
|