
Searched refs:benchmark_name (Results 1 – 25 of 27) sorted by relevance


/external/v8/tools/
perf-compare.py 172 def getBenchmark(self, benchmark_name): argument
173 benchmark_object = self.benchmarks_.get(benchmark_name)
175 benchmark_object = Benchmark(benchmark_name)
176 self.benchmarks_[benchmark_name] = benchmark_object
349 def StartBenchmark(self, benchmark_name): argument
351 self.Print(" <td class=\"name-column\">%s</td>" % benchmark_name)
425 benchmark_name = "/".join(trace["graphs"][1:])
432 benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
447 for benchmark_name in benchmark_suite_object.SortedTestKeys():
448 benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
[all …]
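
The hits at 172-176 are a get-or-create lookup keyed by benchmark name, and hit 425 shows how that key is built from a trace path. A minimal runnable sketch of the same pattern; the Benchmark class body is a simplified stand-in, and the condition at the elided line 174 is assumed:

```python
class Benchmark:
    """Simplified stand-in for the Benchmark class in perf-compare.py."""
    def __init__(self, name):
        self.name = name

class BenchmarkSuite:
    def __init__(self):
        self.benchmarks_ = {}

    def getBenchmark(self, benchmark_name):
        # Get-or-create: reuse a cached Benchmark, or build one and
        # cache it under its name (hits 173-176 above).
        benchmark_object = self.benchmarks_.get(benchmark_name)
        if benchmark_object is None:  # assumed condition at the elided line 174
            benchmark_object = Benchmark(benchmark_name)
            self.benchmarks_[benchmark_name] = benchmark_object
        return benchmark_object

suite = BenchmarkSuite()
# Hit 425 builds the key by joining trace path segments:
trace = {"graphs": ["suite", "Octane", "Richards"]}
benchmark_name = "/".join(trace["graphs"][1:])  # "Octane/Richards"
assert suite.getBenchmark(benchmark_name) is suite.getBenchmark(benchmark_name)
```
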
/external/tensorflow/tensorflow/tools/benchmark/
benchmark_model.cc 215 const string& benchmark_name, const string& postfix, in RecordBenchmarkEntry() argument
219 stream << benchmark_name; in RecordBenchmarkEntry()
381 string benchmark_name = ""; in Main() local
412 Flag("benchmark_name", &benchmark_name, "benchmark name"), in Main()
482 LOG(INFO) << "Benchmark name: [" << benchmark_name << "]"; in Main()
643 if (!benchmark_name.empty() && !output_prefix.empty()) { in Main()
653 RecordBenchmarkEntry(output_prefix, benchmark_name, "", no_stat_num_runs, in Main()
657 RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-init", 1, in Main()
662 RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-first-inference", in Main()
668 output_prefix, benchmark_name, "meta-init-plus-first-inference", 1, in Main()
[all …]
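
Hits 643-668 record entries such as "meta-init" and "meta-first-inference" only when both benchmark_name and output_prefix are non-empty. A hedged sketch of driving the tool from Python: only --benchmark_name is confirmed by the hit at 412; the binary path, --graph, and --output_prefix are assumptions to verify against the tool's --help output:

```python
import subprocess

# Hypothetical invocation. --benchmark_name is registered at line 412;
# --output_prefix is assumed from the output_prefix variable paired with
# benchmark_name at line 643; the binary path and graph file are
# placeholders.
subprocess.run([
    "bazel-bin/tensorflow/tools/benchmark/benchmark_model",
    "--graph=/tmp/frozen_graph.pb",      # assumed flag and path
    "--benchmark_name=mobilenet_v1",     # enables benchmark entry logging
    "--output_prefix=/tmp/benchmarks/",  # assumed flag
], check=True)
```
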
/external/google-fruit/extras/benchmark/
run_benchmarks.py 728 benchmark_name = benchmark_definition['name']
730 … if (benchmark_name in {'boost_di_compile_time', 'boost_di_run_time', 'boost_di_executable_size'}
734 if benchmark_name == 'new_delete_run_time':
738 elif benchmark_name == 'fruit_single_file_compile_time':
744 elif benchmark_name.startswith('fruit_'):
753 }[benchmark_name]
758 elif benchmark_name.startswith('boost_di_'):
766 }[benchmark_name]
770 elif benchmark_name.startswith('simple_di_'):
790 }[benchmark_name]
[all …]
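
Lines 728-790 dispatch on the benchmark name: exact matches first, then the 'fruit_', 'boost_di_', and 'simple_di_' prefix families, each of which the real script resolves through a dict keyed by benchmark_name (hits 753, 766, 790). A condensed sketch of that shape; the run_* handlers are hypothetical stand-ins:

```python
def run_new_delete(defn):
    return 'new/delete baseline for %s' % defn['name']

def run_fruit_single_file(defn):
    return 'fruit single-file compile time for %s' % defn['name']

def run_fruit(defn):
    return 'fruit benchmark for %s' % defn['name']

def run_boost_di(defn):
    return 'boost.di benchmark for %s' % defn['name']

def run_simple_di(defn):
    return 'simple-di benchmark for %s' % defn['name']

def dispatch_benchmark(benchmark_definition):
    # Exact names are checked before the prefix families, mirroring
    # the ordering in run_benchmarks.py.
    benchmark_name = benchmark_definition['name']
    if benchmark_name == 'new_delete_run_time':
        return run_new_delete(benchmark_definition)
    elif benchmark_name == 'fruit_single_file_compile_time':
        return run_fruit_single_file(benchmark_definition)
    elif benchmark_name.startswith('fruit_'):
        return run_fruit(benchmark_definition)
    elif benchmark_name.startswith('boost_di_'):
        return run_boost_di(benchmark_definition)
    elif benchmark_name.startswith('simple_di_'):
        return run_simple_di(benchmark_definition)
    raise ValueError('unknown benchmark: %s' % benchmark_name)

print(dispatch_benchmark({'name': 'fruit_compile_time'}))
```
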
/external/python/cpython3/Tools/importbench/
importbench.py 212 benchmark_name = benchmark.__doc__
213 old_result = max(prev_results[benchmark_name])
214 new_result = max(new_results[benchmark_name])
218 print(benchmark_name, ':', result)
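Lines 212-218 key results by each benchmark function's docstring and compare the best (max) run of the baseline against the best run of the new interpreter. A small self-contained sketch with made-up numbers; the exact formatting of result at line 218 is not in the hits, so the percentage form here is illustrative:

```python
# importbench keys results by the benchmark's docstring (line 212)
# and compares the best run from each result set (lines 213-214).
prev_results = {'import of a module': [9800.0, 10100.0, 9950.0]}
new_results = {'import of a module': [10400.0, 10650.0, 10500.0]}

for benchmark_name in prev_results:
    old_result = max(prev_results[benchmark_name])
    new_result = max(new_results[benchmark_name])
    # Illustrative formatting; the real expression is not shown above.
    result = '%.2f%% of baseline' % (new_result / old_result * 100)
    print(benchmark_name, ':', result)
```
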
/external/libcxx/utils/google-benchmark/src/
statistics.cc 122 CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); in ComputeStats()
150 data.run_name = reports[0].benchmark_name(); in ComputeStats()
complexity.cc 186 std::string run_name = reports[0].benchmark_name().substr( in ComputeBigO()
187 0, reports[0].benchmark_name().find('/')); in ComputeBigO()
reporter.cc 85 std::string BenchmarkReporter::Run::benchmark_name() const { in benchmark_name() function in benchmark::BenchmarkReporter::Run
csv_reporter.cc 95 std::string name = run.benchmark_name(); in PrintRunData()
console_reporter.cc 124 result.benchmark_name().c_str()); in PrintRunData()
json_reporter.cc 170 out << indent << FormatKV("name", run.benchmark_name()) << ",\n"; in PrintRunData()
/external/google-benchmark/src/
statistics.cc 122 CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); in ComputeStats()
150 data.run_name = reports[0].benchmark_name(); in ComputeStats()
complexity.cc 186 std::string run_name = reports[0].benchmark_name().substr( in ComputeBigO()
187 0, reports[0].benchmark_name().find('/')); in ComputeBigO()
reporter.cc 85 std::string BenchmarkReporter::Run::benchmark_name() const { in benchmark_name() function in benchmark::BenchmarkReporter::Run
csv_reporter.cc 95 std::string name = run.benchmark_name(); in PrintRunData()
console_reporter.cc 123 result.benchmark_name().c_str()); in PrintRunData()
json_reporter.cc 170 out << indent << FormatKV("name", run.benchmark_name()) << ",\n"; in PrintRunData()
/external/toolchain-utils/crosperf/
results_report.py 136 def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration): argument
141 if benchmark_name not in self.perf_data:
142 self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
143 ben_data = self.perf_data[benchmark_name]
510 def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name, argument
518 raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
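Lines 141-143 build a nested mapping from benchmark name to perf event to a list of per-iteration values, created lazily the first time a benchmark appears; line 518 separately shows that raw result directories are named label + benchmark + iteration. A sketch of that layout; the append step and the sample event names are assumptions, since only the initialization is in the hits:

```python
perf_data = {}

def process_perf_report(perf_of_run, benchmark_name):
    # perf_data maps benchmark_name -> perf event -> list of values,
    # initialized lazily on first sight of a benchmark (lines 141-143).
    if benchmark_name not in perf_data:
        perf_data[benchmark_name] = {event: [] for event in perf_of_run}
    ben_data = perf_data[benchmark_name]
    # Assumed accumulation step; not shown in the hits above.
    for event, value in perf_of_run.items():
        ben_data[event].append(value)

process_perf_report({'cycles': 1.2e9, 'instructions': 3.4e9}, 'octane')
process_perf_report({'cycles': 1.3e9, 'instructions': 3.5e9}, 'octane')
print(perf_data['octane']['cycles'])  # [1200000000.0, 1300000000.0]
```
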
experiment_factory.py 160 benchmark_name = benchmark_settings.name
163 test_name = benchmark_name
240 benchmark_name,
experiment_status.py 137 benchmark_name = benchmark_run.benchmark.name
138 benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
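
Lines 137-138 group the iteration numbers of all runs under each benchmark's name, appending without an existence check. A sketch using defaultdict, which matches that unconditional append; the real initialization is outside the hits, and the benchmark_run objects here are hypothetical stand-ins:

```python
from collections import defaultdict
from types import SimpleNamespace

# Hypothetical stand-ins for crosperf's benchmark_run objects.
benchmark_runs = [
    SimpleNamespace(benchmark=SimpleNamespace(name='octane'), iteration=1),
    SimpleNamespace(benchmark=SimpleNamespace(name='octane'), iteration=2),
]

benchmark_iterations = defaultdict(list)
for benchmark_run in benchmark_runs:
    # Lines 137-138: collect iteration numbers per benchmark name.
    benchmark_name = benchmark_run.benchmark.name
    benchmark_iterations[benchmark_name].append(benchmark_run.iteration)

print(dict(benchmark_iterations))  # {'octane': [1, 2]}
```
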
/external/google-benchmark/test/
register_benchmark_test.cc 33 CHECK(name == run.benchmark_name()) << "expected " << name << " got " in CheckRun()
34 << run.benchmark_name(); in CheckRun()
skip_with_error_test.cc 36 CHECK(name == run.benchmark_name()) in CheckRun()
37 << "expected " << name << " got " << run.benchmark_name(); in CheckRun()
/external/libcxx/utils/google-benchmark/test/
register_benchmark_test.cc 33 CHECK(name == run.benchmark_name()) << "expected " << name << " got " in CheckRun()
34 << run.benchmark_name(); in CheckRun()
skip_with_error_test.cc 36 CHECK(name == run.benchmark_name()) in CheckRun()
37 << "expected " << name << " got " << run.benchmark_name(); in CheckRun()
/external/tensorflow/tensorflow/python/platform/
benchmark.py 341 benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
352 full_benchmark_name = "%s.%s" % (benchmark_name, attr)
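Line 341 forms the base name as "<module>.<class>" and line 352 appends the matching attribute name to get the full benchmark id. A runnable illustration; the benchmark class and method name are hypothetical:

```python
class SomeBenchmark:
    """Hypothetical benchmark class; TensorFlow's runner matches methods
    by attribute name."""
    def benchmark_matmul(self):
        pass

benchmark = SomeBenchmark
attr = 'benchmark_matmul'
# Line 341: base name is "<module>.<class>"; line 352 appends the
# benchmark method's attribute name.
benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
full_benchmark_name = "%s.%s" % (benchmark_name, attr)
print(full_benchmark_name)  # e.g. __main__.SomeBenchmark.benchmark_matmul
```
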
/external/tensorflow/tensorflow/compiler/aot/
tfcompile.bzl 338 benchmark_name = name + "_benchmark"
339 benchmark_file = benchmark_name + ".cc"
345 name = ("gen_" + benchmark_name),
367 name = benchmark_name,
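
Lines 338-345 and 367 derive the benchmark target's names from the tf_library target name: the benchmark rule itself, its generated .cc source, and a "gen_"-prefixed rule that produces it. The derivation written out as plain Python, with a hypothetical target name:

```python
name = "my_model"  # hypothetical tf_library target name
# Naming pattern from tfcompile.bzl lines 338-339, 345, 367:
benchmark_name = name + "_benchmark"     # "my_model_benchmark"
benchmark_file = benchmark_name + ".cc"  # "my_model_benchmark.cc"
gen_rule_name = "gen_" + benchmark_name  # "gen_my_model_benchmark"
print(benchmark_name, benchmark_file, gen_rule_name)
```
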
