Searched refs:benchmark_name (Results 1 – 25 of 64) sorted by relevance

/external/llvm-project/llvm/utils/benchmark/src/
complexity.cc:182 std::string benchmark_name = in ComputeBigO() local
183 reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/')); in ComputeBigO()
187 big_o.benchmark_name = benchmark_name + "_BigO"; in ComputeBigO()
204 rms.benchmark_name = benchmark_name + "_RMS"; in ComputeBigO()
statistics.cc:127 CHECK_EQ(reports[0].benchmark_name, run.benchmark_name); in ComputeStats()
154 data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_; in ComputeStats()
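Read together, these hits show how this vintage of google-benchmark (where benchmark_name is still a plain field) names its synthetic reports: complexity rows take the run name truncated at the first '/' plus "_BigO" or "_RMS", while statistics rows take the full run name plus "_" and the stat name. A Python paraphrase of that naming rule (derive_report_names is my name, not the library's):

    def derive_report_names(run_name, stat_names):
        # Family name is the run name truncated at the first '/', i.e. the
        # benchmark without its argument suffix ("BM_Sort/1024" -> "BM_Sort"),
        # mirroring complexity.cc line 183.
        family = run_name.split('/')[0]
        names = [family + '_BigO', family + '_RMS']         # complexity rows
        names += [run_name + '_' + s for s in stat_names]   # statistics rows
        return names

    assert derive_report_names('BM_Sort/1024', ['mean', 'stddev']) == [
        'BM_Sort_BigO', 'BM_Sort_RMS', 'BM_Sort/1024_mean', 'BM_Sort/1024_stddev']
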
/external/protobuf/benchmarks/php/
PhpBenchmark.php:47 private $benchmark_name; variable in Google\Protobuf\Benchmark\Benchmark
53 public function __construct($benchmark_name, $args, $total_bytes, argument
56 $this->benchmark_name = $benchmark_name;
73 call_user_func_array($this->benchmark_name, array(&$this->args));
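PhpBenchmark.php stores benchmark_name as the name of a callable and dispatches to it reflectively via call_user_func_array. The same move in Python, for comparison (the registry and names here are illustrative, not from the file):

    def run_named_benchmark(registry, benchmark_name, args):
        # Resolve the benchmark function by name at call time, like
        # call_user_func_array($this->benchmark_name, array(&$this->args)).
        return registry[benchmark_name](args)

    registry = {'noop': lambda args: len(args)}
    print(run_named_benchmark(registry, 'noop', [1, 2, 3]))  # -> 3
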
/external/toolchain-utils/crosperf/
experiment_factory.py:207 benchmark_name = benchmark_settings.name
210 test_name = benchmark_name
223 benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1])
226 if not benchmark_name in benchmark_names:
227 benchmark_names[benchmark_name] = True
229 raise SyntaxError("Duplicate benchmark name: '%s'." % benchmark_name)
329 benchmark = Benchmark(benchmark_name, test_name, test_args,
352 benchmark_name,
experiment_status.py:141 benchmark_name = benchmark_run.benchmark.name
142 benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
results_report.py:140 def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration): argument
145 if benchmark_name not in self.perf_data:
146 self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
147 ben_data = self.perf_data[benchmark_name]
599 def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name, argument
607 raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
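The experiment_factory.py hits are the notable ones in this group: when the same benchmark appears more than once, its name is disambiguated by appending '@@' plus a test-argument value, and an exact duplicate is a hard error. Condensed (the condition guarding the '@@' rewrite is longer in the original):

    def disambiguate(benchmark_name, test_args, benchmark_names):
        # Suffix a distinguishing arg value, e.g. 'octane' + 'suite=v8'
        # -> 'octane@@v8' (experiment_factory.py line 223).
        for arg in test_args:
            if '=' in arg:
                benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1])
        if benchmark_name in benchmark_names:
            raise SyntaxError("Duplicate benchmark name: '%s'." % benchmark_name)
        benchmark_names[benchmark_name] = True
        return benchmark_name

    print(disambiguate('octane', ['suite=v8'], {}))  # -> octane@@v8
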
/external/tensorflow/tensorflow/python/platform/
benchmark.py:222 benchmark_name = name + "__" + test_name_suffix
223 if benchmark_name in attrs:
225 "Benchmark named {} already defined.".format(benchmark_name))
230 attrs[benchmark_name] = _rename_function(benchmark, 1, benchmark_name)
474 benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
485 full_benchmark_name = "%s.%s" % (benchmark_name, attr)
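benchmark.py composes names at two levels: a per-method name built as base + "__" + suffix with a collision check (lines 222-230), and a fully qualified reporting name of the form module.Class.method (lines 474-485). In outline (the real code also rewraps the function; that detail is omitted here):

    def register_benchmark(attrs, name, test_name_suffix, benchmark_fn):
        # Per-method name; colliding names are rejected outright.
        benchmark_name = name + "__" + test_name_suffix
        if benchmark_name in attrs:
            raise ValueError(
                "Benchmark named {} already defined.".format(benchmark_name))
        attrs[benchmark_name] = benchmark_fn

    def full_benchmark_name(benchmark_class, attr):
        # Fully qualified reporting name: module.Class.method.
        benchmark_name = "%s.%s" % (benchmark_class.__module__,
                                    benchmark_class.__name__)
        return "%s.%s" % (benchmark_name, attr)
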
/external/tensorflow/tensorflow/tools/benchmark/
benchmark_model.cc:216 const string& benchmark_name, const string& postfix, in RecordBenchmarkEntry() argument
220 stream << benchmark_name; in RecordBenchmarkEntry()
382 string benchmark_name = ""; in Main() local
413 Flag("benchmark_name", &benchmark_name, "benchmark name"), in Main()
483 LOG(INFO) << "Benchmark name: [" << benchmark_name << "]"; in Main()
654 if (!benchmark_name.empty() && !output_prefix.empty()) { in Main()
664 RecordBenchmarkEntry(output_prefix, benchmark_name, "", no_stat_num_runs, in Main()
668 RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-init", 1, in Main()
673 RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-first-inference", in Main()
679 output_prefix, benchmark_name, "meta-init-plus-first-inference", 1, in Main()
[all …]
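Here --benchmark_name is a plain CLI flag (line 413), and each reported metric reuses it with a postfix such as "meta-init" or "meta-first-inference" (lines 664-679). The excerpt elides the exact output format, so the following Python shape is only an approximation:

    def record_benchmark_entry(output_prefix, benchmark_name, postfix,
                               num_runs, total_time_s):
        # One entry per metric; the postfix separates the meta-measurements
        # (init, first inference) from the main timed runs. The "#" joiner
        # is an assumption, not taken from benchmark_model.cc.
        name = benchmark_name + ('#' + postfix if postfix else '')
        print('%s %s: %d runs in %.3f s' %
              (output_prefix, name, num_runs, total_time_s))
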
/external/autotest/server/cros/
telemetry_runner.py:571 benchmark_name = ''
585 if benchmark_name == '':
586 benchmark_name = local_benchmark_name
590 if benchmark_name != local_benchmark_name:
593 'result. old: %s, new: %s', benchmark_name,
652 'name': benchmark_name,
658 'benchmark_name': benchmark_name,
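telemetry_runner.py stitches together result fragments and requires every fragment to report the same benchmark_name before it lands in the output dicts (lines 652 and 658). The consistency loop, roughly:

    def merged_benchmark_name(fragments):
        # Every fragment must agree on the benchmark name; the first one
        # seen wins, and any later mismatch is an error (lines 585-593).
        benchmark_name = ''
        for fragment in fragments:
            local_benchmark_name = fragment['benchmark_name']
            if benchmark_name == '':
                benchmark_name = local_benchmark_name
            elif benchmark_name != local_benchmark_name:
                raise ValueError('Inconsistent benchmark name in result. '
                                 'old: %s, new: %s'
                                 % (benchmark_name, local_benchmark_name))
        return benchmark_name
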
/external/google-fruit/extras/benchmark/
run_benchmarks.py:793 benchmark_name = benchmark_definition['name']
795 … if (benchmark_name in {'boost_di_compile_time', 'boost_di_run_time', 'boost_di_executable_size'}
799 if benchmark_name == 'new_delete_run_time':
803 elif benchmark_name == 'fruit_single_file_compile_time':
809 elif benchmark_name.startswith('fruit_'):
819 }[benchmark_name]
824 elif benchmark_name.startswith('boost_di_'):
833 }[benchmark_name]
837 elif benchmark_name.startswith('simple_di_'):
860 }[benchmark_name]
[all …]
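run_benchmarks.py picks a runner by matching benchmark_name exactly first and then by family prefix ('fruit_', 'boost_di_', 'simple_di_'). A skeleton of that dispatch (the returned labels stand in for the script's real runner tables, which the excerpt truncates):

    def pick_runner_family(benchmark_name):
        # Exact matches take priority over the startswith() families.
        if benchmark_name == 'new_delete_run_time':
            return 'new_delete'
        if benchmark_name == 'fruit_single_file_compile_time':
            return 'fruit_single_file'
        if benchmark_name.startswith('fruit_'):
            return 'fruit'
        if benchmark_name.startswith('boost_di_'):
            return 'boost_di'
        if benchmark_name.startswith('simple_di_'):
            return 'simple_di'
        raise KeyError('unknown benchmark: %s' % benchmark_name)
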
/external/chromium-trace/catapult/common/py_trace_event/py_trace_event/trace_event_impl/
log.py:314 benchmark_name, argument
341 benchmark_name=benchmark_name,
358 "benchmarks": [benchmark_name],
perfetto_trace_writer.py:166 benchmark_name, argument
177 metadata.benchmark_name = benchmark_name
/external/tensorflow/tensorflow/python/eager/
benchmarks_test_base.py:79 benchmark_name = self._get_benchmark_name()
81 iters=num_iters, wall_time=mean_us, extras=extras, name=benchmark_name)
/external/llvm-project/libcxx/utils/google-benchmark/src/
statistics.cc:122 CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); in ComputeStats()
150 data.run_name = reports[0].benchmark_name(); in ComputeStats()
/external/libcxx/utils/google-benchmark/src/
statistics.cc:122 CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); in ComputeStats()
150 data.run_name = reports[0].benchmark_name(); in ComputeStats()
/external/python/cpython3/Tools/importbench/
importbench.py:212 benchmark_name = benchmark.__doc__
213 old_result = max(prev_results[benchmark_name])
214 new_result = max(new_results[benchmark_name])
218 print(benchmark_name, ':', result)
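importbench uses each benchmark function's docstring as its key and display name, and compares the best (max) timing from the old and new runs. The excerpt skips the lines that format result, so the percentage below is an assumption:

    def compare_runs(benchmark, prev_results, new_results):
        # The docstring doubles as the benchmark's key and display name.
        benchmark_name = benchmark.__doc__
        old_result = max(prev_results[benchmark_name])
        new_result = max(new_results[benchmark_name])
        result = '%.2f%%' % (new_result / old_result * 100)  # assumed formatting
        print(benchmark_name, ':', result)
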
/external/kotlinx.coroutines/benchmarks/scripts/
generate_plots_flow_flatten_merge.py:14 benchmark_name = "benchmarks.flow.FlowFlattenMergeBenchmark.flattenMerge" variable
69 data = data[(data.benchmark == benchmark_name)]
/external/llvm-project/llvm/utils/benchmark/test/
register_benchmark_test.cc:32 CHECK(name == run.benchmark_name) << "expected " << name << " got " in CheckRun()
33 << run.benchmark_name; in CheckRun()
skip_with_error_test.cc:36 CHECK(name == run.benchmark_name) << "expected " << name << " got " in CheckRun()
37 << run.benchmark_name; in CheckRun()
/external/google-benchmark/test/
register_benchmark_test.cc:33 CHECK(name == run.benchmark_name()) << "expected " << name << " got " in CheckRun()
34 << run.benchmark_name(); in CheckRun()
skip_with_error_test.cc:36 CHECK(name == run.benchmark_name()) in CheckRun()
37 << "expected " << name << " got " << run.benchmark_name(); in CheckRun()
/external/llvm-project/libcxx/utils/google-benchmark/test/
register_benchmark_test.cc:33 CHECK(name == run.benchmark_name()) << "expected " << name << " got " in CheckRun()
34 << run.benchmark_name(); in CheckRun()
skip_with_error_test.cc:36 CHECK(name == run.benchmark_name()) in CheckRun()
37 << "expected " << name << " got " << run.benchmark_name(); in CheckRun()
/external/libcxx/utils/google-benchmark/test/
register_benchmark_test.cc:33 CHECK(name == run.benchmark_name()) << "expected " << name << " got " in CheckRun()
34 << run.benchmark_name(); in CheckRun()
skip_with_error_test.cc:36 CHECK(name == run.benchmark_name()) in CheckRun()
37 << "expected " << name << " got " << run.benchmark_name(); in CheckRun()
