/external/angle/scripts/ |
D | process_angle_perf_results.py |
    137  for benchmark_name, directories in benchmark_directory_map.items():
    140  is_ref = '.reference' in benchmark_name
    164  logging.error('Failed to obtain test results for %s: %s', benchmark_name, e)
    169  logging.info('Benchmark %s ran no tests on at least one shard' % benchmark_name)
    171  benchmark_enabled_map[benchmark_name] = True
    187  for benchmark_name, directories in benchmark_directory_map.items():
    193  name=_generate_unique_logdog_filename(benchmark_name), data=f.read())
    194  benchmark_logs_links[benchmark_name].append(uploaded_link)
    241  benchmark_name = _get_benchmark_name(directory)
    242  logging.debug('Found benchmark %s directory %s' % (benchmark_name, directory))
    [all …]
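The hits above suggest a bookkeeping loop: walk a map of benchmark names to shard directories, treat a '.reference' suffix as a reference run, and note which benchmarks actually produced results. A minimal sketch of that idea, assuming a hypothetical `load_test_results` helper rather than the script's real parsing code:

```python
import logging


def summarize_benchmarks(benchmark_directory_map, load_test_results):
    """Sketch: record which benchmarks produced results on every shard.

    benchmark_directory_map: dict of benchmark name -> list of shard directories.
    load_test_results: callable(directory) -> parsed results (hypothetical helper).
    """
    benchmark_enabled_map = {}
    for benchmark_name, directories in benchmark_directory_map.items():
        # Reference builds are conventionally suffixed with '.reference'.
        is_ref = '.reference' in benchmark_name
        enabled = True
        for directory in directories:
            try:
                results = load_test_results(directory)
            except Exception as e:
                logging.error('Failed to obtain test results for %s: %s',
                              benchmark_name, e)
                enabled = False
                continue
            if not results:
                logging.info('Benchmark %s ran no tests on at least one shard',
                             benchmark_name)
                enabled = False
        benchmark_enabled_map[benchmark_name] = enabled
        logging.debug('Benchmark %s (reference=%s) enabled=%s',
                      benchmark_name, is_ref, enabled)
    return benchmark_enabled_map
```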
|
/external/cronet/third_party/protobuf/benchmarks/php/ |
D | PhpBenchmark.php |
    47  private $benchmark_name;    variable in Google\Protobuf\Benchmark\Benchmark
    53  public function __construct($benchmark_name, $args, $total_bytes,    argument
    56  $this->benchmark_name = $benchmark_name;
    73  call_user_func_array($this->benchmark_name, array(&$this->args));
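The PHP harness stores the benchmark's callable name and later invokes it through call_user_func_array. A rough Python analogue of that dynamic-dispatch pattern (class shape and name resolution here are illustrative, not taken from the protobuf sources):

```python
import importlib


class Benchmark:
    """Illustrative analogue: remember a callable's dotted name and its args."""

    def __init__(self, benchmark_name, args, total_bytes):
        self.benchmark_name = benchmark_name  # e.g. 'mypkg.bench.parse_from_string'
        self.args = args
        self.total_bytes = total_bytes

    def run(self):
        # Resolve the stored name to a callable and apply the saved arguments,
        # mirroring call_user_func_array in the PHP version.
        module_name, func_name = self.benchmark_name.rsplit('.', 1)
        func = getattr(importlib.import_module(module_name), func_name)
        return func(*self.args)
```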
|
/external/protobuf/benchmarks/php/ |
D | PhpBenchmark.php |
    47  private $benchmark_name;    variable in Google\Protobuf\Benchmark\Benchmark
    53  public function __construct($benchmark_name, $args, $total_bytes,    argument
    56  $this->benchmark_name = $benchmark_name;
    73  call_user_func_array($this->benchmark_name, array(&$this->args));
|
/external/toolchain-utils/crosperf/ |
D | experiment_factory.py |
    242  benchmark_name = benchmark_settings.name
    245  test_name = benchmark_name
    260  benchmark_name = "%s@@%s" % (
    261  benchmark_name,
    266  if not benchmark_name in benchmark_names:
    267  benchmark_names[benchmark_name] = True
    270  "Duplicate benchmark name: '%s'." % benchmark_name
    433  benchmark_name,
    466  benchmark_name,
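Two details stand out in these hits: a composite '%s@@%s' name used to disambiguate repeated benchmarks, and a dictionary that rejects duplicate names. A condensed sketch of that validation (the `variant` attribute is my stand-in for whatever settings field the factory actually appends):

```python
def collect_benchmark_names(benchmark_settings_list):
    """Sketch: build unique benchmark names, rejecting duplicates."""
    benchmark_names = {}
    for settings in benchmark_settings_list:
        benchmark_name = settings.name
        # When the same test runs under different settings, crosperf-style
        # tooling disambiguates with a 'name@@variant' composite.
        if getattr(settings, 'variant', None):
            benchmark_name = '%s@@%s' % (benchmark_name, settings.variant)
        if benchmark_name not in benchmark_names:
            benchmark_names[benchmark_name] = True
        else:
            raise RuntimeError("Duplicate benchmark name: '%s'." % benchmark_name)
    return list(benchmark_names)
```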
|
D | experiment_status.py |
    155  benchmark_name = benchmark_run.benchmark.name
    156  benchmark_iterations[benchmark_name].append(
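The status code groups runs by benchmark name and appends each run's iteration to a per-name list; in Python that is naturally a `defaultdict(list)`. A tiny sketch (the `.iteration` attribute is assumed):

```python
from collections import defaultdict


def group_iterations(benchmark_runs):
    """Sketch: map benchmark name -> list of iteration numbers."""
    benchmark_iterations = defaultdict(list)
    for benchmark_run in benchmark_runs:
        benchmark_name = benchmark_run.benchmark.name
        benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
    return benchmark_iterations
```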
|
D | results_report.py |
    139  def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):    argument
    144  if benchmark_name not in self.perf_data:
    145  self.perf_data[benchmark_name] = {
    148  ben_data = self.perf_data[benchmark_name]
    654  results_directory, label_name, benchmark_name, benchmark_iteration    argument
    662  raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
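The report builder appears to nest perf data under the benchmark name and to derive a raw results directory from label, benchmark, and a 1-based iteration number. A small sketch of both conventions; the helper names and inner dict shape are mine, not the report's:

```python
import os


def add_perf_report(perf_data, perf_report, label, benchmark_name, iteration):
    """Sketch: nest parsed perf reports under benchmark name, then label."""
    if benchmark_name not in perf_data:
        perf_data[benchmark_name] = {}
    ben_data = perf_data[benchmark_name].setdefault(label, {})
    ben_data[iteration] = perf_report
    return perf_data


def raw_results_dir(results_directory, label_name, benchmark_name, iteration):
    """Sketch: iteration is 0-based internally but 1-based in directory names."""
    raw_dir_name = label_name + benchmark_name + str(iteration + 1)
    return os.path.join(results_directory, raw_dir_name)
```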
|
/external/tensorflow/tensorflow/python/platform/ |
D | benchmark.py |
    206  benchmark_name = name + "__" + test_name_suffix
    207  if benchmark_name in attrs:
    209  "Benchmark named {} already defined.".format(benchmark_name))
    214  attrs[benchmark_name] = _rename_function(benchmark, 1, benchmark_name)
    458  benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
    469  full_benchmark_name = "%s.%s" % (benchmark_name, attr)
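TensorFlow's harness synthesizes one benchmark method per parameter case by suffixing the base name, refuses to register a name twice, and reports results under a fully qualified module.Class.attr name. A hedged sketch of those two pieces (the real code also rewrites the function's __name__ via a _rename_function helper, omitted here):

```python
def register_parameterized_benchmarks(attrs, name, cases):
    """Sketch: add one attribute per case, guarding against name collisions.

    attrs: the class namespace dict being populated (as in a metaclass).
    cases: mapping of test_name_suffix -> benchmark callable.
    """
    for test_name_suffix, benchmark in cases.items():
        benchmark_name = name + "__" + test_name_suffix
        if benchmark_name in attrs:
            raise Exception(
                "Benchmark named {} already defined.".format(benchmark_name))
        attrs[benchmark_name] = benchmark  # real code also renames the function
    return attrs


def full_benchmark_name(benchmark_class, attr):
    """Sketch: module-qualified name used when reporting results."""
    benchmark_name = "%s.%s" % (benchmark_class.__module__,
                                benchmark_class.__name__)
    return "%s.%s" % (benchmark_name, attr)
```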
|
/external/tensorflow/tensorflow/tools/benchmark/ |
D | benchmark_model.cc |
    220  const string& benchmark_name, const string& postfix,    in RecordBenchmarkEntry() argument
    224  stream << benchmark_name;    in RecordBenchmarkEntry()
    386  string benchmark_name = "";    in Main() local
    417  Flag("benchmark_name", &benchmark_name, "benchmark name"),    in Main()
    487  LOG(INFO) << "Benchmark name: [" << benchmark_name << "]";    in Main()
    658  if (!benchmark_name.empty() && !output_prefix.empty()) {    in Main()
    668  RecordBenchmarkEntry(output_prefix, benchmark_name, "", no_stat_num_runs,    in Main()
    672  RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-init", 1,    in Main()
    677  RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-first-inference",    in Main()
    683  output_prefix, benchmark_name, "meta-init-plus-first-inference", 1,    in Main()
    [all …]
|
/external/cronet/testing/scripts/ |
D | run_performance_tests.py |
    235  def write_simple_test_results(return_code, output_filepath, benchmark_name):    argument
    241  benchmark_name += '_shard_%s' % os.environ.get('GTEST_SHARD_INDEX', '0')
    244  benchmark_name: {
    263  def upload_simple_test_results(return_code, benchmark_name):    argument
    279  'testId': benchmark_name,
    670  benchmark_name = options.gtest_benchmark_name
    686  if not benchmark_name:
    687  benchmark_name = options.executable
    688  output_paths = OutputFilePaths(isolated_out_dir, benchmark_name).SetUp()
    689  print('\n### {folder} ###'.format(folder=benchmark_name))
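For plain gtest executables the runner falls back to the executable name when no gtest benchmark name is supplied, and the simple results writer appends the swarming shard index to the benchmark name. A minimal sketch of that writer; the inner 'expected'/'actual' keys are illustrative, not the exact schema:

```python
import json
import os


def write_simple_test_results(return_code, output_filepath, benchmark_name,
                              shard_results=True):
    """Sketch: emit a minimal pass/fail entry keyed by benchmark name."""
    if shard_results:
        # One entry per swarming shard, disambiguated by GTEST_SHARD_INDEX.
        benchmark_name += '_shard_%s' % os.environ.get('GTEST_SHARD_INDEX', '0')
    results = {
        benchmark_name: {
            'expected': 'PASS',
            'actual': 'PASS' if return_code == 0 else 'FAIL',
        },
    }
    with open(output_filepath, 'w') as f:
        json.dump(results, f)
```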
|
/external/google-fruit/extras/benchmark/ |
D | run_benchmarks.py |
    793  benchmark_name = benchmark_definition['name']
    795  … if (benchmark_name in {'boost_di_compile_time', 'boost_di_run_time', 'boost_di_executable_size'}
    799  if benchmark_name == 'new_delete_run_time':
    803  elif benchmark_name == 'fruit_single_file_compile_time':
    809  elif benchmark_name.startswith('fruit_'):
    819  }[benchmark_name]
    824  elif benchmark_name.startswith('boost_di_'):
    833  }[benchmark_name]
    837  elif benchmark_name.startswith('simple_di_'):
    860  }[benchmark_name]
    [all …]
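The fruit benchmark runner selects how to build and run each benchmark by name: a couple of exact matches first, then startswith checks that index into family-specific tables. A condensed sketch of that dispatch, with placeholder runner objects standing in for the real classes:

```python
class _Placeholder:
    """Stand-in for the runner classes in the real script."""

    def __init__(self, name):
        self.name = name


def select_benchmark_runner(benchmark_name):
    """Sketch: choose a runner by exact name first, then by prefix family."""
    if benchmark_name == 'new_delete_run_time':
        return _Placeholder('new_delete_run_time_runner')
    if benchmark_name == 'fruit_single_file_compile_time':
        return _Placeholder('fruit_single_file_runner')
    if benchmark_name.startswith('fruit_'):
        return {
            'fruit_compile_time': _Placeholder('fruit_compile_time_runner'),
            'fruit_run_time': _Placeholder('fruit_run_time_runner'),
        }[benchmark_name]
    if benchmark_name.startswith('boost_di_'):
        return {
            'boost_di_compile_time': _Placeholder('boost_di_compile_time_runner'),
            'boost_di_run_time': _Placeholder('boost_di_run_time_runner'),
        }[benchmark_name]
    raise ValueError('Unrecognized benchmark name: %s' % benchmark_name)
```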
|
/external/autotest/server/cros/ |
D | telemetry_runner.py |
    513  benchmark_name = ''
    527  if benchmark_name == '':
    528  benchmark_name = local_benchmark_name
    532  if benchmark_name != local_benchmark_name:
    535  'result. old: %s, new: %s', benchmark_name,
    594  'name': benchmark_name,
    600  'benchmark_name': benchmark_name,
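Here the runner merges results coming back from several shards and insists that every shard reports the same benchmark name, logging when they disagree. A small sketch of that consistency check (the shape of shard_results is assumed):

```python
import logging


def merge_shard_results(shard_results):
    """Sketch: fold per-shard results, verifying they agree on the benchmark.

    shard_results: iterable of (local_benchmark_name, values) pairs.
    """
    benchmark_name = ''
    merged_values = []
    for local_benchmark_name, values in shard_results:
        if benchmark_name == '':
            benchmark_name = local_benchmark_name
        elif benchmark_name != local_benchmark_name:
            logging.warning(
                'Benchmark name differs between shards of the same result. '
                'old: %s, new: %s', benchmark_name, local_benchmark_name)
        merged_values.extend(values)
    return {'name': benchmark_name,
            'benchmark_name': benchmark_name,
            'values': merged_values}
```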
|
/external/chromium-trace/catapult/common/py_trace_event/py_trace_event/trace_event_impl/ |
D | log.py |
    314  benchmark_name,    argument
    341  benchmark_name=benchmark_name,
    358  "benchmarks": [benchmark_name],
|
D | perfetto_trace_writer.py |
    166  benchmark_name,    argument
    177  metadata.benchmark_name = benchmark_name
|
D | perfetto_proto_classes.py |
    204  self.benchmark_name = None
    219  if self.benchmark_name is not None:
    221  writer(parts.append, self.benchmark_name)
|
/external/python/cpython3/Tools/importbench/ |
D | importbench.py |
    212  benchmark_name = benchmark.__doc__
    213  old_result = max(prev_results[benchmark_name])
    214  new_result = max(new_results[benchmark_name])
    218  print(benchmark_name, ':', result)
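importbench keys its results by each benchmark function's docstring and compares the best old measurement against the best new one. A sketch of that comparison, assuming results are stored as lists of numbers per name:

```python
def compare_results(benchmark, prev_results, new_results):
    """Sketch: print how a benchmark's best result changed between runs.

    prev_results / new_results: dict of benchmark docstring -> list of measurements.
    """
    benchmark_name = benchmark.__doc__
    old_result = max(prev_results[benchmark_name])
    new_result = max(new_results[benchmark_name])
    result = '%.2fx of the previous best' % (new_result / old_result)
    print(benchmark_name, ':', result)
```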
|
/external/libcxx/utils/google-benchmark/src/ |
D | statistics.cc |
    122  CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());    in ComputeStats()
    150  data.run_name = reports[0].benchmark_name();    in ComputeStats()
|
D | complexity.cc |
    186  std::string run_name = reports[0].benchmark_name().substr(    in ComputeBigO()
    187  0, reports[0].benchmark_name().find('/'));    in ComputeBigO()
|
/external/tensorflow/tensorflow/python/eager/ |
D | benchmarks_test_base.py |
    75  benchmark_name = self._get_benchmark_name()
    77  iters=num_iters, wall_time=mean_us, extras=extras, name=benchmark_name)
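The eager benchmarks derive a name and hand it to tf.test.Benchmark.report_benchmark along with timing and extras. report_benchmark and its keyword arguments are public TensorFlow API; the timing loop below is a simplified sketch, not the file's actual implementation:

```python
import time

import tensorflow as tf


class MicroBenchmarks(tf.test.Benchmark):
    """Sketch: time a callable and report it under a derived benchmark name."""

    def _run(self, func, num_iters, benchmark_name):
        start = time.time()
        for _ in range(num_iters):
            func()
        # Mean time per iteration, converted to microseconds as in the hits above.
        mean_us = (time.time() - start) * 1e6 / num_iters
        self.report_benchmark(
            iters=num_iters, wall_time=mean_us, extras={}, name=benchmark_name)
```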
|
/external/kotlinx.coroutines/benchmarks/scripts/ |
D | generate_plots_flow_flatten_merge.py |
    14  benchmark_name = "benchmarks.flow.FlowFlattenMergeBenchmark.flattenMerge"    variable
    69  data = data[(data.benchmark == benchmark_name)]
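The plotting script pins the fully qualified JMH benchmark name in a module-level constant and filters the loaded frame down to just that benchmark. A short pandas sketch of the same filtering; the CSV path and exact column name are assumptions mirrored from the snippet:

```python
import pandas as pd

benchmark_name = "benchmarks.flow.FlowFlattenMergeBenchmark.flattenMerge"


def load_benchmark_rows(csv_path):
    """Sketch: keep only the rows for the benchmark we want to plot."""
    data = pd.read_csv(csv_path)
    # Column name mirrors the snippet above; the real CSV layout may differ.
    return data[data.benchmark == benchmark_name]
```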
|
/external/google-benchmark/test/ |
D | register_benchmark_test.cc |
    33  CHECK(name == run.benchmark_name()) << "expected " << name << " got "    in CheckRun()
    34  << run.benchmark_name();    in CheckRun()
|
D | skip_with_error_test.cc |
    36  CHECK(name == run.benchmark_name())    in CheckRun()
    37  << "expected " << name << " got " << run.benchmark_name();    in CheckRun()
|
/external/libcxx/utils/google-benchmark/test/ |
D | register_benchmark_test.cc |
    33  CHECK(name == run.benchmark_name()) << "expected " << name << " got "    in CheckRun()
    34  << run.benchmark_name();    in CheckRun()
|
D | skip_with_error_test.cc |
    36  CHECK(name == run.benchmark_name())    in CheckRun()
    37  << "expected " << name << " got " << run.benchmark_name();    in CheckRun()
|
/external/perfetto/src/trace_processor/importers/proto/ |
D | metadata_minimal_module.cc |
    62  auto benchmark_name_id = storage->InternString(packet.benchmark_name());    in ParseChromeBenchmarkMetadata()
    63  metadata->SetMetadata(metadata::benchmark_name,    in ParseChromeBenchmarkMetadata()
|
/external/google-benchmark/src/ |
D | statistics.cc |
    122  CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());    in ComputeStats()
|