/external/rust/crates/grpcio-sys/grpc/tools/run_tests/python_utils/upload_test_results.py
70:def _get_build_metadata(test_results):   [argument]
80:    test_results['build_id'] = build_id
82:    test_results['build_url'] = build_url
84:    test_results['job_name'] = job_name
128:    test_results = {}
129:    _get_build_metadata(test_results)
130:    test_results['cpu_estimated'] = result.cpu_estimated
131:    test_results['cpu_measured'] = result.cpu_measured
132:    test_results['elapsed_time'] = '%.2f' % result.elapsed_time
133:    test_results['result'] = result.state
[all …]
/external/grpc-grpc/tools/run_tests/python_utils/upload_test_results.py
70:def _get_build_metadata(test_results):   [argument]
80:    test_results['build_id'] = build_id
82:    test_results['build_url'] = build_url
84:    test_results['job_name'] = job_name
129:    test_results = {}
130:    _get_build_metadata(test_results)
131:    test_results['cpu_estimated'] = result.cpu_estimated
132:    test_results['cpu_measured'] = result.cpu_measured
133:    test_results['elapsed_time'] = '%.2f' % result.elapsed_time
134:    test_results['result'] = result.state
[all …]
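Both gRPC copies above (the grpc-grpc tree and its vendored grpcio-sys duplicate) follow the same shape: a flat dict is filled with build metadata first, then per-test measurements. A minimal sketch of that shape, assuming the metadata comes from environment variables; only the dict keys are taken from the matches, while the `os.getenv` sources and the `_make_result_row` helper are hypothetical:

```python
import os

def _get_build_metadata(test_results):
    # Only these three keys appear in the matches above; reading them from
    # environment variables is an assumption made for this sketch.
    test_results['build_id'] = os.getenv('BUILD_ID', 'unknown')
    test_results['build_url'] = os.getenv('BUILD_URL', 'unknown')
    test_results['job_name'] = os.getenv('JOB_NAME', 'unknown')

def _make_result_row(result):
    # Hypothetical helper: flatten one result object into an upload row.
    test_results = {}
    _get_build_metadata(test_results)
    test_results['cpu_estimated'] = result.cpu_estimated
    test_results['cpu_measured'] = result.cpu_measured
    test_results['elapsed_time'] = '%.2f' % result.elapsed_time
    test_results['result'] = result.state
    return test_results
```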
/external/autotest/client/site_tests/graphics_dEQP/graphics_dEQP.py
97:                 test_results=None,
113:    if test_results is None:
114:        test_results = {}
118:    return test_results
149:    test_results[result] = test_results.get(result, 0) + 1
158:    return test_results
327:    test_results = {}
414:    test_results[result] = test_results.get(result, 0) + 1
416:    return test_results
513:    test_results = self._run_tests_hasty(test_cases, failing_test)
[all …]
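The recurring line `test_results[result] = test_results.get(result, 0) + 1` is a plain-dict histogram of dEQP status strings; `collections.Counter` is the idiomatic equivalent. A self-contained sketch (the `tally` name and sample statuses are mine):

```python
def tally(statuses):
    # Count occurrences of each status string, the dict.get() idiom above.
    test_results = {}
    for result in statuses:
        test_results[result] = test_results.get(result, 0) + 1
    return test_results

print(tally(['Pass', 'Fail', 'Pass', 'NotSupported']))
# -> {'Pass': 2, 'Fail': 1, 'NotSupported': 1}
```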
/external/tensorflow/tensorflow/python/compiler/tensorrt/model_tests/result_analyzer.py
105:    test_results: model_handler.TestResultCollection) -> DataFrame:
112:    for result in test_results.results:
122:def analyze_test_latency(test_results: model_handler.TestResultCollection,
126:        test_results.cpu_base_result
127:        if use_cpu_baseline else test_results.gpu_base_result)
134:    for result in test_results.results:
140:def analyze_test_numerics(test_results: model_handler.TestResultCollection,
152:        test_results.cpu_base_result
153:        if use_cpu_baseline else test_results.gpu_base_result)
161:    for result in test_results.results:
[all …]
/external/tensorflow/tensorflow/python/compiler/tensorrt/model_tests/run_models.py
131:    test_results = manager.run(inputs)
134:    analysis_result_df, _ = self._analyzer.analysis(test_results)
/external/vulkan-validation-layers/scripts/parse_test_results.py
48:        self.test_results = defaultdict(defaultdict)
72:        for test_name, results in self.test_results.items():
98:        num_tests = len(self.test_results)
132:        self.test_results[self.current_test][self.current_profile] = "skip"
137:        if self.test_results.get(self.current_test, {}).get(self.current_profile, "") != "skip":
138:            self.test_results[self.current_test][self.current_profile] = "pass"
143:        self.test_results[self.current_test][self.current_profile] = "fail"
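parse_test_results.py keeps a two-level table keyed by test name, then profile, holding a status string, with a guard so an earlier "skip" is not overwritten by a later "pass". A sketch of just that state logic (the mark_* function names are hypothetical):

```python
from collections import defaultdict

# test name -> profile -> "skip" | "pass" | "fail"
test_results = defaultdict(defaultdict)

def mark_skip(test, profile):
    test_results[test][profile] = "skip"

def mark_pass(test, profile):
    # A test already marked "skip" for this profile stays skipped.
    if test_results.get(test, {}).get(profile, "") != "skip":
        test_results[test][profile] = "pass"

def mark_fail(test, profile):
    test_results[test][profile] = "fail"
```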
/external/webrtc/modules/audio_coding/neteq/test/result_sink.h
30:  void AddResult(const T* test_results, size_t length);
43:  void ResultSink::AddResult(const T* test_results, size_t length) {   [in AddResult(), argument]
45:    ASSERT_EQ(length, fwrite(test_results, sizeof(T), length, output_fp_));   [in AddResult()]
47:    digest_->Update(test_results, sizeof(T) * length);   [in AddResult()]
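ResultSink::AddResult streams the raw sample buffer to disk and feeds the same bytes into a rolling checksum. A Python analog of that dual write, assuming an md5 digest and array-typed samples (the original's digest type is not visible in the matches):

```python
import array
import hashlib

class ResultSink:
    def __init__(self, path):
        self._fp = open(path, 'wb')
        self._digest = hashlib.md5()

    def add_result(self, test_results):
        # Mirrors fwrite(test_results, sizeof(T), length, output_fp_)
        # followed by digest_->Update(test_results, sizeof(T) * length).
        data = test_results.tobytes()
        self._fp.write(data)
        self._digest.update(data)

sink = ResultSink('neteq_output.bin')
sink.add_result(array.array('h', [0, 1, -1, 32767]))
```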
/external/autotest/client/site_tests/platform_GesturesRegressionTest/platform_GesturesRegressionTest.py
54:        self.test_results = {}
60:        self.test_results[key.replace('/', '-')] = score
64:        if self.test_results:
65:            self.write_perf_keyval(self.test_results)
/external/angle/src/tests/restricted_traces/restricted_trace_gold_tests.py
335:def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):   [argument]
395:        test_results[trace] = {'expected': expected_result, 'actual': result}
397:            test_results[trace]['artifacts'] = artifacts
403:    for _, trace_results in test_results.items():
471:    test_results = {}
492:                  results, test_results):
496:                  results, test_results):
501:                  test_results):
509:    if test_results:
510:        results['tests']['angle_restricted_trace_gold_tests'] = test_results
[all …]
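Each ANGLE trace gets an expected/actual record, optionally with artifacts, and the per-trace dict is attached to the JSON results under the suite key only when non-empty. A reduced sketch of that record shape (trace names, statuses, and the `record` helper are placeholders):

```python
test_results = {}

def record(trace, expected_result, result, artifacts=None):
    test_results[trace] = {'expected': expected_result, 'actual': result}
    if artifacts:
        test_results[trace]['artifacts'] = artifacts

record('trace_a', 'PASS', 'PASS')
record('trace_b', 'PASS', 'FAIL', artifacts={'image': ['diff.png']})

results = {'tests': {}}
if test_results:
    results['tests']['angle_restricted_trace_gold_tests'] = test_results
```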
/external/tensorflow/tensorflow/tools/test/run_and_gather_logs.py
73:    test_results, _ = run_and_gather_logs_lib.run_and_gather_logs(
78:    test_results.build_configuration.CopyFrom(gather_build_configuration())
81:    print(text_format.MessageToString(test_results))
96:    json_test_results = json_format.MessageToJson(test_results)
/external/libchrome/base/test/launcher/test_results_tracker.cc
105:    TestResult result = i.second.test_results.back();   [in ~TestResultsTracker()]
240:        TestNameWithoutDisabledPrefix(result.full_name)].test_results.push_back(   [in AddTestResult()]
344:    std::unique_ptr<ListValue> test_results(new ListValue);   [in SaveSummaryAsJSON(), local]
346:    for (size_t k = 0; k < j->second.test_results.size(); k++) {   [in SaveSummaryAsJSON()]
347:      const TestResult& test_result = j->second.test_results[k];   [in SaveSummaryAsJSON()]
419:      test_results->Append(std::move(test_result_value));   [in SaveSummaryAsJSON()]
423:        std::move(test_results));   [in SaveSummaryAsJSON()]
496:    const TestResult& result = j->second.test_results.back();   [in GetTestStatusForIteration()]
/external/libchrome/base/test/launcher/unit_test_launcher.cc
288:    std::vector<TestResult> test_results;   [in ProcessTestResults(), local]
291:    ProcessGTestOutput(output_file, &test_results, &crashed);   [in ProcessTestResults()]
299:    for (size_t i = 0; i < test_results.size(); i++)   [in ProcessTestResults()]
300:      results_map[test_results[i].full_name] = test_results[i];   [in ProcessTestResults()]
/external/libchrome/base/test/launcher/test_results_tracker.h
99:    std::vector<TestResult> test_results;   [member]
/external/autotest/server/site_utils_unittest.py
65:        test_results = [
82:            [{'test_name':r[0], 'status':r[1]} for r in test_results])
/external/autotest/server/cros/dynamic_suite/suite_unittest.py
782:        test_results = self._createSuiteMockResults()
785:            [test_results[0] + test_results[1]],
817:        test_results = self._createSuiteMockResults(result_status='WARN')
820:            [test_results[0] + test_results[1]],
844:        test_results = self._createSuiteMockResults()
847:            [test_results[0] + test_results[1]],
/external/toolchain-utils/crosperf/results_report.py
440:def _GetHTMLCharts(label_names, test_results):   [argument]
442:    for item, runs in test_results.items():
493:    test_results = self.benchmark_results.run_keyvals
494:    charts = _GetHTMLCharts(label_names, test_results)
738:    for test, test_results in benchmark_results.run_keyvals.items():
739:        for label_name, label_results in zip(label_names, test_results):
/external/deqp/external/openglcts/modules/glesext/tessellation_shader/esextcTessellationShaderIsolines.cpp
502:    _test_results& test_results = m_test_results[vertex_spacing_mode];   [in findTestResult(), local]
506:    for (_test_results_iterator test_results_iterator = test_results.begin();   [in findTestResult()]
507:         test_results_iterator != test_results.end(); test_results_iterator++)   [in findTestResult()]
/external/perfetto/test/stress_test/stress_test.cc
111:    const std::list<TestResult>& test_results() const { return test_results_; }   [function in perfetto::(anonymous namespace)::TestHarness]
468:    for (const auto& tres : th.test_results()) {   [in StressTestMain()]
/external/nos/host/generic/nugget/proto/nugget/app/protoapi/testing_api.proto
366:    oneof test_results {
/external/rust/crates/grpcio-sys/grpc/tools/run_tests/run_xds_tests.py
2118:    test_results = {}   [variable]
2244:        test_results[test_case] = [result]
2251:        report_utils.render_junit_xml_report(test_results,
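run_xds_tests.py accumulates a list of results per test case and hands the dict to report_utils.render_junit_xml_report. A standalone approximation with xml.etree.ElementTree; the element and attribute names here are my assumption, not report_utils' actual output:

```python
import xml.etree.ElementTree as ET

def render_junit_xml_report(test_results, path):
    # test_results maps test-case name -> list of result strings.
    suite = ET.Element('testsuite', name='xds_tests')
    for test_case, results in test_results.items():
        for result in results:
            case = ET.SubElement(suite, 'testcase', name=test_case)
            if result != 'PASS':
                ET.SubElement(case, 'failure', message=str(result))
    ET.ElementTree(suite).write(path, xml_declaration=True)

render_junit_xml_report({'ping_pong': ['PASS'], 'round_robin': ['FAIL']},
                        'sponge_log.xml')
```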
/external/tensorflow/third_party/gpus/rocm_configure.bzl
295:    test_results = _batch_files_exist(repository_ctx, libs_paths, bash_bin)
302:    if test_results[i] and selected_path == None:
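rocm_configure.bzl batch-tests a list of candidate library paths and keeps the first that exists. The same first-match selection in plain Python, with os.path.exists standing in for _batch_files_exist (the bzl snippet's `== None` is written as the idiomatic `is None` here):

```python
import os

def select_first_existing(libs_paths):
    # Stand-in for _batch_files_exist(): one bool per candidate path.
    test_results = [os.path.exists(p) for p in libs_paths]
    selected_path = None
    for i in range(len(libs_paths)):
        if test_results[i] and selected_path is None:
            selected_path = libs_paths[i]
    return selected_path
```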
|