/external/linux-kselftest/tools/testing/selftests/

D | kselftest_module.h |
      14  static unsigned int failed_tests __initdata
      20  failed_tests++; \
      24  static inline int kstm_report(unsigned int total_tests, unsigned int failed_tests)  [in kstm_report(), argument]
      26  if (failed_tests == 0)  [in kstm_report()]
      29  pr_warn("failed %u out of %u tests\n", failed_tests, total_tests);  [in kstm_report()]
      31  return failed_tests ? -EINVAL : 0;  [in kstm_report()]
      39  return kstm_report(total_tests, failed_tests); \
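The kselftest header implements the whole pattern in a few lines: a per-module failure counter bumped by a macro, and a kstm_report() helper that maps any failure to -EINVAL. A minimal Python sketch of that count-and-report shape (the check() helper is illustrative; only kstm_report's behavior comes from the snippet):

    import errno

    failed_tests = 0
    total_tests = 0

    def check(condition):
        # One test: bump the counters the way the header's macros do.
        global failed_tests, total_tests
        total_tests += 1
        if not condition:
            failed_tests += 1

    def kstm_report_sketch():
        # Mirror kstm_report(): warn on failure, map it to -EINVAL.
        if failed_tests:
            print("failed %d out of %d tests" % (failed_tests, total_tests))
        return -errno.EINVAL if failed_tests else 0

    check(1 + 1 == 2)
    check(2 + 2 == 5)                 # deliberately failing case
    status = kstm_report_sketch()     # -> -22, i.e. -EINVAL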
/external/jsoncpp/test/

D | runjsontests.py |
      96  failed_tests = []
     111  failed_tests.append((input_path, 'Parsing should have failed:\n%s' %
     118  failed_tests.append((input_path, 'Parsing failed:\n' + process_output))
     128  failed_tests.append((input_path, 'Parsing failed:\n' + process_output))
     136  failed_tests.append((input_path, detail))
     140  if failed_tests:
     143  for failed_test in failed_tests:
     147  print('Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
     148  len(failed_tests)))
     149  raise FailError(repr(failed_tests))
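runjsontests.py accumulates (input_path, detail) pairs rather than bare names, so the final report can say why each file failed before raising. A sketch of that accumulate-then-raise shape, assuming a run_one callback and the FailError stand-in below:

    class FailError(Exception):
        pass  # stand-in for the FailError raised by runjsontests.py

    def run_suite(tests, run_one):
        # run_one(input_path) -> (ok, detail); detail explains any failure.
        failed_tests = []
        for input_path in tests:
            ok, detail = run_one(input_path)
            if not ok:
                failed_tests.append((input_path, detail))
        if failed_tests:
            for input_path, detail in failed_tests:
                print('FAILED %s:\n%s' % (input_path, detail))
        print('Test results: %d passed, %d failed.'
              % (len(tests) - len(failed_tests), len(failed_tests)))
        if failed_tests:
            raise FailError(repr(failed_tests))

    run_suite(['ok.json'], lambda path: (True, ''))  # Test results: 1 passed, 0 failed.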
/external/autotest/client/bin/

D | setup_job.py |
     177  failed_tests = []
     180  failed_tests += broken_tests
     188  failed_tests.append(broken_test)
     195  elif not failed_tests:
     202  failed_tests.append(client_test.__class__.__name__)
     208  if failed_tests:
     210  for failed_test in failed_tests:
     216  if failed_tests:
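setup_job.py mixes two accumulation styles: extending the list with whole batches of broken tests and appending a class name when that is all it has. A hedged sketch of the fallback-to-class-name path (DemoTest and its setup failure are invented for illustration):

    class DemoTest:
        # Hypothetical stand-in for an autotest client test object.
        def setup(self):
            raise RuntimeError('missing dependency')

    def collect_broken(client_tests):
        failed_tests = []
        for client_test in client_tests:
            try:
                client_test.setup()
            except Exception:
                # Fall back to the class name when that is all there is
                # to report (the line 202 pattern).
                failed_tests.append(client_test.__class__.__name__)
        if failed_tests:
            for failed_test in failed_tests:
                print('broken test:', failed_test)
        return failed_tests

    collect_broken([DemoTest()])  # prints: broken test: DemoTest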
/external/OpenCL-CTS/test_conformance/api/

D | test_kernel_arg_info.cpp |
     487  int failed_tests = 0;  [in compare_kernel_with_expected(), local]
     522  failed_tests += compare_expected_actual(expected_args[i], actual);  [in compare_kernel_with_expected()]
     524  return failed_tests;  [in compare_kernel_with_expected()]
     598  int failed_tests = 0;  [in run_scalar_vector_tests(), local]
     678  failed_tests += compare_kernel_with_expected(  [in run_scalar_vector_tests()]
     700  failed_tests += compare_kernel_with_expected(  [in run_scalar_vector_tests()]
     702  return failed_tests;  [in run_scalar_vector_tests()]
     715  int failed_tests = 0;  [in run_pipe_tests(), local]
     752  failed_tests += compare_kernel_with_expected(  [in run_pipe_tests()]
     774  failed_tests += compare_kernel_with_expected(  [in run_pipe_tests()]
     [all …]
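The OpenCL CTS variant returns the failure count itself as the status, so callers can keep summing helper results upward. A small Python sketch of that count-propagation pattern (the argument lists are made up; only the += and return-count shape comes from the snippet):

    def compare_expected_actual(expected, actual):
        # Return 0 on match, 1 on mismatch, so results can be summed.
        return 0 if expected == actual else 1

    def compare_kernel_with_expected(expected_args, actual_args):
        # Sum per-argument failures; nonzero means the kernel failed.
        failed_tests = 0
        for expected, actual in zip(expected_args, actual_args):
            failed_tests += compare_expected_actual(expected, actual)
        return failed_tests

    # Callers keep adding the per-kernel counts upward the same way:
    failed_tests = 0
    failed_tests += compare_kernel_with_expected(['int', 'float'], ['int', 'float'])
    failed_tests += compare_kernel_with_expected(['int'], ['uint'])
    assert failed_tests == 1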
/external/autotest/server/cros/tradefed/

D | tradefed_utils.py |
     109  failed_tests = set()
     130  failed_tests.add(test_name)
     137  if failed_tests:
     139  '\n'.join(sorted(failed_tests)))
     176  failed_tests = set()
     196  failed_tests.add(testname)
     205  if failed_tests:
     207  '\n'.join(sorted(failed_tests)))
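tradefed_utils.py uses a set, so a test that fails repeatedly in the logs is reported once, and it joins the sorted names into a single message. A sketch of that de-duplicating parser; the FAILED regex here is an assumption, not tradefed's real log format:

    import re

    def parse_failures(log_lines):
        # A set de-duplicates tests that fail repeatedly in the logs.
        failed_tests = set()
        for line in log_lines:
            match = re.search(r'FAILED\s+(\S+)', line)
            if match:
                failed_tests.add(match.group(1))
        if failed_tests:
            raise RuntimeError('Failed tests:\n' + '\n'.join(sorted(failed_tests)))

    parse_failures(['PASSED testA'])                     # no exception
    # parse_failures(['FAILED testB', 'FAILED testB'])   # raises, listing testB once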
/external/autotest/client/site_tests/graphics_GLBench/

D | graphics_GLBench.py |
     173  failed_tests = {}
     205  failed_tests[testname] = 'GLError'
     215  failed_tests[testname] = imagefile
     234  failed_tests[testname] = imagefile
     242  if failed_tests:
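graphics_GLBench.py keeps a dict instead of a list, mapping each failing test to its evidence: either the 'GLError' tag or the path of the image that mismatched. A short sketch of that name-to-evidence mapping (the test names and path are hypothetical):

    failed_tests = {}

    def record_failure(testname, evidence):
        # Map the failing test to what went wrong: an error tag or the
        # path of the image that mismatched.
        failed_tests[testname] = evidence

    record_failure('swap_swap', 'GLError')
    record_failure('fill_solid', '/tmp/fill_solid.png')   # hypothetical path

    if failed_tests:
        for testname, evidence in failed_tests.items():
            print('%s failed (%s)' % (testname, evidence))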
/external/vulkan-validation-layers/scripts/

D | parse_test_results.py |
      68  failed_tests = 0
      89  failed_tests += 1
     103  if failed_tests != 0:
     105  print("FAILED: ", failed_tests, "/", num_tests, "tests")
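parse_test_results.py needs only two integers and a final "FAILED: f / n" line. A direct sketch of that tally loop:

    def summarize(results):
        # Two integers are the whole state: scanned tests and failures.
        failed_tests = 0
        num_tests = 0
        for passed in results:
            num_tests += 1
            if not passed:
                failed_tests += 1
        if failed_tests != 0:
            print("FAILED: ", failed_tests, "/", num_tests, "tests")
        return failed_tests

    summarize([True, False, True])   # prints: FAILED:  1 / 3 tests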
/external/pigweed/pw_unit_test/py/pw_unit_test/

D | rpc.py |
      61  def run_all_tests_end(self, passed_tests: int, failed_tests: int):
      87  def run_all_tests_end(self, passed_tests: int, failed_tests: int):
      90  if failed_tests:
      91  _LOG.info('[ FAILED ] %d test(s).', failed_tests)
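The pw_unit_test Python side inverts the flow: the runner pushes the final counts into an event-handler callback rather than having callers poll a counter. A sketch of that callback shape, not pigweed's actual class hierarchy:

    import logging

    _LOG = logging.getLogger(__name__)

    class EventHandler:
        # The runner pushes final counts here instead of exposing a counter.
        def run_all_tests_end(self, passed_tests: int, failed_tests: int):
            raise NotImplementedError

    class LoggingEventHandler(EventHandler):
        def run_all_tests_end(self, passed_tests: int, failed_tests: int):
            _LOG.info('[ PASSED ] %d test(s).', passed_tests)
            if failed_tests:
                _LOG.info('[ FAILED ] %d test(s).', failed_tests)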
/external/capstone/suite/cstest/

D | cstest_report.py |
      21  failed_tests = []
      26  failed_tests.append(match.group(1))
      38  details.append((parts[1], failed_tests[counter], parts[2]))
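cstest_report.py harvests failing test names from log text with match.group(1), then pairs them positionally with other parsed fields (line 38). A sketch with an assumed 'FAIL:' log format and invented parts tuples:

    import re

    FAIL_RE = re.compile(r'^FAIL:\s*(\S+)')   # assumed log format

    failed_tests = []
    for line in ['OK: test_add', 'FAIL: test_mul', 'FAIL: test_div']:
        match = FAIL_RE.match(line)
        if match:
            failed_tests.append(match.group(1))

    # Names are later paired with other parsed fields by position, as on
    # line 38; the parts tuples here are invented.
    details = []
    for counter, parts in enumerate([('', 'arm', 'x'), ('', 'x86', 'y')]):
        details.append((parts[1], failed_tests[counter], parts[2]))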
/external/pigweed/pw_unit_test/

D | logging_event_handler.cc |
      34  if (run_tests_summary.failed_tests) {  [in RunAllTestsEnd()]
      35  PW_LOG_ERROR("[ FAILED ] %d test(s).", run_tests_summary.failed_tests);  [in RunAllTestsEnd()]
D | simple_printing_event_handler.cc |
      31  if (run_tests_summary.failed_tests) {  [in RunAllTestsEnd()]
      32  WriteLine("[ FAILED ] %d test(s).", run_tests_summary.failed_tests);  [in RunAllTestsEnd()]
D | framework.cc |
      52  run_tests_summary_.failed_tests = 0;  [in RunAllTests()]
      93  run_tests_summary_.failed_tests++;  [in EndCurrentTest()]
D | unit_test_service.cc |
      95  test_run_end.WriteFailed(summary.failed_tests);  [in WriteTestRunEnd()]
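The C++ side of pw_unit_test threads a summary struct through every consumer: both event handlers branch on failed_tests before emitting the "[ FAILED ]" line, and the service writes the same count over RPC. A Python stand-in for that struct-plus-handler flow (RunTestsSummary here is a sketch, not the real pw_unit_test type):

    from dataclasses import dataclass

    @dataclass
    class RunTestsSummary:
        # Python stand-in for the C++ summary struct handlers receive.
        passed_tests: int = 0
        failed_tests: int = 0

    def run_all_tests_end(run_tests_summary):
        # Every handler branches on failed_tests before emitting the line.
        if run_tests_summary.failed_tests:
            print('[ FAILED ] %d test(s).' % run_tests_summary.failed_tests)

    run_all_tests_end(RunTestsSummary(passed_tests=9, failed_tests=2))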
/external/autotest/server/site_tests/enterprise_CFM_Test/

D | enterprise_CFM_Test.py |
     797  failed_tests, failed_verifications,  [argument]
     834  for testname, counter in failed_tests.iteritems():
     876  failed_tests = {}
     906  if not test in failed_tests.keys():
     907  failed_tests[test] = 1
     909  failed_tests[test] += 1
     947  failed_tests,
     970  failed_tests,
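enterprise_CFM_Test.py runs its tests in repeated loops, so the dict holds a per-test failure counter rather than a flag (the real file still uses Python 2's iteritems). A sketch of that counting pattern in Python 3:

    failed_tests = {}

    def note_failure(test):
        # Count repeats across loops instead of storing a boolean flag.
        if test not in failed_tests:
            failed_tests[test] = 1
        else:
            failed_tests[test] += 1

    for test in ['reboot', 'mute', 'reboot']:
        note_failure(test)

    for testname, counter in failed_tests.items():
        print('%s failed %d time(s)' % (testname, counter))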
/external/pigweed/pw_unit_test/public/pw_unit_test/

D | event_handler.h |
      86  int failed_tests;  [member]
D | framework.h |
     161  .failed_tests = 0,  [in Framework()]
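event_handler.h declares the failed_tests member and framework.h zero-initializes it; framework.cc (above) re-zeroes it in RunAllTests() and increments it in EndCurrentTest(). A sketch of that per-run reset-and-increment lifecycle:

    def run_all_tests(tests):
        # A fresh zero per run models the struct's re-initialization;
        # each failing test bumps the counter as it ends.
        failed_tests = 0
        for test in tests:
            if not test():
                failed_tests += 1
        return failed_tests

    assert run_all_tests([lambda: True, lambda: False]) == 1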
/external/autotest/site_utils/

D | generate_test_report |
     687  failed_tests = len([key for key in results.keys() if results[key][
     691  total_tests = passed_tests + failed_tests + na_tests
     756  failed_tests, na_tests, total_tests)
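generate_test_report derives its counts by filtering the results dict, then sums the buckets into a total. A sketch assuming a 'status' field, since the snippet's indexing expression is cut off:

    def tally(results):
        # Each bucket filters the same dict; the 'status' field is an
        # assumption, since the snippet's indexing expression is cut off.
        passed_tests = len([k for k in results if results[k]['status'] == 'PASS'])
        failed_tests = len([k for k in results if results[k]['status'] == 'FAIL'])
        na_tests = len([k for k in results if results[k]['status'] == 'NA'])
        total_tests = passed_tests + failed_tests + na_tests
        return passed_tests, failed_tests, na_tests, total_tests

    print(tally({'t1': {'status': 'PASS'}, 't2': {'status': 'FAIL'}}))  # (1, 1, 0, 2)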
/external/angle/src/tests/

D | capture_replay_tests.py |
     875  failed_tests = []
     935  failed_tests.append(failed_test)
     954  if len(failed_tests):
     956  for failed_test in sorted(failed_tests):
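capture_replay_tests.py sorts the collected names before printing, which keeps failure listings deterministic across runs. A minimal sketch of that sorted report (trace names invented):

    failed_tests = []
    for failed_test, ok in [('Trace2', False), ('Trace1', False), ('Trace3', True)]:
        if not ok:
            failed_tests.append(failed_test)

    if len(failed_tests):                         # truthiness check, as on line 954
        print('%d test(s) failed:' % len(failed_tests))
        for failed_test in sorted(failed_tests):  # deterministic order
            print('    ' + failed_test)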
/external/rust/crates/grpcio-sys/grpc/tools/run_tests/

D | run_xds_tests.py |
    2119  failed_tests = []  [variable]
    2230  failed_tests.append(test_case)
    2257  if failed_tests:
    2258  logger.error('Test case(s) %s failed', failed_tests)
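run_xds_tests.py keeps the list at module scope and keeps going after each failure, logging the whole batch at the end. A sketch of that keep-going pattern (the test-case names and bodies are invented):

    import logging

    logger = logging.getLogger(__name__)
    failed_tests = []   # module level, shared across all test-case runs

    def run_case(test_case, body):
        # Record the failure and keep going instead of aborting the batch.
        try:
            body()
        except Exception:
            failed_tests.append(test_case)

    run_case('ping_pong', lambda: None)
    run_case('round_robin', lambda: 1 / 0)   # fails

    if failed_tests:
        logger.error('Test case(s) %s failed', failed_tests)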