/external/vboot_reference/tests/

D | test_common.c |
  21  int TEST_EQ(int result, int expected_result, const char* testname) {  in TEST_EQ() argument
  23      fprintf(stderr, "%s Test " COL_GREEN "PASSED\n" COL_STOP, testname);  in TEST_EQ()
  26      fprintf(stderr, "%s Test " COL_RED "FAILED\n" COL_STOP, testname);  in TEST_EQ()
  34  int TEST_NEQ(int result, int not_expected_result, const char* testname) {  in TEST_NEQ() argument
  36      fprintf(stderr, "%s Test " COL_GREEN "PASSED\n" COL_STOP, testname);  in TEST_NEQ()
  39      fprintf(stderr, "%s Test " COL_RED "FAILED\n" COL_STOP, testname);  in TEST_NEQ()
  48              const char* testname) {  in TEST_PTR_EQ() argument
  50      fprintf(stderr, "%s Test " COL_GREEN "PASSED\n" COL_STOP, testname);  in TEST_PTR_EQ()
  53      fprintf(stderr, "%s Test " COL_RED "FAILED\n" COL_STOP, testname);  in TEST_PTR_EQ()
  62              const char* testname) {  in TEST_PTR_NEQ() argument
  [all …]
|
D | test_common.h |
  14  int TEST_EQ(int result, int expected_result, const char* testname);
  18  int TEST_NEQ(int result, int not_expected_result, const char* testname);
  24              const char* testname);
  30              const char* testname);
  35              const char* testname);
  39  int TEST_TRUE(int result, const char* testname);
  43  int TEST_FALSE(int result, const char* testname);
  47  int TEST_SUCC(int result, const char* testname);
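The two files above implement vboot's tiny assert-and-report helpers: compare, print a colored PASSED/FAILED line tagged with the test name, return success. A minimal Python sketch of the same pattern, assuming nothing beyond what the snippets show (the color codes are plain ANSI escapes, not vboot constants):

COL_GREEN, COL_RED, COL_STOP = '\033[1;32m', '\033[1;31m', '\033[0m'

def test_eq(result, expected, testname):
    # Mirror TEST_EQ: report PASS when result == expected.
    if result == expected:
        print('%s Test %sPASSED%s' % (testname, COL_GREEN, COL_STOP))
        return 1
    print('%s Test %sFAILED%s' % (testname, COL_RED, COL_STOP))
    return 0

def test_neq(result, not_expected, testname):
    # Mirror TEST_NEQ: success means the values differ.
    return test_eq(result != not_expected, True, testname)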
|
/external/autotest/server/site_tests/platform_GCC/

D | platform_GCC.py |
  25      result, testname = line.split(': ', 1)
  26      testname = testname.strip()
  27      if testname in results:
  28          counts[testname] += 1
  29          unique_testname = '%s (%d)' % (testname, counts[testname])
  31          counts[testname] = 1
  32          unique_testname = testname
  47      for testname in new_results.keys():
  48          if testname not in baseline_results:
  49              differences.append((testname, 'NOTEXECUTED',
  [all …]
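The platform_GCC snippet de-duplicates repeated test names by appending a running count. A small Python sketch of that idiom; the 'STATUS: testname' input format comes from the split above, everything else is illustrative:

def parse_results(lines):
    results, counts = {}, {}
    for line in lines:
        result, testname = line.split(': ', 1)
        testname = testname.strip()
        if testname in results:
            # Same name seen before: disambiguate with a counter suffix.
            counts[testname] += 1
            testname = '%s (%d)' % (testname, counts[testname])
        else:
            counts[testname] = 1
        results[testname] = result
    return results

parse_results(['PASS: foo', 'FAIL: foo'])  # {'foo': 'PASS', 'foo (2)': 'FAIL'}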
|
/external/ltp/testcases/kernel/syscalls/membarrier/

D | membarrier01.c |
  29      char testname[80];  member
  50      .testname = "cmd_fail",
  62      .testname = "cmd_flags_fail",
  75      .testname = "cmd_global_success",
  90      .testname = "cmd_private_expedited_fail",
  102     .testname = "cmd_private_expedited_register_success",
  118     .testname = "cmd_private_expedited_success",
  137     .testname = "cmd_private_expedited_sync_core_fail",
  149     .testname = "cmd_private_expedited_sync_core_register_success",
  162     .testname = "cmd_private_expedited_sync_core_success",
  [all …]
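membarrier01.c drives everything from a table of cases, each carrying a short testname plus its inputs and expected outcome. A Python sketch of the same table-driven shape; the fields and values here are illustrative, not LTP's real struct:

from dataclasses import dataclass

@dataclass
class Case:
    testname: str
    cmd: int
    flags: int
    expect_fail: bool

CASES = [
    Case('cmd_fail', cmd=-1, flags=0, expect_fail=True),
    Case('cmd_flags_fail', cmd=0, flags=1, expect_fail=True),
    Case('cmd_global_success', cmd=1, flags=0, expect_fail=False),
]

def run_case(case, membarrier):
    # membarrier is any callable returning 0 on success.
    succeeded = membarrier(case.cmd, case.flags) == 0
    verdict = 'PASS' if succeeded != case.expect_fail else 'FAIL'
    print('%s: %s' % (case.testname, verdict))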
|
/external/autotest/tko/parsers/

D | version_1.py |
  192     def __init__(self, indent, status, subdir, testname, reason,  argument
  198         self.status = self.subdir = self.testname = self.reason = None
  203             testname, reason,
  218         if self.testname != 'reboot':
  260     def make_dummy_abort(indent, subdir, testname, timestamp, reason):  argument
  276         if not testname:
  277             testname = '----'
  285         return msg % (subdir, testname, timestamp_field, reason)
  290             line_buffer, line, indent, subdir, testname, timestamp, reason):  argument
  306             indent, subdir, testname, timestamp, reason)
  [all …]
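make_dummy_abort() above synthesizes an abort record when a job dies without writing one, substituting '----' for a missing testname. A Python sketch of that fallback; only the '----' substitution and the timestamp_field name are taken from the snippet, the emitted line's exact layout is an assumption:

def make_dummy_abort(subdir, testname, timestamp, reason):
    if not testname:
        testname = '----'
    # Hypothetical tab-separated layout; version_1.py builds its own msg.
    timestamp_field = '\ttimestamp=%d' % timestamp if timestamp else ''
    return 'END ABORT\t%s\t%s%s\t%s' % (subdir, testname, timestamp_field, reason)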
|
D | version_0.py |
  205     def __init__(self, subdir, testname, status, reason, test_kernel,  argument
  217         super(test, self).__init__(subdir, testname, status, reason,
  245     def __init__(self, indent, status, subdir, testname, reason,  argument
  263         self.testname = self.parse_name(testname)
  297         status, subdir, testname = parts[0:part_index]
  313         return cls(indent, status, subdir, testname, reason,
  351             not line.testname):
  360             not line.testname):
  383             line.testname, line.reason])
  392         if (line.testname == "Autotest.install" and
  [all …]
|
/external/autotest/client/site_tests/kernel_LTP/

D | ltp-diff.py |
  52      testname = s.group(1)
  54      runs[i][testname] = status
  55      testnames[testname] = 1
  80      for testname in all_testnames:
  85          if not runs[i].has_key(testname):
  86              runs[i][testname] = "null"
  87          if not runs[i-1].has_key(testname):
  88              runs[i-1][testname] = "null"
  90          if runs[i][testname] != runs[i-1][testname]:
  101     testname_cleaned = re.sub('\s+', ',', testname)
  [all …]
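Both ltp-diff.py copies compare consecutive runs after padding missing testnames with "null". A Python 3 sketch of the comparison (the originals are Python 2, hence has_key; the layout of one {testname: status} dict per run is assumed from the indexing above):

def diff_runs(runs):
    all_testnames = set()
    for run in runs:
        all_testnames.update(run)
    changed = []
    for i in range(1, len(runs)):
        for testname in sorted(all_testnames):
            prev = runs[i - 1].get(testname, 'null')  # pad absent tests
            curr = runs[i].get(testname, 'null')
            if prev != curr:
                changed.append((testname, prev, curr))
    return changed

diff_runs([{'t1': 'PASS'}, {'t1': 'FAIL', 't2': 'PASS'}])
# [('t1', 'PASS', 'FAIL'), ('t2', 'null', 'PASS')]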
|
/external/autotest/client/tests/ltp/

D | ltp-diff.py |
  49      testname = s.group(1)
  51      runs[i][testname] = status
  52      testnames[testname] = 1
  75      for testname in all_testnames:
  80          if not runs[i].has_key(testname):
  81              runs[i][testname] = "null"
  82          if not runs[i-1].has_key(testname):
  83              runs[i-1][testname] = "null"
  85          if runs[i][testname] != runs[i-1][testname]:
  96      testname_cleaned = re.sub('\s+', ',', testname)
  [all …]
|
/external/u-boot/lib/efi_selftest/

D | efi_selftest.c |
  151  static struct efi_unit_test *find_test(const u16 *testname)  in find_test() argument
  157      if (!efi_st_strcmp_16_8(testname, test->name))  in find_test()
  160      efi_st_printf("\nTest '%ps' not found\n", testname);  in find_test()
  188  void efi_st_do_tests(const u16 *testname, unsigned int phase,  in efi_st_do_tests() argument
  195      if (testname ?  in efi_st_do_tests()
  196          efi_st_strcmp_16_8(testname, test->name) : test->on_request)  in efi_st_do_tests()
  230      const u16 *testname = NULL;  in efi_selftest() local
  249      testname = (u16 *)loaded_image->load_options;  in efi_selftest()
  251      if (testname) {  in efi_selftest()
  252          if (!efi_st_strcmp_16_8(testname, "list") ||  in efi_selftest()
  [all …]
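efi_selftest looks a test up by name and, when no name is passed, runs only the tests not flagged on_request (those must be requested explicitly). A Python sketch of that selection logic; the registry shape is an assumption:

TESTS = {}  # testname -> (function, on_request flag), registered elsewhere

def do_tests(testname=None):
    found = False
    for name, (func, on_request) in TESTS.items():
        if testname is not None and name != testname:
            continue                  # a specific test was requested
        if testname is None and on_request:
            continue                  # on_request tests run only by name
        func()
        found = True
    if testname is not None and not found:
        print("\nTest '%s' not found" % testname)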
|
/external/skia/modules/pathkit/tests/

D | testReporter.js |
  6   function reportCanvas(canvas, testname, outputType='canvas') {  argument
  8       return _report(b64, outputType, testname);
  11  function reportSVG(svg, testname) {  argument
  27      _report(b64, 'svg', testname).then(() => {
  38  function reportSVGString(svgstr, testname, fillRule='nofill') {  argument
  54      return reportSVG(newSVG, testname);
  58  function reportPath(path, testname, done) {  argument
  67      return reportCanvas(canvas, testname).then(() => {
  68          reportSVGString(svgStr, testname).then(() => {
  76  function _report(data, outputType, testname) {  argument
  [all …]
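The PathKit reporter funnels every rendering through _report(), shipping a base64 payload plus the testname to a local reporting server. A rough Python transliteration of that flow; the URL and payload field names are assumptions, not Skia's actual protocol:

import base64
import json
import urllib.request

def report(data, output_type, testname):
    payload = json.dumps({
        'output_type': output_type,
        'data': base64.b64encode(data).decode('ascii'),
        'test_name': testname,
    }).encode('utf-8')
    req = urllib.request.Request(
        'http://localhost:8081/report',  # hypothetical endpoint
        data=payload,
        headers={'Content-Type': 'application/json'})
    return urllib.request.urlopen(req)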
|
/external/skqp/modules/pathkit/tests/

D | testReporter.js |
  6   function reportCanvas(canvas, testname, outputType='canvas') {  argument
  8       return _report(b64, outputType, testname);
  11  function reportSVG(svg, testname) {  argument
  27      _report(b64, 'svg', testname).then(() => {
  38  function reportSVGString(svgstr, testname, fillRule='nofill') {  argument
  54      return reportSVG(newSVG, testname);
  58  function reportPath(path, testname, done) {  argument
  67      return reportCanvas(canvas, testname).then(() => {
  68          reportSVGString(svgStr, testname).then(() => {
  76  function _report(data, outputType, testname) {  argument
  [all …]
|
/external/autotest/client/site_tests/graphics_GLBench/

D | graphics_GLBench.py |
  93      def is_no_checksum_test(self, testname):  argument
  99          if testname.startswith(prefix):
  207         testname = key.strip()
  216             (unit, testname))
  219         perf_value_name = '%s_%s' % (unit, testname)
  241         keyvals[testname] = -3.0
  242         failed_tests[testname] = 'GLError'
  246         keyvals[testname] = 0.0
  250         keyvals[testname] = -2.0
  252         failed_tests[testname] = imagefile
  [all …]
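graphics_GLBench names each perf value '<unit>_<testname>' and stores sentinel scores for failures (the snippet pairs -3.0 with 'GLError' and -2.0 with an image file). A Python sketch of that bookkeeping; the meaning attached to each sentinel is inferred from the snippet, not documented here:

def record_result(keyvals, failed_tests, testname, unit, score,
                  gl_error=False, imagefile=None):
    perf_value_name = '%s_%s' % (unit, testname)
    if gl_error:
        keyvals[testname] = -3.0          # sentinel: GL error during run
        failed_tests[testname] = 'GLError'
    elif imagefile:
        keyvals[testname] = -2.0          # sentinel: image check failed
        failed_tests[testname] = imagefile
    else:
        keyvals[testname] = score
    return perf_value_name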
|
/external/autotest/client/bin/

D | job_unittest.py |
  384     testname = "error_test"
  385     outputdir = os.path.join(self.job.resultdir, testname)
  387         testname, 'test').and_return(("", testname))
  389     self.job.record.expect_call("START", testname, testname,
  391     self.job._runtest.expect_call(testname, "", None, (), {}).and_raises(
  393     self.job.record.expect_call("ERROR", testname, testname,
  395     self.job.record.expect_call("END ERROR", testname, testname)
  400     self.job.run_test(testname)
  419     testname = "error_test"
  420     outputdir = os.path.join(self.job.resultdir, testname)
  [all …]
|
/external/autotest/server/cros/dynamic_suite/

D | tools.py |
  266     def _testname_to_keyval_key(testname):  argument
  278         return testname.replace('/', '_')
  285     def create_bug_keyvals(job_id, testname, bug_info):  argument
  295         testname = _testname_to_keyval_key(testname)
  296         keyval_base = '%s_%s' % (job_id, testname) if job_id else testname
  303     def get_test_failure_bug_info(keyvals, job_id, testname):  argument
  326         testname = _testname_to_keyval_key(testname)
  327         keyval_base = '%s_%s' % (job_id, testname) if job_id else testname
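tools.py normalizes a testname into a keyval-safe key ('/' becomes '_') and prefixes the job id when one exists. A direct Python sketch of those two visible lines:

def testname_to_keyval_key(testname):
    # Keyval keys cannot contain '/', so flatten path-style test names.
    return testname.replace('/', '_')

def keyval_base(job_id, testname):
    testname = testname_to_keyval_key(testname)
    return '%s_%s' % (job_id, testname) if job_id else testname

keyval_base(123, 'suite/test')  # '123_suite_test'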
|
/external/icu/icu4j/perf-tests/

D | perftests.pl |
  83      my $testname = $shortNames[$index];
  95      my $testName = "DateFmt-$testname-pat$patternCounter-JDK";
  105     my $testName = "DateFmt-$testname-pat$patternCounter";
  128     my $testname = $shortNames[$index];
  139     my $testName = "NumFmt-$testname-$patternName-JDK";
  149     my $testName = "NumFmt-$testname-$patternName";
  230     my $testname = "Coll-$locale-data$counter-StrCol";
  232     $ICU->setAttribute("test"=> $testname);
  238     my $testname = "Coll-$locale-data$counter-keyGen";
  239     $Key->setAttribute("test"=> $testname);
  [all …]
|
/external/eigen/cmake/

D | EigenTesting.cmake |
  12   macro(ei_add_test_internal testname testname_with_suffix)
  16       set(filename ${testname}.${EIGEN_ADD_TEST_FILENAME_EXTENSION})
  18       set(filename ${testname}.cpp)
  62       ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_FUNC=${testname}")
  113  macro(ei_add_test_internal_sycl testname testname_with_suffix)
  118      set(filename ${testname}.${EIGEN_ADD_TEST_FILENAME_EXTENSION})
  120      set(filename ${testname}.cpp)
  135      add_custom_target(${testname}_integration_header_sycl DEPENDS ${include_file})
  138      add_dependencies(${targetname} ${testname}_integration_header_sycl)
  157      ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_FUNC=${testname}")
  [all …]
|
/external/webrtc/webrtc/tools/rtcbot/

D | main.js |
  74  function runTest(testname) {  argument
  75      if (testname in testSuites) {
  76          console.log("Running test: " + testname);
  78          testSuites[testname](test);
  80          console.log("Unknown test: " + testname);
  89      for (var testname in testSuites)
  90          console.log('  ' + testname);
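rtcbot dispatches by looking the testname up in a suite table and listing the known names on a miss. The same shape in Python; the table's contents are whatever was registered elsewhere:

test_suites = {}  # testname -> callable, filled in by the test files

def run_test(testname):
    if testname in test_suites:
        print('Running test: ' + testname)
        test_suites[testname]()
    else:
        print('Unknown test: ' + testname)
        for name in test_suites:
            print('  ' + name)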
|
/external/elfutils/tests/

D | backtrace-subr.sh |
  76      testname=$3
  77      check_main $bt $testname
  78      check_gsignal $bt $testname
  79      check_err $err $testname
  85      testname=$2
  87      echo >&2 $testname: arch not supported
  96      testname=$2
  97      check_unsupported $err $testname
  105     echo >&2 $testname: arm needs debuginfo installed for all libraries
|
/external/eigen/blas/testing/

D | CMakeLists.txt |
  2   macro(ei_add_blas_test testname)
  4       set(targetname ${testname})
  6       set(filename ${testname}.f)
  17      add_test(${testname} "${Eigen_SOURCE_DIR}/blas/testing/runblastest.sh" "${testname}" "${Eigen_SOUR…
|
/external/autotest/server/cros/

D | tradefed_utils.py |
  149     testname = match.group(1)
  150     if waivers and testname in waivers:
  151         waived_count[testname] = waived_count.get(testname, 0) + 1
  153         failed_tests.add(testname)
  168     for testname, fail_count in waived_count.items():
  175         'abis: %s', fail_count, testname, len(abis), abis)
  177         waived += [testname] * fail_count
  178     logging.info('Waived failure for %s %d time(s)', testname, fail_count)
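tradefed_utils counts waived failures separately from real ones: a failing testname found in the waiver set increments a counter instead of landing in failed_tests. A Python sketch of that split, mirroring the lines above:

def classify_failures(failed_names, waivers):
    waived_count, failed_tests = {}, set()
    for testname in failed_names:
        if waivers and testname in waivers:
            waived_count[testname] = waived_count.get(testname, 0) + 1
        else:
            failed_tests.add(testname)
    return failed_tests, waived_count

classify_failures(['a', 'b', 'b'], {'b'})  # ({'a'}, {'b': 2})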
|
/external/autotest/server/site_tests/stress_ClientTestReboot/

D | control |
  23      --args="testname=NAME loops=N"
  29      if 'testname' in args_dict and 'loops' in args_dict:
  30          testname = args_dict.get('testname')
  33          testname=testname, loops=loops)
  37          '"testname=<test> loops=<number>"')
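The control file only proceeds when both keys arrive via --args. A Python sketch of that gate; args_dict is normally populated by autotest's argument parsing and is simulated here:

args_dict = {'testname': 'dummy_Pass', 'loops': '3'}  # stand-in for --args

if 'testname' in args_dict and 'loops' in args_dict:
    testname = args_dict.get('testname')
    loops = int(args_dict.get('loops'))
    print('would run %s %d time(s)' % (testname, loops))
else:
    raise ValueError('usage: --args="testname=<test> loops=<number>"')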
|
/external/ltp/testcases/open_posix_testsuite/bin/

D | run-tests.sh |
  45      testname="$TEST_PATH/${1%.*}"
  47      complog=$(basename $testname).log.$$
  54      echo "$testname: execution: PASS" >> "${LOGFILE}"
  79      echo "$testname: execution: $msg: Output: " >> "${LOGFILE}"
  81      echo "$testname: execution: $msg "
  83      echo "$testname: execution: SKIPPED (test not present)"
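run-tests.sh derives the testname from the test's source path and appends one "testname: execution: STATUS" line per test. A Python rendering of the same flow; the log format is taken from the echoes above, the runner itself is a simplification:

import os
import subprocess

def run_one(test_path, logfile):
    testname = os.path.splitext(test_path)[0]
    if not os.path.exists(test_path):
        print('%s: execution: SKIPPED (test not present)' % testname)
        return
    rc = subprocess.call([test_path])
    msg = 'PASS' if rc == 0 else 'FAILED'
    with open(logfile, 'a') as log:
        log.write('%s: execution: %s\n' % (testname, msg))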
|
/external/python/cpython3/Lib/test/

D | test_regrtest.py |
  903     testname = self.create_test(code=code)
  906     all_methods = ['%s.Tests.test_method1' % testname,
  907                    '%s.Tests.test_method2' % testname]
  908     output = self.run_tests('--list-cases', testname)
  912     all_methods = ['%s.Tests.test_method1' % testname]
  915         testname)
  949     testname = self.create_test(code=code)
  952     output = self.run_tests("-v", testname)
  964         '%s.Tests.test_method3' % testname]
  969     output = self.run_tests("-v", "--matchfile", filename, testname)
  [all …]
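Both regrtest suites expect --list-cases to print fully qualified ids of the form '<testname>.<ClassName>.<method>'. A Python sketch that produces that listing shape; the {class: methods} input layout is an assumption:

def list_cases(testname, classes):
    for cls, methods in classes.items():
        for method in methods:
            print('%s.%s.%s' % (testname, cls, method))

list_cases('test_x', {'Tests': ['test_method1', 'test_method2']})
# test_x.Tests.test_method1
# test_x.Tests.test_method2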
|
/external/autotest/client/cros/power/

D | power_dashboard.py |
  69      def __init__(self, logger, testname, start_ts=None, resultsdir=None,  argument
  82          self._testname = testname
  290     def __init__(self, logger, testname, start_ts=None, resultsdir=None,  argument
  303         super(ClientTestDashboard, self).__init__(logger, testname, start_ts,
  366     def __init__(self, logger, testname, resultsdir=None, uploadurl=None,  argument
  368         super(MeasurementLoggerDashboard, self).__init__(logger, testname, None,
  450     def __init__(self, logger, testname, resultsdir=None, uploadurl=None,  argument
  454         super(PowerLoggerDashboard, self).__init__(logger, testname, resultsdir,
  464     def __init__(self, logger, testname, resultsdir=None, uploadurl=None,  argument
  468         super(TempLoggerDashboard, self).__init__(logger, testname, resultsdir,
  [all …]
|
/external/python/cpython2/Lib/test/

D | test_regrtest.py |
  568     testname = self.create_test(code=code)
  571     all_methods = ['%s.Tests.test_method1' % testname,
  572                    '%s.Tests.test_method2' % testname]
  573     output = self.run_tests('--list-cases', testname)
  577     all_methods = ['%s.Tests.test_method1' % testname]
  580         testname)
  621     testname = self.create_test(code=code)
  624     output = self.run_tests("-v", testname)
  636         '%s.Tests.test_method3' % testname]
  641     output = self.run_tests("-v", "--matchfile", filename, testname)
  [all …]
|