/tools/testing/selftests/powerpc/ptrace/ |
D | ptrace-vsx.h |
    15  int i;  in validate_vsx() local
    17  for (i = 0; i < VSX_MAX; i++) {  in validate_vsx()
    18  if (vsx[i] != load[2 * i + 1]) {  in validate_vsx()
    20  i, vsx[i], 2 * i + 1, load[2 * i + 1]);  in validate_vsx()
    33  int i;  in validate_vmx() local
    35  for (i = 0; i < VMX_MAX; i++) {  in validate_vmx()
    37  if ((vmx[i][0] != load[64 + 2 * i]) ||  in validate_vmx()
    38  (vmx[i][1] != load[65 + 2 * i])) {  in validate_vmx()
    40  i, vmx[i][0], 64 + 2 * i,  in validate_vmx()
    41  load[64 + 2 * i]);  in validate_vmx()
    [all …]
|
D | ptrace-gpr.h |
    23  int i, found = 1;  in validate_gpr() local
    25  for (i = 0; i < 18; i++) {  in validate_gpr()
    26  if (gpr[i] != val) {  in validate_gpr()
    28  i+14, gpr[i], val);  in validate_gpr()
    41  int i, found = 1;  in validate_fpr() local
    43  for (i = 0; i < 32; i++) {  in validate_fpr()
    44  if (fpr[i] != val) {  in validate_fpr()
    45  printf("FPR[%d]: %lx Expected: %lx\n", i, fpr[i], val);  in validate_fpr()
    58  int i, found = 1;  in validate_fpr_float() local
    60  for (i = 0; i < 32; i++) {  in validate_fpr_float()
    [all …]
|
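The ptrace-gpr.h helpers above share one compare-and-report shape: walk a fixed-size array of saved register values and print every slot that differs from the expected fill value. A minimal stand-alone sketch of that pattern in plain C; the 18-entry r14..r31 window and the GPR[] message come from the snippet, while the function name and the injected mismatch in main() are illustrative:

    #include <stdio.h>

    /* Validate 18 saved non-volatile GPRs (r14..r31) against an expected
     * fill value; return 0 on success, 1 if any slot differs. */
    static int validate_gpr_sketch(const unsigned long gpr[18], unsigned long val)
    {
        int i, found = 1;

        for (i = 0; i < 18; i++) {
            if (gpr[i] != val) {
                printf("GPR[%d]: %lx Expected: %lx\n", i + 14, gpr[i], val);
                found = 0;
            }
        }
        return found ? 0 : 1;
    }

    int main(void)
    {
        unsigned long regs[18];
        int i;

        for (i = 0; i < 18; i++)
            regs[i] = 0x1010101010101010UL;
        regs[5] = 0;    /* inject one mismatch to exercise the report path */

        return validate_gpr_sketch(regs, 0x1010101010101010UL);
    }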
/tools/testing/selftests/zram/ |
D | zram_lib.sh |
    50  local i=
    51  for i in $(seq $dev_start $dev_makeswap); do
    52  swapoff /dev/zram$i
    55  for i in $(seq $dev_start $dev_mounted); do
    56  umount /dev/zram$i
    59  for i in $(seq $dev_start $dev_end); do
    60  echo 1 > /sys/block/zram${i}/reset
    61  rm -rf zram$i
    65  for i in $(seq $dev_start $dev_end); do
    66  echo $i > /sys/class/zram-control/hot_remove
    [all …]
|
/tools/testing/radix-tree/ |
D | idr-test.c |
    35  unsigned long i;  in idr_alloc_test() local
    43  for (i = 0x3ffe; i < 0x4003; i++) {  in idr_alloc_test()
    47  if (i < 0x4000)  in idr_alloc_test()
    48  item = item_create(i, 0);  in idr_alloc_test()
    50  item = item_create(i - 0x3fff, 0);  in idr_alloc_test()
    78  int i;  in idr_null_test() local
    93  for (i = 0; i < 10; i++) {  in idr_null_test()
    94  assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);  in idr_null_test()
    105  for (i = 0; i < 9; i++) {  in idr_null_test()
    106  idr_remove(&idr, i);  in idr_null_test()
    [all …]
|
D | multiorder.c |
    38  int i, j, err;  in multiorder_iteration() local
    46  for (i = 0; i < NUM_ENTRIES; i++) {  in multiorder_iteration()
    47  err = item_insert_order(xa, index[i], order[i]);  in multiorder_iteration()
    52  for (i = 0; i < NUM_ENTRIES; i++)  in multiorder_iteration()
    53  if (j <= (index[i] | ((1 << order[i]) - 1)))  in multiorder_iteration()
    58  int height = order[i] / XA_CHUNK_SHIFT;  in multiorder_iteration()
    60  unsigned long mask = (1UL << order[i]) - 1;  in multiorder_iteration()
    62  assert((xas.xa_index | mask) == (index[i] | mask));  in multiorder_iteration()
    65  assert((item->index | mask) == (index[i] | mask));  in multiorder_iteration()
    66  assert(item->order == order[i]);  in multiorder_iteration()
    [all …]
|
/tools/testing/selftests/bpf/prog_tests/ |
D | bpf_obj_id.c |
    24  __u32 i, next_id, info_len, nr_id_found, duration = 0;  in test_bpf_obj_id() local
    39  for (i = 0; i < nr_iters; i++)  in test_bpf_obj_id()
    40  objs[i] = NULL;  in test_bpf_obj_id()
    44  for (i = 0; i < nr_iters; i++) {  in test_bpf_obj_id()
    47  &objs[i], &prog_fds[i]);  in test_bpf_obj_id()
    55  map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");  in test_bpf_obj_id()
    56  if (CHECK_FAIL(map_fds[i] < 0))  in test_bpf_obj_id()
    58  err = bpf_map_update_elem(map_fds[i], &array_key,  in test_bpf_obj_id()
    65  bzero(&map_infos[i], info_len);  in test_bpf_obj_id()
    66  err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],  in test_bpf_obj_id()
    [all …]
|
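bpf_obj_id.c drives the ID-based BPF object APIs: every loaded map and program has a kernel-wide ID that can be walked and converted back into an fd, whose info is then fetched with bpf_obj_get_info_by_fd(). A hedged sketch of that walk with libbpf, only listing map names rather than reproducing the selftest's checks (needs root or CAP_BPF):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/bpf.h>      /* struct bpf_map_info */
    #include <bpf/bpf.h>        /* bpf_map_get_next_id(), bpf_map_get_fd_by_id(), ... */

    int main(void)
    {
        struct bpf_map_info info;
        __u32 id = 0, info_len;

        /* Walk all map IDs currently known to the kernel. */
        while (!bpf_map_get_next_id(id, &id)) {
            int fd = bpf_map_get_fd_by_id(id);

            if (fd < 0)
                continue;       /* map may have vanished, or EPERM */

            memset(&info, 0, sizeof(info));
            info_len = sizeof(info);
            if (!bpf_obj_get_info_by_fd(fd, &info, &info_len))
                printf("map id %u: name=%s type=%u\n",
                       info.id, info.name, info.type);
            close(fd);
        }
        return 0;
    }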
D | map_lock.c |
    7  int vars[17], i, j, rnd, key = 0;  in parallel_map_access() local
    9  for (i = 0; i < 10000; i++) {  in parallel_map_access()
    16  printf("lookup #%d var[0]=%d\n", i, vars[0]);  in parallel_map_access()
    24  i, rnd, j, vars[j]);  in parallel_map_access()
    39  int err = 0, key = 0, i;  in test_map_lock() local
    56  for (i = 0; i < 4; i++)  in test_map_lock()
    57  if (CHECK_FAIL(pthread_create(&thread_id[i], NULL,  in test_map_lock()
    60  for (i = 4; i < 6; i++)  in test_map_lock()
    61  if (CHECK_FAIL(pthread_create(&thread_id[i], NULL,  in test_map_lock()
    63  &map_fd[i - 4])))  in test_map_lock()
    [all …]
|
/tools/testing/selftests/powerpc/math/ |
D | vsx_preempt.c |
    50  int i;  in vsx_memcmp() local
    54  for(i = 0; i < 12; i++) {  in vsx_memcmp()
    55  if (memcmp(&a[i + 12], &zero, sizeof(vector int)) == 0) {  in vsx_memcmp()
    56  fprintf(stderr, "Detected zero from the VSX reg %d\n", i + 12);  in vsx_memcmp()
    64  for (i = 0; i < 24; i=i+2)  in vsx_memcmp()
    66  i/2 + i%2 + 20, p[i], p[i + 1], p[i + 24], p[i + 25]);  in vsx_memcmp()
    74  int i, j;  in preempt_vsx_c() local
    77  for (i = 0; i < 12; i++)  in preempt_vsx_c()
    79  varray[i][j] = rand();  in preempt_vsx_c()
    81  if (varray[i][j] == 0)  in preempt_vsx_c()
    [all …]
|
D | fpu_signal.c |
    42  int i;  in signal_fpu_sig() local
    47  for (i = 14; i < 32; i++) {  in signal_fpu_sig()
    48  if (mc->fp_regs[i] != darray[i - 14]) {  in signal_fpu_sig()
    57  int i;  in signal_fpu_c() local
    67  for (i = 0; i < 21; i++)  in signal_fpu_c()
    68  darray[i] = rand();  in signal_fpu_c()
    77  int i, j, rc, threads;  in test_signal_fpu() local
    87  for (i = 0; i < threads; i++) {  in test_signal_fpu()
    88  rc = pthread_create(&tids[i], NULL, signal_fpu_c, NULL);  in test_signal_fpu()
    99  for (i = 0; i < ITERATIONS; i++) {  in test_signal_fpu()
    [all …]
|
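test_signal_fpu() above is a typical pthread harness: create one worker per thread slot, let the workers run, then join them. A generic, non-powerpc sketch of just that scaffolding (the worker body is a placeholder):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
        /* The real tests do FPU/signal work here; this sketch just returns. */
        (void)arg;
        return NULL;
    }

    int main(void)
    {
        long threads = sysconf(_SC_NPROCESSORS_ONLN);
        pthread_t *tids;
        long i;

        if (threads < 1)
            threads = 1;
        tids = calloc(threads, sizeof(*tids));
        if (!tids)
            return 1;

        for (i = 0; i < threads; i++)
            if (pthread_create(&tids[i], NULL, worker, NULL))
                return 1;

        for (i = 0; i < threads; i++)
            if (pthread_join(tids[i], NULL))
                return 1;

        free(tids);
        return 0;
    }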
/tools/testing/selftests/rcutorture/bin/ |
D | kvm-recheck.sh |
    22  for i in $dirs
    27  resdir=`echo $i | sed -e 's,/$,,' -e 's,/[^/]*$,,'`
    30  TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
    31  rm -f $i/console.log.*.diags
    32  kvm-recheck-${TORTURE_SUITE}.sh $i
    33  …if test -f "$i/qemu-retval" && test "`cat $i/qemu-retval`" -ne 0 && test "`cat $i/qemu-retval`" -n…
    36  cat $i/qemu-output
    37  elif test -f "$i/console.log"
    39  if test -f "$i/qemu-retval" && test "`cat $i/qemu-retval`" -eq 137
    43  configcheck.sh $i/.config $i/ConfigFragment
    [all …]
|
/tools/testing/selftests/powerpc/pmu/ |
D | per_event_excludes.c |
    27  int i;  in per_event_excludes() local
    70  for (i = 1; i < 4; i++)  in per_event_excludes()
    71  FAIL_IF(event_open_with_group(&events[i], events[0].fd));  in per_event_excludes()
    80  for (i = 0; i < INT_MAX; i++)  in per_event_excludes()
    85  for (i = 0; i < 4; i++) {  in per_event_excludes()
    86  FAIL_IF(event_read(&events[i]));  in per_event_excludes()
    87  event_report(&events[i]);  in per_event_excludes()
    94  for (i = 0; i < 4; i++)  in per_event_excludes()
    95  FAIL_IF(events[i].result.running != events[i].result.enabled);  in per_event_excludes()
    102  for (i = 1; i < 4; i++)  in per_event_excludes()
    [all …]
|
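per_event_excludes.c opens a leader event plus three grouped events, each with different exclude_* bits, then compares running versus enabled time for every member. A rough stand-alone illustration of grouping and exclude bits via the raw perf_event_open() syscall; the selftest's event_open_with_group()/FAIL_IF() helpers and its exact event mix are not reproduced:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    /* glibc has no wrapper for perf_event_open; call the raw syscall. */
    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        unsigned long long count;
        volatile unsigned long spin;
        int leader, member;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.exclude_kernel = 1;        /* count user space only */
        attr.exclude_hv = 1;

        /* Group leader: counting starts as soon as the fd exists. */
        leader = perf_event_open(&attr, 0, -1, -1, 0);
        if (leader < 0) {
            perror("perf_event_open(leader)");
            return 1;
        }

        /* Second event scheduled in the same group as the leader. */
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        member = perf_event_open(&attr, 0, -1, leader, 0);
        if (member < 0)
            perror("perf_event_open(member)");

        for (spin = 0; spin < 1000000; spin++)  /* something to count */
            ;

        if (read(leader, &count, sizeof(count)) == sizeof(count))
            printf("user-space instructions: %llu\n", count);

        return 0;
    }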
/tools/gpio/ |
D | gpio-hammer.c |
    32  int i, j;  in hammer_device() local
    49  for (i = 0; i < nlines; i++) {  in hammer_device()
    50  fprintf(stdout, "%d", lines[i]);  in hammer_device()
    51  if (i != (nlines - 1))  in hammer_device()
    55  for (i = 0; i < nlines; i++) {  in hammer_device()
    56  fprintf(stdout, "%d", data.values[i]);  in hammer_device()
    57  if (i != (nlines - 1))  in hammer_device()
    66  for (i = 0; i < nlines; i++)  in hammer_device()
    67  data.values[i] = !data.values[i];  in hammer_device()
    84  for (i = 0; i < nlines; i++) {  in hammer_device()
    [all …]
|
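gpio-hammer's inner loop reads the current line values, inverts them, and writes them back, which matches the v1 GPIO character-device handle API (struct gpiohandle_data has the values[] array seen in the snippet). A condensed sketch of one such toggle step; the chip path, line offset, and consumer label are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/gpio.h>

    int main(void)
    {
        struct gpiohandle_request req;
        struct gpiohandle_data data;
        int fd;

        fd = open("/dev/gpiochip0", O_RDWR);    /* illustrative chip */
        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&req, 0, sizeof(req));
        req.lineoffsets[0] = 4;                 /* illustrative line */
        req.lines = 1;
        req.flags = GPIOHANDLE_REQUEST_OUTPUT;
        strcpy(req.consumer_label, "gpio-hammer-sketch");

        if (ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req) < 0) {
            perror("GPIO_GET_LINEHANDLE_IOCTL");
            return 1;
        }

        /* Read current values, invert, write back: one "hammer" step. */
        ioctl(req.fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &data);
        data.values[0] = !data.values[0];
        ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data);

        close(req.fd);
        close(fd);
        return 0;
    }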
/tools/perf/util/ |
D | values.c |
    49  int i;  in perf_read_values_destroy() local
    54  for (i = 0; i < values->threads; i++)  in perf_read_values_destroy()
    55  zfree(&values->value[i]);  in perf_read_values_destroy()
    60  for (i = 0; i < values->counters; i++)  in perf_read_values_destroy()
    61  zfree(&values->countername[i]);  in perf_read_values_destroy()
    91  int i;  in perf_read_values__findnew_thread() local
    93  for (i = 0; i < values->threads; i++)  in perf_read_values__findnew_thread()
    94  if (values->pid[i] == pid && values->tid[i] == tid)  in perf_read_values__findnew_thread()
    95  return i;  in perf_read_values__findnew_thread()
    98  i = perf_read_values__enlarge_threads(values);  in perf_read_values__findnew_thread()
    [all …]
|
D | thread_map.c |
    39  int i;  in thread_map__new_by_pid() local
    48  for (i = 0; i < items; i++)  in thread_map__new_by_pid()
    49  perf_thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));  in thread_map__new_by_pid()
    54  for (i=0; i<items; i++)  in thread_map__new_by_pid()
    55  zfree(&namelist[i]);  in thread_map__new_by_pid()
    77  int max_threads = 32, items, i;  in __thread_map__new_all_cpus() local
    129  for (i = 0; i < items; i++) {  in __thread_map__new_all_cpus()
    130  perf_thread_map__set_pid(threads, threads->nr + i,  in __thread_map__new_all_cpus()
    131  atoi(namelist[i]->d_name));  in __thread_map__new_all_cpus()
    134  for (i = 0; i < items; i++)  in __thread_map__new_all_cpus()
    [all …]
|
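thread_map__new_by_pid() builds its thread map by listing /proc/<pid>/task with scandir() and turning each entry name into a tid. The directory-scan part in isolation, using only standard libc (perf's perf_thread_map__set_pid() and its numeric-name filter are not reproduced):

    #include <stdio.h>
    #include <stdlib.h>
    #include <dirent.h>
    #include <unistd.h>

    /* Skip "." and ".."; perf additionally checks that names are numeric. */
    static int filter(const struct dirent *d)
    {
        return d->d_name[0] != '.';
    }

    int main(void)
    {
        struct dirent **namelist;
        char path[64];
        int items, i;

        snprintf(path, sizeof(path), "/proc/%d/task", getpid());

        items = scandir(path, &namelist, filter, alphasort);
        if (items <= 0) {
            perror("scandir");
            return 1;
        }

        for (i = 0; i < items; i++) {
            printf("tid %d\n", atoi(namelist[i]->d_name));
            free(namelist[i]);
        }
        free(namelist);
        return 0;
    }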
D | print_binary.c |
    10  size_t i, j, mask;  in binary__fprintf() local
    20  for (i = 0; i < len; i++) {  in binary__fprintf()
    21  if ((i & mask) == 0) {  in binary__fprintf()
    23  printed += printer(BINARY_PRINT_ADDR, i, extra, fp);  in binary__fprintf()
    26  printed += printer(BINARY_PRINT_NUM_DATA, data[i], extra, fp);  in binary__fprintf()
    28  if (((i & mask) == mask) || i == len - 1) {  in binary__fprintf()
    29  for (j = 0; j < mask-(i & mask); j++)  in binary__fprintf()
    32  printer(BINARY_PRINT_SEP, i, extra, fp);  in binary__fprintf()
    33  for (j = i & ~mask; j <= i; j++)  in binary__fprintf()
    35  for (j = 0; j < mask-(i & mask); j++)  in binary__fprintf()
    [all …]
|
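binary__fprintf() walks the buffer one byte at a time and uses a power-of-two mask (bytes per row minus one) to detect row starts, row ends, and how much padding a short final row needs. The same masking arithmetic in a plain hexdump, without perf's printer-callback indirection (illustrative, not the perf API):

    #include <stdio.h>
    #include <ctype.h>

    /* Dump 'len' bytes, 'bytes_per_line' (a power of two) per row. */
    static void hexdump(const unsigned char *data, size_t len,
                        size_t bytes_per_line)
    {
        size_t mask = bytes_per_line - 1;
        size_t i, j;

        for (i = 0; i < len; i++) {
            if ((i & mask) == 0)                /* row start: print offset */
                printf("%08zx  ", i);

            printf("%02x ", data[i]);

            if ((i & mask) == mask || i == len - 1) {
                for (j = 0; j < mask - (i & mask); j++)
                    printf("   ");              /* pad a short final row */
                printf(" |");
                for (j = i & ~mask; j <= i; j++)
                    putchar(isprint(data[j]) ? data[j] : '.');
                printf("|\n");
            }
        }
    }

    int main(void)
    {
        unsigned char buf[19];
        size_t i;

        for (i = 0; i < sizeof(buf); i++)
            buf[i] = (unsigned char)(i + 60);
        hexdump(buf, sizeof(buf), 8);
        return 0;
    }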
/tools/testing/selftests/drivers/net/mlxsw/spectrum/ |
D | devlink_resources.sh |
    27  local i
    31  for i in $KVD_PROFILES; do
    33  devlink_sp_resource_kvd_profile_set $i
    34  log_test "'$i' profile"
    46  local i
    51  for i in $KVD_CHILDREN; do
    53  size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]')
    54  devlink_resource_size_set "$size" kvd "$i"
    57  if [[ "$i" == "linear" ]]; then
    65  log_test "'$i' minimize [$size]"
    [all …]
|
/tools/testing/selftests/vm/ |
D | virtual_address_range.c |
    102  unsigned long i, lchunks, hchunks;  in main() local
    104  for (i = 0; i < NR_CHUNKS_LOW; i++) {  in main()
    105  ptr[i] = mmap(NULL, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,  in main()
    108  if (ptr[i] == MAP_FAILED) {  in main()
    114  if (validate_addr(ptr[i], 0))  in main()
    117  lchunks = i;  in main()
    119  for (i = 0; i < NR_CHUNKS_HIGH; i++) {  in main()
    121  hptr[i] = mmap(hint, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,  in main()
    124  if (hptr[i] == MAP_FAILED)  in main()
    127  if (validate_addr(hptr[i], 1))  in main()
    [all …]
|
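virtual_address_range.c maps fixed-size chunks in a loop until mmap() fails, recording how many chunks fit; a second loop does the same with explicit high hint addresses. A small stand-alone version of the first loop; the chunk size and count here are illustrative, not the selftest's MAP_CHUNK_SIZE/NR_CHUNKS_LOW:

    #include <stdio.h>
    #include <sys/mman.h>

    #define CHUNK_SIZE  (16UL * 1024 * 1024)    /* illustrative */
    #define NR_CHUNKS   64                      /* illustrative */

    int main(void)
    {
        void *ptr[NR_CHUNKS];
        unsigned long i, mapped;

        for (i = 0; i < NR_CHUNKS; i++) {
            ptr[i] = mmap(NULL, CHUNK_SIZE, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (ptr[i] == MAP_FAILED) {
                printf("ran out of address space after %lu chunks\n", i);
                break;
            }
        }
        mapped = i;

        for (i = 0; i < mapped; i++)
            munmap(ptr[i], CHUNK_SIZE);

        printf("mapped %lu chunks of %lu MiB\n", mapped, CHUNK_SIZE >> 20);
        return 0;
    }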
/tools/testing/selftests/net/ |
D | reuseport_dualstack.c |
    36  int opt, i;  in build_rcv_fd() local
    55  for (i = 0; i < count; ++i) {  in build_rcv_fd()
    56  rcv_fds[i] = socket(family, proto, 0);  in build_rcv_fd()
    57  if (rcv_fds[i] < 0)  in build_rcv_fd()
    61  if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,  in build_rcv_fd()
    65  if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))  in build_rcv_fd()
    68  if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))  in build_rcv_fd()
    105  int i, fd;  in receive_once() local
    108  i = epoll_wait(epfd, &ev, 1, -1);  in receive_once()
    109  if (i < 0)  in receive_once()
    [all …]
|
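build_rcv_fd() can bind several sockets to one port only because each socket sets SO_REUSEPORT before bind(); receive_once() then multiplexes them with epoll. The socket-setup half as a compact sketch (plain IPv4 UDP on an arbitrary port; the test's dual-stack address handling and epoll loop are omitted):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    #define NUM_SOCKS  4
    #define PORT       4242    /* illustrative */

    int main(void)
    {
        int fds[NUM_SOCKS], opt = 1, i;
        struct sockaddr_in addr;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(PORT);

        for (i = 0; i < NUM_SOCKS; i++) {
            fds[i] = socket(AF_INET, SOCK_DGRAM, 0);
            if (fds[i] < 0) {
                perror("socket");
                return 1;
            }
            /* Must be set on every socket before bind() so the kernel
             * allows them all onto the same port. */
            if (setsockopt(fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
                           sizeof(opt))) {
                perror("setsockopt(SO_REUSEPORT)");
                return 1;
            }
            if (bind(fds[i], (struct sockaddr *)&addr, sizeof(addr))) {
                perror("bind");
                return 1;
            }
        }

        printf("%d sockets bound to port %d\n", NUM_SOCKS, PORT);
        for (i = 0; i < NUM_SOCKS; i++)
            close(fds[i]);
        return 0;
    }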
/tools/testing/selftests/firmware/ |
D | fw_filesystem.sh |
    174  for i in $(seq 0 3); do
    175  config_set_read_fw_idx $i
    181  echo "request #$i: firmware was not loaded" >&2
    189  for i in $(seq 0 3); do
    190  config_set_read_fw_idx $i
    193  echo "request $i: file was not expected to match" >&2
    343  for i in $(seq 1 5); do
    344  test_batched_request_firmware $i normal
    347  for i in $(seq 1 5); do
    348  test_batched_request_firmware_into_buf $i normal
    [all …]
|
/tools/testing/selftests/powerpc/tm/ |
D | tm-tmspr.c |
    41  int i, cpu;  in tfiar_tfhar() local
    61  for (i = 0; i < num_loops; i++) {  in tfiar_tfhar()
    74  unsigned long i;  in texasr() local
    77  for (i = 0; i < num_loops; i++) {  in texasr()
    102  unsigned long i;  in test_tmspr() local
    114  for (i = 0; i < thread_num; i += 2) {  in test_tmspr()
    115  if (pthread_create(&thread[i], NULL, (void *)tfiar_tfhar,  in test_tmspr()
    116  (void *)i))  in test_tmspr()
    120  for (i = 1; i < thread_num; i += 2) {  in test_tmspr()
    121  if (pthread_create(&thread[i], NULL, (void *)texasr, (void *)i))  in test_tmspr()
    [all …]
|
D | tm-signal-context-chk-gpr.c |
    51  int i;  in signal_usr1() local
    56  for (i = 0; i < NV_GPR_REGS; i++) {  in signal_usr1()
    57  fail = (ucp->uc_mcontext.gp_regs[R14 + i] != gprs[i]);  in signal_usr1()
    61  R14 + i, ucp->uc_mcontext.gp_regs[R14 + i], gprs[i]);  in signal_usr1()
    66  for (i = 0; i < NV_GPR_REGS; i++) {  in signal_usr1()
    67  fail = (tm_ucp->uc_mcontext.gp_regs[R14 + i] != gprs[NV_GPR_REGS + i]);  in signal_usr1()
    71  R14 + i, tm_ucp->uc_mcontext.gp_regs[R14 + i], gprs[NV_GPR_REGS + i]);  in signal_usr1()
    79  int i;  in tm_signal_context_chk_gpr() local
    93  i = 0;  in tm_signal_context_chk_gpr()
    94  while (i < MAX_ATTEMPT && !broken) {  in tm_signal_context_chk_gpr()
    [all …]
|
D | tm-signal-context-chk-fpu.c |
    49  int i;  in signal_usr1() local
    53  for (i = 0; i < NV_FPU_REGS; i++) {  in signal_usr1()
    55  fail = (ucp->uc_mcontext.fp_regs[FPR14 + i] != fps[i]);  in signal_usr1()
    59  FPR14 + i, ucp->uc_mcontext.fp_regs[FPR14 + i], fps[i]);  in signal_usr1()
    63  for (i = 0; i < NV_FPU_REGS; i++) {  in signal_usr1()
    65  fail = (tm_ucp->uc_mcontext.fp_regs[FPR14 + i] != fps[NV_FPU_REGS + i]);  in signal_usr1()
    69  FPR14 + i, tm_ucp->uc_mcontext.fp_regs[FPR14 + i], fps[NV_FPU_REGS + i]);  in signal_usr1()
    77  int i;  in tm_signal_context_chk_fpu() local
    91  i = 0;  in tm_signal_context_chk_fpu()
    92  while (i < MAX_ATTEMPT && !broken) {  in tm_signal_context_chk_fpu()
    [all …]
|
/tools/testing/selftests/sync/ |
D | sync_stress_merge.c |
    38  int i, size, ret;  in test_merge_stress_random_merge() local
    48  for (i = 0; i < timeline_count; i++)  in test_merge_stress_random_merge()
    49  timelines[i] = sw_sync_timeline_create();  in test_merge_stress_random_merge()
    62  for (i = 0; i < merge_count; i++) {  in test_merge_stress_random_merge()
    86  for (i = 0; i < timeline_count; i++)  in test_merge_stress_random_merge()
    87  if (fence_map[i] != -1)  in test_merge_stress_random_merge()
    95  for (i = 0; i < timeline_count; i++) {  in test_merge_stress_random_merge()
    96  if (fence_map[i] != -1) {  in test_merge_stress_random_merge()
    101  sw_sync_timeline_inc(timelines[i], fence_map[i]);  in test_merge_stress_random_merge()
    111  for (i = 0; i < timeline_count; i++)  in test_merge_stress_random_merge()
    [all …]
|
/tools/testing/selftests/bpf/ |
D | test_maps.c |
    129  int fd, i, j;  in test_hashmap_sizes() local
    131  for (i = 1; i <= 512; i <<= 1)  in test_hashmap_sizes()
    133  fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,  in test_hashmap_sizes()
    139  i, j, strerror(errno));  in test_hashmap_sizes()
    153  int fd, i;  in test_hashmap_percpu() local
    162  for (i = 0; i < nr_cpus; i++)  in test_hashmap_percpu()
    163  bpf_percpu(value, i) = i + 100;  in test_hashmap_percpu()
    224  for (i = 0; i < nr_cpus; i++)  in test_hashmap_percpu()
    225  assert(bpf_percpu(value, i) == i + 100);  in test_hashmap_percpu()
    254  int i, fd, ret;  in helper_fill_hashmap() local
    [all …]
|
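test_hashmap_percpu() stores one value per possible CPU under a single key: an update takes an array with nr_cpus slots, and a lookup returns all of them at once. A hedged sketch of the same round trip using the current libbpf API (bpf_map_create() rather than the older bpf_create_map() shown in the snippet; needs root or CAP_BPF):

    #include <stdio.h>
    #include <stdlib.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>     /* libbpf_num_possible_cpus() */

    int main(void)
    {
        int nr_cpus = libbpf_num_possible_cpus();
        int key = 1, fd, i;
        __u64 *values;          /* one 8-byte slot per possible CPU */

        if (nr_cpus < 1)
            return 1;
        values = calloc(nr_cpus, sizeof(*values));
        if (!values)
            return 1;

        fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_sketch",
                            sizeof(key), sizeof(*values), 2, NULL);
        if (fd < 0) {
            perror("bpf_map_create");
            return 1;
        }

        /* One value per CPU, written in a single update. */
        for (i = 0; i < nr_cpus; i++)
            values[i] = i + 100;
        if (bpf_map_update_elem(fd, &key, values, BPF_ANY))
            perror("bpf_map_update_elem");

        /* A lookup fills every CPU's slot back into the same buffer. */
        if (!bpf_map_lookup_elem(fd, &key, values))
            for (i = 0; i < nr_cpus; i++)
                printf("cpu %d: %llu\n", i, (unsigned long long)values[i]);

        free(values);
        return 0;
    }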
/tools/testing/selftests/rseq/ |
D | param_test.c |
    362  long long i, reps;  in test_percpu_spinlock_thread() local
    368  for (i = 0; i < reps; i++) {  in test_percpu_spinlock_thread()
    373  if (i != 0 && !(i % (reps / 10)))  in test_percpu_spinlock_thread()
    375  (int) rseq_gettid(), i);  in test_percpu_spinlock_thread()
    395  int i, ret;  in test_percpu_spinlock() local
    402  for (i = 0; i < num_threads; i++) {  in test_percpu_spinlock()
    403  thread_data[i].reps = opt_reps;  in test_percpu_spinlock()
    404  if (opt_disable_mod <= 0 || (i % opt_disable_mod))  in test_percpu_spinlock()
    405  thread_data[i].reg = 1;  in test_percpu_spinlock()
    407  thread_data[i].reg = 0;  in test_percpu_spinlock()
    [all …]
|