/external/ltp/testcases/kernel/irq/ |
D | irqbalance01.c |
     54  static unsigned int nr_cpus;    variable
     98  nr_cpus = 0;    in collect_irq_info()
    106  nr_cpus++;    in collect_irq_info()
    129  tst_res(TINFO, "Found %u CPUS, %u IRQs", nr_cpus, nr_irqs);    in collect_irq_info()
    133  nr_cpus * (nr_irqs + 1) * sizeof(*irq_stats));    in collect_irq_info()
    135  nr_cpus * nr_irqs * sizeof(*irq_affinity));    in collect_irq_info()
    148  irq_stats[row * nr_cpus + col] = acc;    in collect_irq_info()
    182  for (col = 0; col < nr_cpus; col++) {    in collect_irq_info()
    186  cpu_total += irq_stats[row * nr_cpus + col];    in collect_irq_info()
    188  irq_stats[row * nr_cpus + col] = cpu_total;    in collect_irq_info()
    [all …]
|
/external/trace-cmd/tracecmd/ |
D | trace-setup-guest.c |
     80  static int make_guest_fifos(const char *guest, int nr_cpus, mode_t mode)    in make_guest_fifos() argument
     86  for (i = 0; i < nr_cpus; i++) {    in make_guest_fifos()
     99  int nr_cpus = -1;    in get_guest_cpu_count() local
    108  fscanf(f, "%d", &nr_cpus);    in get_guest_cpu_count()
    111  return nr_cpus;    in get_guest_cpu_count()
    114  static int attach_guest_fifos(const char *guest, int nr_cpus)    in attach_guest_fifos() argument
    137  for (i = 0; i < nr_cpus; i++) {    in attach_guest_fifos()
    156  static void do_setup_guest(const char *guest, int nr_cpus,    in do_setup_guest() argument
    173  ret = make_guest_fifos(guest, nr_cpus, mode);    in do_setup_guest()
    178  ret = attach_guest_fifos(guest, nr_cpus);    in do_setup_guest()
    [all …]
|
D | trace-agent.c |
     74  static int open_agent_fifos(int nr_cpus, int *fds)    in open_agent_fifos() argument
     79  for (i = 0; i < nr_cpus; i++) {    in open_agent_fifos()
    125  static void agent_handle(int sd, int nr_cpus, int page_size, const char *network)    in agent_handle() argument
    143  fds = calloc(nr_cpus, sizeof(*fds));    in agent_handle()
    144  ports = calloc(nr_cpus, sizeof(*ports));    in agent_handle()
    158  if (use_fifos && open_agent_fifos(nr_cpus, fds))    in agent_handle()
    162  make_sockets(nr_cpus, fds, ports, network);    in agent_handle()
    200  ret = tracecmd_msg_send_trace_resp(msg_handle, nr_cpus, page_size,    in agent_handle()
    206  trace_record_agent(msg_handle, nr_cpus, fds, argc, argv,    in agent_handle()
    258  int sd, cd, nr_cpus;    in agent_serve() local
    [all …]
|
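
Note: the get_guest_cpu_count() hits in trace-setup-guest.c above read the guest's CPU count from a file with fscanf() and fall back to -1 when nothing can be parsed. A minimal sketch of that pattern follows; the path argument is a placeholder (the real tool derives the file location from the guest name):

    #include <stdio.h>

    /* Sketch only: "path" stands in for the per-guest CPU-count file. */
    static int read_cpu_count(const char *path)
    {
            int nr_cpus = -1;
            FILE *f = fopen(path, "r");

            if (!f)
                    return -1;
            if (fscanf(f, "%d", &nr_cpus) != 1)
                    nr_cpus = -1;
            fclose(f);
            return nr_cpus;
    }
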
/external/bcc/libbpf-tools/ |
D | cpufreq.c |
     81  static int nr_cpus;    variable
     94  for (i = 0; i < nr_cpus; i++) {    in open_and_attach_perf_event()
    126  static int init_freqs_hmz(__u32 *freqs_mhz, int nr_cpus)    in init_freqs_hmz() argument
    132  for (i = 0; i < nr_cpus; i++) {    in init_freqs_hmz()
    197  nr_cpus = libbpf_num_possible_cpus();    in main()
    198  if (nr_cpus < 0) {    in main()
    200  strerror(-nr_cpus));    in main()
    203  if (nr_cpus > MAX_CPU_NR) {    in main()
    220  err = init_freqs_hmz(obj->bss->freqs_mhz, nr_cpus);    in main()
    250  for (i = 0; i < nr_cpus; i++)    in main()
|
D | runqlen.c |
    124  static int nr_cpus;    variable
    137  for (i = 0; i < nr_cpus; i++) {    in open_and_attach_perf_event()
    198  } while (env.per_cpu && ++i < nr_cpus);    in print_runq_occupancy()
    212  } while (env.per_cpu && ++i < nr_cpus);    in print_linear_hists()
    236  nr_cpus = libbpf_num_possible_cpus();    in main()
    237  if (nr_cpus < 0) {    in main()
    239  strerror(-nr_cpus));    in main()
    242  if (nr_cpus > MAX_CPU_NR) {    in main()
    297  for (i = 0; i < nr_cpus; i++)    in main()
|
D | llcstat.c |
     84  static int nr_cpus;    variable
     98  for (i = 0; i < nr_cpus; i++) {    in open_and_attach_perf_event()
    188  nr_cpus = libbpf_num_possible_cpus();    in main()
    189  if (nr_cpus < 0) {    in main()
    191  strerror(-nr_cpus));    in main()
    194  mlinks = calloc(nr_cpus, sizeof(*mlinks));    in main()
    195  rlinks = calloc(nr_cpus, sizeof(*rlinks));    in main()
    228  for (i = 0; i < nr_cpus; i++) {    in main()
|
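
Note: cpufreq.c, runqlen.c, and llcstat.c all follow the same setup visible in their main() hits: ask libbpf for the number of possible CPUs and refuse to run when that exceeds the tool's compile-time limit. A hedged sketch of that check, with MAX_CPU_NR standing in for each tool's own macro:

    #include <stdio.h>
    #include <string.h>
    #include <bpf/libbpf.h>

    #define MAX_CPU_NR 128  /* assumed limit, mirroring the tools' macros */

    static int nr_cpus;

    /* Query possible CPUs once, before sizing any per-CPU arrays. */
    static int detect_cpus(void)
    {
            nr_cpus = libbpf_num_possible_cpus();
            if (nr_cpus < 0) {
                    fprintf(stderr, "failed to get # of possible cpus: '%s'\n",
                            strerror(-nr_cpus));
                    return 1;
            }
            if (nr_cpus > MAX_CPU_NR) {
                    fprintf(stderr, "too many CPUs (%d), raise MAX_CPU_NR and rebuild\n",
                            nr_cpus);
                    return 1;
            }
            return 0;
    }
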
/external/ltp/testcases/kernel/controllers/cpuset/cpuset_base_ops_test/ |
D | cpuset_base_ops_testset.sh |
     33  nr_cpus=$NR_CPUS
    112  $nr_cpus WRITE_ERROR
    113  $cpus_all 0-$((nr_cpus-1))
    114  ${cpus_all}$nr_cpus WRITE_ERROR
    117  0-$((nr_cpus-1)) 0-$((nr_cpus-1))
    119  0-$nr_cpus WRITE_ERROR
    120  0--$((nr_cpus-1)) WRITE_ERROR
    126  if [ $nr_cpus -ge 3 ]; then
|
/external/linux-kselftest/tools/testing/selftests/bpf/prog_tests/ |
D | map_init.c |
     10  static int nr_cpus;    variable
     22  pcpu_map_value_t value[nr_cpus];    in map_populate()
     26  for (i = 0; i < nr_cpus; i++)    in map_populate()
    105  for (i = 0; i < nr_cpus; i++) {    in check_values_one_cpu()
    131  pcpu_map_value_t value[nr_cpus];    in test_pcpu_map_init()
    171  pcpu_map_value_t value[nr_cpus];    in test_pcpu_lru_map_init()
    203  nr_cpus = bpf_num_possible_cpus();    in test_map_init()
    204  if (nr_cpus <= 1) {    in test_map_init()
|
D | lookup_and_delete.c |
     11  static int nr_cpus;    variable
     29  __u64 key, value[nr_cpus];    in fill_values_percpu()
     32  for (i = 0; i < nr_cpus; i++)    in fill_values_percpu()
    136  __u64 key, val, value[nr_cpus];    in test_lookup_and_delete_percpu_hash()
    154  for (i = 0; i < nr_cpus; i++) {    in test_lookup_and_delete_percpu_hash()
    220  __u64 key, val, value[nr_cpus];    in test_lookup_and_delete_lru_percpu_hash()
    239  for (i = 0; i < nr_cpus; i++)    in test_lookup_and_delete_lru_percpu_hash()
    249  for (i = 0; i < nr_cpus; i++) {    in test_lookup_and_delete_lru_percpu_hash()
    278  nr_cpus = bpf_num_possible_cpus();    in test_lookup_and_delete()
|
D | perf_buffer.c |
     48  int err, on_len, nr_on_cpus = 0, nr_cpus, i, j;    in serial_test_perf_buffer() local
     56  nr_cpus = libbpf_num_possible_cpus();    in serial_test_perf_buffer()
     57  if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))    in serial_test_perf_buffer()
     94  for (i = 0; i < nr_cpus; i++) {    in serial_test_perf_buffer()
    117  for (i = 0, j = 0; i < nr_cpus; i++) {    in serial_test_perf_buffer()
|
D | xdp_noinline.c |
      8  unsigned int nr_cpus = bpf_num_possible_cpus();    in test_xdp_noinline() local
     19  } stats[nr_cpus];    in test_xdp_noinline()
     59  for (i = 0; i < nr_cpus; i++) {    in test_xdp_noinline()
|
D | l4lb_all.c |
      7  unsigned int nr_cpus = bpf_num_possible_cpus();    in test_l4lb() local
     17  } stats[nr_cpus];    in test_l4lb()
     70  for (i = 0; i < nr_cpus; i++) {    in test_l4lb()
|
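
Note: a recurring pattern across these prog_tests hits (map_init.c, lookup_and_delete.c, xdp_noinline.c, l4lb_all.c) is that userspace lookups on per-CPU maps need a value buffer with one slot per possible CPU. A minimal sketch of that access pattern, assuming an already-created per-CPU map fd and a plain __u64 value type:

    #include <bpf/bpf.h>
    #include <linux/types.h>

    /* Sketch: map_fd refers to a BPF_MAP_TYPE_PERCPU_HASH created elsewhere. */
    static int update_and_check_percpu(int map_fd, int nr_cpus)
    {
            __u64 key = 1, value[nr_cpus];
            int i, err;

            for (i = 0; i < nr_cpus; i++)   /* one slot per possible CPU */
                    value[i] = i + 100;

            err = bpf_map_update_elem(map_fd, &key, value, BPF_ANY);
            if (err)
                    return err;

            err = bpf_map_lookup_elem(map_fd, &key, value);
            if (err)
                    return err;

            for (i = 0; i < nr_cpus; i++)
                    if (value[i] != (__u64)(i + 100))
                            return -1;
            return 0;
    }
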
/external/linux-kselftest/tools/testing/selftests/bpf/map_tests/ |
D | array_map_batch_ops.c |
     12  static int nr_cpus;    variable
     27  cpu_offset = i * nr_cpus;    in map_batch_update()
     28  for (j = 0; j < nr_cpus; j++)    in map_batch_update()
     48  cpu_offset = i * nr_cpus;    in map_batch_verify()
     49  for (j = 0; j < nr_cpus; j++) {    in map_batch_verify()
     89  value_size *= nr_cpus;    in __test_map_lookup_and_update_batch()
    156  nr_cpus = libbpf_num_possible_cpus();    in test_array_map_batch_ops()
    158  CHECK(nr_cpus < 0, "nr_cpus checking",    in test_array_map_batch_ops()
|
/external/ltp/testcases/kernel/tracing/ftrace_test/ftrace_stress/ |
D | ftrace_tracing_cpumask.sh |
     25  nr_cpus=`tst_ncpus`
     29  if [ $nr_cpus -gt 32 ]; then
     30  group_cnt=$((nr_cpus / 32))
     32  rem=$((nr_cpus % 32))
     38  range=$((nr_cpus - 1))
|
D | ftrace_trace_stat.sh |
     19  nr_cpus=`tst_ncpus`
     30  cpu=$(tst_random 0 $((nr_cpus - 1)))
|
/external/linux-kselftest/tools/testing/selftests/bpf/ |
D | test_lru_map.c |
     27  static int nr_cpus;    variable
     94  unsigned long long value0[nr_cpus], value1[nr_cpus];    in map_subset()
    125  while (next < nr_cpus) {    in sched_next_online()
    148  unsigned long long key, value[nr_cpus];    in test_lru_sanity0()
    158  lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);    in test_lru_sanity0()
    245  unsigned long long key, end_key, value[nr_cpus];    in test_lru_sanity1()
    321  unsigned long long key, value[nr_cpus];    in test_lru_sanity2()
    429  unsigned long long key, end_key, value[nr_cpus];    in test_lru_sanity3()
    494  unsigned long long key, value[nr_cpus];    in test_lru_sanity4()
    505  3 * tgt_free * nr_cpus);    in test_lru_sanity4()
    [all …]
|
/external/mesa3d/src/util/ |
D | u_cpu_detect.c |
    438  util_cpu_caps.cores_per_L3 = util_cpu_caps.nr_cpus;    in get_cpu_topology()
    452  if (cache_level != 3 || cores_per_L3 == util_cpu_caps.nr_cpus)    in get_cpu_topology()
    477  for (unsigned i = 0; i < util_cpu_caps.nr_cpus && i < UTIL_MAX_CPUS;    in get_cpu_topology()
    499  util_cpu_caps.num_L3_caches = util_cpu_caps.nr_cpus / cores_per_L3;    in get_cpu_topology()
    504  for (unsigned i = 0; i < util_cpu_caps.nr_cpus && i < UTIL_MAX_CPUS;    in get_cpu_topology()
    524  for (int j = util_cpu_caps.nr_cpus - 1; j >= 0; j -= 32)    in get_cpu_topology()
    550  util_cpu_caps.nr_cpus = system_info.dwNumberOfProcessors;    in util_cpu_detect_once()
    553  util_cpu_caps.nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);    in util_cpu_detect_once()
    554  if (util_cpu_caps.nr_cpus == ~0)    in util_cpu_detect_once()
    555  util_cpu_caps.nr_cpus = 1;    in util_cpu_detect_once()
    [all …]
|
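
Note: the util_cpu_detect_once() hits show the usual portable fallback for filling nr_cpus: GetSystemInfo() on Windows, sysconf(_SC_NPROCESSORS_ONLN) elsewhere, defaulting to 1 when the query fails. A small sketch of the POSIX side only; the util_cpu_caps structure itself is not reproduced here:

    #include <unistd.h>

    /* POSIX-only sketch; Mesa also has a Windows path via GetSystemInfo(). */
    static int detect_online_cpus(void)
    {
            long n = sysconf(_SC_NPROCESSORS_ONLN);

            if (n < 1)      /* sysconf() reports failure as -1 */
                    n = 1;  /* same fallback the hits above show */
            return (int)n;
    }
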
/external/linux-kselftest/tools/testing/selftests/kvm/lib/aarch64/ |
D | gic.c |
     26  gic_dist_init(enum gic_type type, unsigned int nr_cpus, void *dist_base)    in gic_dist_init() argument
     43  gic_ops->gic_init(nr_cpus, dist_base);    in gic_dist_init()
     52  void gic_init(enum gic_type type, unsigned int nr_cpus,    in gic_init() argument
     60  GUEST_ASSERT(nr_cpus);    in gic_init()
     62  gic_dist_init(type, nr_cpus, dist_base);    in gic_init()
|
D | gic_v3.c |
     18  unsigned int nr_cpus;    member
    291  GUEST_ASSERT(cpu < gicv3_data.nr_cpus);    in gicv3_cpu_init()
    359  static void gicv3_init(unsigned int nr_cpus, void *dist_base)    in gicv3_init() argument
    361  GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS);    in gicv3_init()
    363  gicv3_data.nr_cpus = nr_cpus;    in gicv3_init()
|
/external/igt-gpu-tools/overlay/ |
D | gpu-perf.c |
    222  n = gp->nr_cpus * (gp->nr_events+1);    in perf_tracepoint_open()
    230  fd += gp->nr_events * gp->nr_cpus;    in perf_tracepoint_open()
    231  sample += gp->nr_events * gp->nr_cpus;    in perf_tracepoint_open()
    232  for (n = 0; n < gp->nr_cpus; n++) {    in perf_tracepoint_open()
    255  gp->map = malloc(sizeof(void *)*gp->nr_cpus);    in perf_mmap()
    260  for (j = 0; j < gp->nr_cpus; j++) {    in perf_mmap()
    267  for (j = 0; j < gp->nr_cpus; j++)    in perf_mmap()
    426  gp->nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);    in gpu_perf_init()
    453  int m = n * gp->nr_cpus + cpu;    in process_sample()
    475  for (n = 0; n < gp->nr_cpus; n++) {    in gpu_perf_update()
|
/external/ltp/testcases/kernel/controllers/cpuset/cpuset_load_balance_test/ |
D | cpuset_cpu_hog.c |
    116  long nr_cpus = 0;    in checkopt() local
    119  nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);    in checkopt()
    120  if (nr_cpus <= 0) {    in checkopt()
    149  nprocs = 2 * nr_cpus;    in checkopt()
|
/external/linux-kselftest/tools/testing/selftests/vm/ |
D | userfaultfd.c |
     67  static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;    variable
    416  for (i = 0; i < nr_cpus * 2; ++i) {    in uffd_test_ctx_clear()
    491  pipefd = malloc(sizeof(int) * nr_cpus * 2);    in uffd_test_ctx_init()
    494  for (cpu = 0; cpu < nr_cpus; cpu++)    in uffd_test_ctx_init()
    844  pthread_t locking_threads[nr_cpus];    in stress()
    845  pthread_t uffd_threads[nr_cpus];    in stress()
    846  pthread_t background_threads[nr_cpus];    in stress()
    849  for (cpu = 0; cpu < nr_cpus; cpu++) {    in stress()
    869  for (cpu = 0; cpu < nr_cpus; cpu++)    in stress()
    885  for (cpu = 0; cpu < nr_cpus; cpu++)    in stress()
    [all …]
|
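
Note: the stress() hits in userfaultfd.c show the one-worker-per-CPU layout: thread arrays sized by nr_cpus and create/join loops over every CPU. A stripped-down sketch of that shape; the worker body is a placeholder, and the CPU count here simply comes from sysconf(), which may differ from how the test itself derives it:

    #include <pthread.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
            (void)arg;      /* placeholder for the real per-CPU work */
            return NULL;
    }

    /* Sketch: spawn and reap one thread per online CPU. */
    static int run_per_cpu_threads(void)
    {
            long nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
            long cpu;

            if (nr_cpus < 1)
                    nr_cpus = 1;

            pthread_t threads[nr_cpus];

            for (cpu = 0; cpu < nr_cpus; cpu++)
                    if (pthread_create(&threads[cpu], NULL, worker, NULL))
                            return -1;
            for (cpu = 0; cpu < nr_cpus; cpu++)
                    pthread_join(threads[cpu], NULL);
            return 0;
    }
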
/external/libtracefs/src/ |
D | tracefs-instance.c |
    876  int nr_cpus;    in tracefs_instance_set_affinity_set() local
    885  nr_cpus = sysconf(_SC_NPROCESSORS_CONF);    in tracefs_instance_set_affinity_set()
    886  set = CPU_ALLOC(nr_cpus);    in tracefs_instance_set_affinity_set()
    889  set_size = CPU_ALLOC_SIZE(nr_cpus);    in tracefs_instance_set_affinity_set()
    892  for (cpu = 0; cpu < nr_cpus; cpu++)    in tracefs_instance_set_affinity_set()
    897  nr_cpus = (set_size + 1) * 8;    in tracefs_instance_set_affinity_set()
    898  if (nr_cpus < 1) {    in tracefs_instance_set_affinity_set()
    904  for (w = ((nr_cpus + 31) / 32) - 1; w >= 0; w--) {    in tracefs_instance_set_affinity_set()
    909  if ((n * 4) + (w * 32) >= nr_cpus)    in tracefs_instance_set_affinity_set()
    915  if (cpu >= nr_cpus)    in tracefs_instance_set_affinity_set()
|
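
Note: tracefs_instance_set_affinity_set() sizes a dynamic cpu_set_t from sysconf(_SC_NPROCESSORS_CONF) before converting it into a mask string. A reduced sketch of just the allocation and fill visible in the hits above; the mask-to-hex conversion that follows in the real function is omitted:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <unistd.h>
    #include <stddef.h>

    /* Sketch: allocate a CPU set large enough for every configured CPU and
     * mark them all; the caller releases it with CPU_FREE(). */
    static cpu_set_t *alloc_full_cpu_set(size_t *set_size_out)
    {
            long nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
            cpu_set_t *set;
            size_t set_size;
            long cpu;

            if (nr_cpus < 1)
                    return NULL;

            set = CPU_ALLOC(nr_cpus);
            if (!set)
                    return NULL;

            set_size = CPU_ALLOC_SIZE(nr_cpus);
            CPU_ZERO_S(set_size, set);
            for (cpu = 0; cpu < nr_cpus; cpu++)
                    CPU_SET_S(cpu, set_size, set);

            *set_size_out = set_size;
            return set;
    }
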
/external/linux-kselftest/tools/testing/selftests/rcutorture/bin/ |
D | functions.sh |
     63  if test "$3" -gt "$nr_cpus"
     65  echo $nr_cpus
|
/external/virglrenderer/src/mesa/util/ |
D | u_cpu_detect.h |
     66  int16_t nr_cpus;    member
    135  assert(util_cpu_caps.nr_cpus >= 1);    in util_get_cpu_caps()
|