/lib/
D  cpu_rmap.c
      28  unsigned int cpu;                                         in alloc_cpu_rmap() local
      52  for_each_possible_cpu(cpu) {                              in alloc_cpu_rmap()
      53  rmap->near[cpu].index = cpu % size;                       in alloc_cpu_rmap()
      54  rmap->near[cpu].dist = CPU_RMAP_DIST_INF;                 in alloc_cpu_rmap()
      94  static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,  in cpu_rmap_copy_neigh() argument
     100  if (rmap->near[cpu].dist > dist &&                        in cpu_rmap_copy_neigh()
     102  rmap->near[cpu].index = rmap->near[neigh].index;          in cpu_rmap_copy_neigh()
     103  rmap->near[cpu].dist = dist;                              in cpu_rmap_copy_neigh()
     114  unsigned int cpu;                                         in debug_print_rmap() local
     118  for_each_possible_cpu(cpu) {                              in debug_print_rmap()
     [all …]
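The lines above come from alloc_cpu_rmap()'s initialisation of the near[] table and from the neighbour-copy helper. As a rough, hedged illustration of how a user drives this library, the sketch below uses the exported cpu_rmap API; the queue object, the affinity mask and the function name are made up for the example.

    #include <linux/cpu_rmap.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    /*
     * Sketch only: build a reverse "nearest object per CPU" map for one
     * hypothetical queue object whose IRQ affinity mask is @aff.
     */
    static int example_rmap_setup(void *queue, const struct cpumask *aff)
    {
            struct cpu_rmap *rmap;
            int index, ret;

            rmap = alloc_cpu_rmap(1, GFP_KERNEL);   /* room for one object */
            if (!rmap)
                    return -ENOMEM;

            index = cpu_rmap_add(rmap, queue);      /* returns the object's index */
            if (index < 0) {
                    cpu_rmap_put(rmap);
                    return index;
            }

            /* Record which CPUs the object is near; other CPUs get larger distances. */
            ret = cpu_rmap_update(rmap, index, aff);

            /* Ask which object is nearest to CPU 0. */
            pr_info("CPU0 -> object index %u\n", cpu_rmap_lookup_index(rmap, 0));

            cpu_rmap_put(rmap);                     /* drop our reference */
            return ret;
    }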
D  percpu_counter.c
      62  int cpu;                                                  in percpu_counter_set() local
      66  for_each_possible_cpu(cpu) {                              in percpu_counter_set()
      67  s32 *pcount = per_cpu_ptr(fbc->counters, cpu);            in percpu_counter_set()
     140  int cpu;                                                  in __percpu_counter_sum() local
     145  for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {   in __percpu_counter_sum()
     146  s32 *pcount = per_cpu_ptr(fbc->counters, cpu);            in __percpu_counter_sum()
     224  static int compute_batch_value(unsigned int cpu)          in compute_batch_value() argument
     232  static int percpu_counter_cpu_dead(unsigned int cpu)      in percpu_counter_cpu_dead() argument
     237  compute_batch_value(cpu);                                 in percpu_counter_cpu_dead()
     244  pcount = per_cpu_ptr(fbc->counters, cpu);                 in percpu_counter_cpu_dead()
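percpu_counter_set() and __percpu_counter_sum() walk each CPU's s32 slot, and the *_cpu_dead() callback folds a dead CPU's slot back into the shared count. A minimal usage sketch of the public API, assuming process context and a made-up counter name:

    #include <linux/percpu_counter.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    static struct percpu_counter example_events;

    static int example_counter_demo(void)
    {
            int err = percpu_counter_init(&example_events, 0, GFP_KERNEL);

            if (err)
                    return err;

            percpu_counter_add(&example_events, 1);         /* cheap per-CPU fast path */
            percpu_counter_sub(&example_events, 1);

            /* The exact value folds every CPU's slot, like __percpu_counter_sum() above. */
            pr_info("exact=%lld approximate=%lld\n",
                    percpu_counter_sum(&example_events),
                    percpu_counter_read(&example_events));

            percpu_counter_destroy(&example_events);
            return 0;
    }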
D  nmi_backtrace.c
      96  int cpu = smp_processor_id();                             in nmi_cpu_backtrace() local
      99  if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {  in nmi_cpu_backtrace()
     107  cpu, (void *)instruction_pointer(regs));                  in nmi_cpu_backtrace()
     109  pr_warn("NMI backtrace for cpu %d\n", cpu);               in nmi_cpu_backtrace()
     116  cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));       in nmi_cpu_backtrace()
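nmi_cpu_backtrace() is the receiving side: it dumps a backtrace when its CPU is set in backtrace_mask, then clears its bit. A hedged sketch of the requesting side, using the generic helper from <linux/nmi.h>, which returns false when the architecture cannot raise remote backtraces:

    #include <linux/nmi.h>
    #include <linux/printk.h>

    /* Sketch: ask every other CPU to dump its stack, e.g. from a stall detector. */
    static void example_dump_everyone(void)
    {
            if (!trigger_all_cpu_backtrace())
                    dump_stack();   /* no remote-NMI support: dump this CPU only */
    }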
D  cpumask_kunit.c
      20  int cpu, iter = 0;                          \
      21  for_each_cpu(cpu, m)                        \
      31  int cpu, iter = 0;                          \
      34  for_each_cpu_##op(cpu, mask1, mask2)        \
      43  int cpu, iter = 0;                          \
      44  for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2)   \
      52  int cpu, iter = 0;                          \
      53  for_each_##name##_cpu(cpu)                  \
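The KUnit macros above exercise the cpumask iterators by counting their iterations. The same iterators outside the test harness, in a short sketch that counts CPUs three ways (names are made up; hotplug is held off so the masks stay stable):

    #include <linux/cpumask.h>
    #include <linux/cpu.h>
    #include <linux/printk.h>

    static void example_count_cpus(void)
    {
            unsigned int cpu, present = 0, online = 0, both = 0;

            cpus_read_lock();                       /* keep the masks stable vs. hotplug */

            for_each_present_cpu(cpu)               /* for_each_##name##_cpu() flavour */
                    present++;

            for_each_cpu(cpu, cpu_online_mask)      /* plain iterator over one mask */
                    online++;

            for_each_cpu_and(cpu, cpu_online_mask, cpu_present_mask)
                    both++;                         /* two-mask flavour */

            cpus_read_unlock();

            pr_info("present=%u online=%u both=%u\n", present, online, both);
    }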
D  group_cpus.c
      18  int cpu, sibl;                                            in grp_spread_init_one() local
      21  cpu = cpumask_first(nmsk);                                in grp_spread_init_one()
      24  if (cpu >= nr_cpu_ids)                                    in grp_spread_init_one()
      27  cpumask_clear_cpu(cpu, nmsk);                             in grp_spread_init_one()
      28  cpumask_set_cpu(cpu, irqmsk);                             in grp_spread_init_one()
      32  siblmsk = topology_sibling_cpumask(cpu);                  in grp_spread_init_one()
      79  int cpu;                                                  in build_node_to_cpumask() local
      81  for_each_possible_cpu(cpu)                                in build_node_to_cpumask()
      82  cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);            in build_node_to_cpumask()
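grp_spread_init_one() pulls one CPU out of a node mask, then pulls in its SMT siblings via topology_sibling_cpumask(). A hedged sketch of that sibling walk in isolation, with made-up mask parameters:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* Sketch: move @cpu plus its SMT siblings from @src into @dst. */
    static void example_take_cpu_and_siblings(unsigned int cpu,
                                              struct cpumask *src,
                                              struct cpumask *dst)
    {
            const struct cpumask *siblmsk = topology_sibling_cpumask(cpu);
            unsigned int sibl;

            cpumask_clear_cpu(cpu, src);
            cpumask_set_cpu(cpu, dst);

            for_each_cpu_and(sibl, siblmsk, src) {  /* only siblings still left in @src */
                    cpumask_clear_cpu(sibl, src);
                    cpumask_set_cpu(sibl, dst);
            }
    }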
D  cpumask.c
     144  unsigned int cpu;                                         in cpumask_local_spread() local
     149  cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);  in cpumask_local_spread()
     151  WARN_ON(cpu >= nr_cpu_ids);                               in cpumask_local_spread()
     152  return cpu;                                               in cpumask_local_spread()
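cpumask_local_spread() returns the i-th online CPU, preferring CPUs on or near the requested NUMA node. A common consumer pattern, sketched with hypothetical names, is spreading per-queue interrupts across nearby CPUs:

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    /* Sketch: pin each of @nvec queue interrupts to a CPU near @node. */
    static void example_spread_queue_irqs(const int *irqs, unsigned int nvec, int node)
    {
            unsigned int i;

            for (i = 0; i < nvec; i++) {
                    /* i is taken modulo the number of online CPUs internally. */
                    unsigned int cpu = cpumask_local_spread(i, node);

                    irq_set_affinity_hint(irqs[i], cpumask_of(cpu));
            }
    }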
D  percpu-refcount.c
     175  int cpu;                                                  in percpu_ref_switch_to_atomic_rcu() local
     177  for_each_possible_cpu(cpu)                                in percpu_ref_switch_to_atomic_rcu()
     178  count += *per_cpu_ptr(percpu_count, cpu);                 in percpu_ref_switch_to_atomic_rcu()
     240  int cpu;                                                  in __percpu_ref_switch_to_percpu() local
     258  for_each_possible_cpu(cpu)                                in __percpu_ref_switch_to_percpu()
     259  *per_cpu_ptr(percpu_count, cpu) = 0;                      in __percpu_ref_switch_to_percpu()
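The switch code above sums or zeroes every CPU's slot of the reference count when changing modes. From the API user's point of view it is just init/get/put/kill; a minimal sketch with a hypothetical release callback:

    #include <linux/percpu-refcount.h>
    #include <linux/completion.h>
    #include <linux/gfp.h>

    static struct percpu_ref example_ref;
    static DECLARE_COMPLETION(example_ref_done);

    static void example_ref_release(struct percpu_ref *ref)
    {
            complete(&example_ref_done);            /* last reference is gone */
    }

    static int example_ref_demo(void)
    {
            int err = percpu_ref_init(&example_ref, example_ref_release,
                                      0 /* start in per-CPU mode */, GFP_KERNEL);

            if (err)
                    return err;

            percpu_ref_get(&example_ref);           /* per-CPU fast path while live */
            percpu_ref_put(&example_ref);

            percpu_ref_kill(&example_ref);          /* switch to atomic, drop the initial ref */
            wait_for_completion(&example_ref_done); /* release has run; safe to tear down */
            percpu_ref_exit(&example_ref);
            return 0;
    }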
D  dhry_run.c
      34  unsigned int cpu = get_cpu();                             in dhry_benchmark() local
      51  pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n", cpu,  in dhry_benchmark()
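dhry_benchmark() uses get_cpu() so the CPU number it reports stays valid while the benchmark runs. The pattern in isolation, as a sketch:

    #include <linux/smp.h>
    #include <linux/printk.h>

    /* Sketch: attribute a short burst of work to one CPU. */
    static void example_pinned_report(void)
    {
            unsigned int cpu = get_cpu();   /* disables preemption, returns current CPU */

            pr_info("CPU%u: doing work\n", cpu);

            put_cpu();                      /* re-enables preemption */
    }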
D  test_lockup.c
     577  unsigned int cpu;                                         in test_lockup_init() local
     583  for_each_online_cpu(cpu) {                                in test_lockup_init()
     584  INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);   in test_lockup_init()
     585  queue_work_on(cpu, system_highpri_wq,                     in test_lockup_init()
     586  per_cpu_ptr(&test_works, cpu));                           in test_lockup_init()
     590  for_each_online_cpu(cpu)                                  in test_lockup_init()
     591  flush_work(per_cpu_ptr(&test_works, cpu));                in test_lockup_init()
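test_lockup_init() queues one work item per online CPU on the high-priority workqueue and then flushes them. A stripped-down sketch of that fan-out with made-up names; cpus_read_lock() is taken here only to keep the online mask stable across the two loops:

    #include <linux/workqueue.h>
    #include <linux/percpu.h>
    #include <linux/cpu.h>
    #include <linux/smp.h>
    #include <linux/printk.h>

    static DEFINE_PER_CPU(struct work_struct, example_works);

    static void example_work_fn(struct work_struct *work)
    {
            pr_info("running on CPU%u\n", raw_smp_processor_id());
    }

    static void example_run_on_all_online(void)
    {
            unsigned int cpu;

            cpus_read_lock();               /* keep cpu_online_mask stable */

            for_each_online_cpu(cpu) {
                    INIT_WORK(per_cpu_ptr(&example_works, cpu), example_work_fn);
                    queue_work_on(cpu, system_highpri_wq,
                                  per_cpu_ptr(&example_works, cpu));
            }

            for_each_online_cpu(cpu)
                    flush_work(per_cpu_ptr(&example_works, cpu));

            cpus_read_unlock();
    }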
D  irq_poll.c
     188  static int irq_poll_cpu_dead(unsigned int cpu)            in irq_poll_cpu_dead() argument
     198  list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),           in irq_poll_cpu_dead()
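irq_poll_cpu_dead() is a CPU-hotplug "dead" callback: it splices the dead CPU's pending list onto the local CPU. Registering such a callback usually looks like the sketch below; the dynamic state and the state name string are assumptions for the example:

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>
    #include <linux/printk.h>

    /* Sketch: drain per-CPU state of a CPU that has just gone offline. */
    static int example_cpu_dead(unsigned int cpu)
    {
            pr_info("CPU%u is dead, draining its per-CPU state\n", cpu);
            return 0;
    }

    static int __init example_hotplug_init(void)
    {
            /*
             * Dynamic state in the PREPARE section: the teardown callback runs
             * on a control CPU after the hot-unplugged CPU is dead.
             */
            int ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
                                                "lib/example:dead",
                                                NULL, example_cpu_dead);

            return ret < 0 ? ret : 0;
    }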
D  debugobjects.c
     431  static int object_cpu_offline(unsigned int cpu)           in object_cpu_offline() argument
     439  percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);         in object_cpu_offline()
    1027  int cpu, obj_percpu_free = 0;                             in debug_stats_show() local
    1029  for_each_possible_cpu(cpu)                                in debug_stats_show()
    1030  obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);  in debug_stats_show()
    1367  int cpu, extras;                                          in debug_objects_mem_init() local
    1378  for_each_possible_cpu(cpu)                                in debug_objects_mem_init()
    1379  INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));  in debug_objects_mem_init()
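debug_stats_show() sums a statically defined per-CPU pool counter over every possible CPU. The same pattern with a hypothetical per-CPU statistic:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    static DEFINE_PER_CPU(unsigned long, example_hits);

    static void example_bump(void)
    {
            this_cpu_inc(example_hits);             /* preemption-safe local increment */
    }

    static unsigned long example_total_hits(void)
    {
            unsigned long total = 0;
            int cpu;

            /* Possible CPUs, not just online ones, so nothing is lost across hotplug. */
            for_each_possible_cpu(cpu)
                    total += per_cpu(example_hits, cpu);

            return total;
    }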
D  sbitmap.c
     603  static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)  in sbitmap_update_cpu_hint() argument
     606  data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);       in sbitmap_update_cpu_hint()
     644  unsigned int cpu)                                         in sbitmap_queue_clear() argument
     667  sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);               in sbitmap_queue_clear()
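sbitmap_queue_clear() records the freed tag as the given CPU's allocation hint. A rough sketch of one allocate/free cycle around it; the depth, shift and NUMA node arguments are arbitrary:

    #include <linux/sbitmap.h>
    #include <linux/smp.h>
    #include <linux/gfp.h>

    /* Sketch: one tag allocate/free cycle against a small bitmap. */
    static int example_sbitmap_cycle(int numa_node)
    {
            struct sbitmap_queue sbq;
            int nr, err;

            err = sbitmap_queue_init_node(&sbq, 64 /* depth */, -1 /* default shift */,
                                          false /* round_robin */, GFP_KERNEL, numa_node);
            if (err)
                    return err;

            nr = __sbitmap_queue_get(&sbq);         /* -1 when the tag space is exhausted */
            if (nr >= 0)
                    sbitmap_queue_clear(&sbq, nr, raw_smp_processor_id());

            sbitmap_queue_free(&sbq);
            return 0;
    }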
D  radix-tree.c
    1578  static int radix_tree_cpu_dead(unsigned int cpu)          in radix_tree_cpu_dead() argument
    1584  rtp = &per_cpu(radix_tree_preloads, cpu);                 in radix_tree_cpu_dead()
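radix_tree_cpu_dead() frees the dead CPU's preload pool. That pool is filled by the preload pattern used at insertion sites; a minimal sketch with a made-up tree and lock:

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>
    #include <linux/gfp.h>

    static RADIX_TREE(example_tree, GFP_ATOMIC);    /* inserts happen under a spinlock */
    static DEFINE_SPINLOCK(example_lock);

    /* Sketch: preload the per-CPU node pool in process context, insert atomically. */
    static int example_insert(unsigned long index, void *item)
    {
            int err = radix_tree_preload(GFP_KERNEL);   /* may sleep, fills the pool */

            if (err)
                    return err;

            spin_lock(&example_lock);
            err = radix_tree_insert(&example_tree, index, item);
            spin_unlock(&example_lock);

            radix_tree_preload_end();                   /* re-enables preemption */
            return err;
    }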
D  Kconfig.debug
     571  bool "Force weak per-cpu definitions"
    1218  bool "Report per-cpu work items which hog CPU for too long"
    1221  Say Y here to enable reporting of concurrency-managed per-cpu work
    1225  them from stalling other per-cpu work items. Occassional
    2129  KCOV uses preallocated per-cpu areas to collect coverage from
    2329  tristate "Per cpu operations test"
    2332  Enable this option to build test module which validates per-cpu