Searched refs:cpu (Results 1 – 12 of 12) sorted by relevance

/lib/
cpu_rmap.c
28 unsigned int cpu; in alloc_cpu_rmap() local
52 for_each_possible_cpu(cpu) { in alloc_cpu_rmap()
53 rmap->near[cpu].index = cpu % size; in alloc_cpu_rmap()
54 rmap->near[cpu].dist = CPU_RMAP_DIST_INF; in alloc_cpu_rmap()
94 static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu, in cpu_rmap_copy_neigh() argument
100 if (rmap->near[cpu].dist > dist && in cpu_rmap_copy_neigh()
102 rmap->near[cpu].index = rmap->near[neigh].index; in cpu_rmap_copy_neigh()
103 rmap->near[cpu].dist = dist; in cpu_rmap_copy_neigh()
114 unsigned int cpu; in debug_print_rmap() local
118 for_each_possible_cpu(cpu) { in debug_print_rmap()
[all …]
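The alloc_cpu_rmap() hits above show the usual pattern of walking every possible CPU to give a per-CPU table a default value. A minimal sketch of that shape, with illustrative names (near_entry and near_table_init are not lib/ symbols):

#include <linux/types.h>
#include <linux/cpumask.h>

struct near_entry {
	u16 index;
	u16 dist;
};

static struct near_entry near_table[NR_CPUS];

static void near_table_init(unsigned int size)
{
	unsigned int cpu;

	/* Default slot plus an "infinite" distance for every possible CPU,
	 * the same loop shape as alloc_cpu_rmap(). */
	for_each_possible_cpu(cpu) {
		near_table[cpu].index = cpu % size;
		near_table[cpu].dist = 0xffff;
	}
}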
cpumask.c
53 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) in cpumask_any_but() argument
57 cpumask_check(cpu); in cpumask_any_but()
59 if (i != cpu) in cpumask_any_but()
208 int cpu; in cpumask_local_spread() local
214 for_each_cpu(cpu, cpu_online_mask) in cpumask_local_spread()
216 return cpu; in cpumask_local_spread()
219 for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) in cpumask_local_spread()
221 return cpu; in cpumask_local_spread()
223 for_each_cpu(cpu, cpu_online_mask) { in cpumask_local_spread()
225 if (cpumask_test_cpu(cpu, cpumask_of_node(node))) in cpumask_local_spread()
[all …]
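The cpumask.c hits come from cpumask_local_spread(), which picks the i-th online CPU while preferring CPUs on a given NUMA node. A hedged sketch of that selection logic (pick_nth_local_cpu is a made-up helper, not the kernel function):

#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/numa.h>

static unsigned int pick_nth_local_cpu(unsigned int i, int node)
{
	int cpu;

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* First pass: online CPUs on the requested node. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		/* Second pass: the remaining online CPUs. */
		for_each_cpu(cpu, cpu_online_mask) {
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;
			if (i-- == 0)
				return cpu;
		}
	}

	return cpumask_first(cpu_online_mask);
}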
percpu_counter.c
62 int cpu; in percpu_counter_set() local
66 for_each_possible_cpu(cpu) { in percpu_counter_set()
67 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_set()
127 int cpu; in __percpu_counter_sum() local
132 for_each_online_cpu(cpu) { in __percpu_counter_sum()
133 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in __percpu_counter_sum()
187 static int compute_batch_value(unsigned int cpu) in compute_batch_value() argument
195 static int percpu_counter_cpu_dead(unsigned int cpu) in percpu_counter_cpu_dead() argument
200 compute_batch_value(cpu); in percpu_counter_cpu_dead()
207 pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_cpu_dead()
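percpu_counter keeps a shared s64 total plus a small s32 delta per CPU; reading an exact value means folding every online CPU's delta back in, as __percpu_counter_sum() does above. A simplified sketch (simple_counter is an illustrative stand-in for struct percpu_counter, without the locking):

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct simple_counter {
	s64 count;		/* shared slow-path total */
	s32 __percpu *counters;	/* per-CPU deltas */
};

static s64 simple_counter_sum(struct simple_counter *c)
{
	s64 ret = c->count;
	int cpu;

	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(c->counters, cpu);
		ret += *pcount;
	}

	return ret;
}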
nmi_backtrace.c
94 int cpu = smp_processor_id(); in nmi_cpu_backtrace() local
97 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { in nmi_cpu_backtrace()
105 cpu, (void *)instruction_pointer(regs)); in nmi_cpu_backtrace()
107 pr_warn("NMI backtrace for cpu %d\n", cpu); in nmi_cpu_backtrace()
114 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); in nmi_cpu_backtrace()
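nmi_cpu_backtrace() checks whether the current CPU is in the request mask, prints, then clears its bit so the requester can see completion. A minimal sketch of that check (backtrace_request and dump_if_requested are illustrative names):

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/printk.h>

static DECLARE_BITMAP(backtrace_request, NR_CPUS);

static void dump_if_requested(void)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_request))) {
		pr_warn("backtrace requested for cpu %d\n", cpu);
		dump_stack();
		/* Clearing our bit tells the sender this CPU is done. */
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_request));
	}
}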
percpu-refcount.c
175 int cpu; in percpu_ref_switch_to_atomic_rcu() local
177 for_each_possible_cpu(cpu) in percpu_ref_switch_to_atomic_rcu()
178 count += *per_cpu_ptr(percpu_count, cpu); in percpu_ref_switch_to_atomic_rcu()
239 int cpu; in __percpu_ref_switch_to_percpu() local
257 for_each_possible_cpu(cpu) in __percpu_ref_switch_to_percpu()
258 *per_cpu_ptr(percpu_count, cpu) = 0; in __percpu_ref_switch_to_percpu()
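percpu-refcount sums every possible CPU's slot when collapsing to atomic mode, and zeroes the slots again before re-enabling the per-CPU fast path. A sketch of the re-arm half (zero_percpu_slots is an illustrative helper):

#include <linux/percpu.h>
#include <linux/cpumask.h>

static void zero_percpu_slots(unsigned long __percpu *percpu_count)
{
	int cpu;

	/* Stale deltas must not survive the switch back to per-CPU mode. */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;
}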
test_lockup.c
577 unsigned int cpu; in test_lockup_init() local
583 for_each_online_cpu(cpu) { in test_lockup_init()
584 INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn); in test_lockup_init()
585 queue_work_on(cpu, system_highpri_wq, in test_lockup_init()
586 per_cpu_ptr(&test_works, cpu)); in test_lockup_init()
590 for_each_online_cpu(cpu) in test_lockup_init()
591 flush_work(per_cpu_ptr(&test_works, cpu)); in test_lockup_init()
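test_lockup_init() fans one work item out to every online CPU via queue_work_on() and then waits with flush_work(). The same fan-out pattern, with illustrative names (fanout_works, fanout_work_fn):

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(struct work_struct, fanout_works);

static void fanout_work_fn(struct work_struct *work)
{
	/* per-CPU payload runs here, pinned to the queueing CPU */
}

static void run_on_each_online_cpu(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(&fanout_works, cpu), fanout_work_fn);
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&fanout_works, cpu));
	}

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&fanout_works, cpu));
}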
irq_poll.c
188 static int irq_poll_cpu_dead(unsigned int cpu) in irq_poll_cpu_dead() argument
195 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), in irq_poll_cpu_dead()
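irq_poll_cpu_dead() is a CPU-hotplug teardown callback: when a CPU goes away, its per-CPU list is spliced onto the CPU running the callback. A sketch of that rescue pattern (pending_items and pending_cpu_dead are illustrative; callback registration and list initialization are omitted):

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/irqflags.h>

static DEFINE_PER_CPU(struct list_head, pending_items);

static int pending_cpu_dead(unsigned int cpu)
{
	/* The dead CPU can no longer drain its list; take it over locally. */
	local_irq_disable();
	list_splice_init(&per_cpu(pending_items, cpu),
			 this_cpu_ptr(&pending_items));
	local_irq_enable();

	return 0;
}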
debugobjects.c
434 static int object_cpu_offline(unsigned int cpu) in object_cpu_offline() argument
442 percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); in object_cpu_offline()
1001 int cpu, obj_percpu_free = 0; in debug_stats_show() local
1003 for_each_possible_cpu(cpu) in debug_stats_show()
1004 obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu); in debug_stats_show()
1341 int cpu, extras; in debug_objects_mem_init() local
1352 for_each_possible_cpu(cpu) in debug_objects_mem_init()
1353 INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu)); in debug_objects_mem_init()
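debugobjects keeps a small object pool per CPU: an hlist initialized for every possible CPU at boot, and a free counter summed the same way for the stats file. A sketch of both loops (struct obj_pool and obj_pools are illustrative, not the kernel's percpu_obj_pool):

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/cpumask.h>

struct obj_pool {
	struct hlist_head free_objs;
	int obj_free;
};

static DEFINE_PER_CPU(struct obj_pool, obj_pools);

static void obj_pools_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(obj_pools.free_objs, cpu));
}

static int obj_pools_free_count(void)
{
	int cpu, total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(obj_pools.obj_free, cpu);

	return total;
}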
sbitmap.c
581 unsigned int cpu) in sbitmap_queue_clear() argument
606 *per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr; in sbitmap_queue_clear()
test_kasan.c
1235 int cpu; in vmalloc_percpu() local
1245 for_each_possible_cpu(cpu) { in vmalloc_percpu()
1246 char *c_ptr = per_cpu_ptr(ptr, cpu); in vmalloc_percpu()
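The KASAN test allocates a dynamic per-CPU area and then touches each CPU's slot through per_cpu_ptr(). A sketch of that access pattern (touch_each_cpu_slot is an illustrative function):

#include <linux/percpu.h>
#include <linux/mm.h>

static void touch_each_cpu_slot(void)
{
	char __percpu *ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
	int cpu;

	if (!ptr)
		return;

	/* per_cpu_ptr() turns the per-CPU cookie into each CPU's address. */
	for_each_possible_cpu(cpu) {
		char *c_ptr = per_cpu_ptr(ptr, cpu);
		*c_ptr = 'x';
	}

	free_percpu(ptr);
}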
radix-tree.c
1576 static int radix_tree_cpu_dead(unsigned int cpu) in radix_tree_cpu_dead() argument
1582 rtp = &per_cpu(radix_tree_preloads, cpu); in radix_tree_cpu_dead()
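radix_tree_cpu_dead() frees the dead CPU's preload cache and is wired up through the CPU-hotplug state machine. A sketch of how such a teardown-only callback is registered (preload_cpu_dead and the state name are illustrative; the radix tree code uses its own CPUHP_RADIX_DEAD state):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int preload_cpu_dead(unsigned int cpu)
{
	/* Free whatever this CPU had cached per CPU before it went away. */
	return 0;
}

static int __init preload_hotplug_init(void)
{
	return cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
					 "lib/example:dead",
					 NULL, preload_cpu_dead);
}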
Kconfig.debug
494 bool "Force weak per-cpu definitions"
778 Depending on the cpu, kmemleak scan may be cpu intensive and can
2045 KCOV uses preallocated per-cpu areas to collect coverage from
2161 tristate "Per cpu operations test"
2164 Enable this option to build test module which validates per-cpu