
Searched refs:cpu (Results 1 – 12 of 12) sorted by relevance

/lib/
cpu_rmap.c
28 unsigned int cpu; in alloc_cpu_rmap() local
52 for_each_possible_cpu(cpu) { in alloc_cpu_rmap()
53 rmap->near[cpu].index = cpu % size; in alloc_cpu_rmap()
54 rmap->near[cpu].dist = CPU_RMAP_DIST_INF; in alloc_cpu_rmap()
94 static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu, in cpu_rmap_copy_neigh() argument
100 if (rmap->near[cpu].dist > dist && in cpu_rmap_copy_neigh()
102 rmap->near[cpu].index = rmap->near[neigh].index; in cpu_rmap_copy_neigh()
103 rmap->near[cpu].dist = dist; in cpu_rmap_copy_neigh()
114 unsigned int cpu; in debug_print_rmap() local
118 for_each_possible_cpu(cpu) { in debug_print_rmap()
[all …]
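
The cpu_rmap.c hits show alloc_cpu_rmap() seeding every possible CPU's nearest-object index and distance. The usual consumer is a driver building a reverse IRQ-to-CPU map for accelerated RFS; a minimal sketch of that pattern follows (nr_queues and queue_irq[] are hypothetical driver state, not from lib/cpu_rmap.c):

#include <linux/cpu_rmap.h>
#include <linux/interrupt.h>

/* Hypothetical driver setup: one rmap entry per RX queue IRQ. */
static struct cpu_rmap *rx_rmap;

static int setup_rx_cpu_rmap(unsigned int nr_queues, int *queue_irq)
{
        unsigned int i;
        int err;

        rx_rmap = alloc_irq_cpu_rmap(nr_queues);
        if (!rx_rmap)
                return -ENOMEM;

        for (i = 0; i < nr_queues; i++) {
                /* Track each IRQ's effective affinity in the reverse map. */
                err = irq_cpu_rmap_add(rx_rmap, queue_irq[i]);
                if (err) {
                        free_irq_cpu_rmap(rx_rmap);
                        return err;
                }
        }
        return 0;
}
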
cpumask.c
53 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) in cpumask_any_but() argument
57 cpumask_check(cpu); in cpumask_any_but()
59 if (i != cpu) in cpumask_any_but()
208 int cpu; in cpumask_local_spread() local
214 for_each_cpu(cpu, cpu_online_mask) in cpumask_local_spread()
216 return cpu; in cpumask_local_spread()
219 for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) in cpumask_local_spread()
221 return cpu; in cpumask_local_spread()
223 for_each_cpu(cpu, cpu_online_mask) { in cpumask_local_spread()
225 if (cpumask_test_cpu(cpu, cpumask_of_node(node))) in cpumask_local_spread()
[all …]
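
cpumask_local_spread() in the hits above walks cpu_online_mask, preferring CPUs on the requested NUMA node before falling back to the remaining online CPUs. A hedged sketch of the common caller pattern (spread_queue_affinity(), nr_queues and queue_irq[] are illustrative names):

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/interrupt.h>

static void spread_queue_affinity(struct device *dev, unsigned int nr_queues,
                                  int *queue_irq)
{
        unsigned int i;

        for (i = 0; i < nr_queues; i++) {
                /* Pick the i-th CPU, preferring the device's NUMA node. */
                int cpu = cpumask_local_spread(i, dev_to_node(dev));

                irq_set_affinity_hint(queue_irq[i], cpumask_of(cpu));
        }
}
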
test_vmalloc.c
362 int cpu; member
391 if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0) in test_func()
392 pr_err("Failed to set affinity to %d CPU\n", t->cpu); in test_func()
418 per_cpu_test_data[t->cpu][index].test_passed++; in test_func()
420 per_cpu_test_data[t->cpu][index].test_failed++; in test_func()
429 per_cpu_test_data[t->cpu][index].time = delta; in test_func()
469 int cpu, ret; in do_concurrent_test() local
481 for_each_cpu(cpu, &cpus_run_test_mask) { in do_concurrent_test()
482 struct test_driver *t = &per_cpu_test_driver[cpu]; in do_concurrent_test()
484 t->cpu = cpu; in do_concurrent_test()
[all …]
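
test_vmalloc.c pins each worker thread to its assigned CPU with set_cpus_allowed_ptr() so the per-CPU result slots it writes stay attributable to that CPU. A minimal sketch of the same pinning pattern, assuming a hypothetical worker_fn() started with kthread_run():

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/printk.h>

static int worker_fn(void *data)
{
        int cpu = (long)data;

        /* Restrict this kthread to a single CPU before running the workload. */
        if (set_cpus_allowed_ptr(current, cpumask_of(cpu)) < 0)
                pr_err("failed to bind worker to CPU %d\n", cpu);

        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);

        return 0;
}

/* Usage: kthread_run(worker_fn, (void *)(long)cpu, "worker/%d", cpu); */
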
percpu_counter.c
62 int cpu; in percpu_counter_set() local
66 for_each_possible_cpu(cpu) { in percpu_counter_set()
67 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_set()
108 int cpu; in __percpu_counter_sum() local
113 for_each_online_cpu(cpu) { in __percpu_counter_sum()
114 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in __percpu_counter_sum()
168 static int compute_batch_value(unsigned int cpu) in compute_batch_value() argument
176 static int percpu_counter_cpu_dead(unsigned int cpu) in percpu_counter_cpu_dead() argument
181 compute_batch_value(cpu); in percpu_counter_cpu_dead()
188 pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_cpu_dead()
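
The percpu_counter.c hits are the slow paths that walk every CPU's s32 slot; normal callers stay on the fast-path API. A hedged usage sketch (nr_items and the helper names are illustrative):

#include <linux/percpu_counter.h>

static struct percpu_counter nr_items;

static int items_init(void)
{
        /* Start at 0; each CPU gets its own s32 slack below the batch size. */
        return percpu_counter_init(&nr_items, 0, GFP_KERNEL);
}

static void item_added(void)
{
        percpu_counter_inc(&nr_items);  /* cheap, usually CPU-local */
}

static s64 items_exact(void)
{
        /* Folds every CPU's slot into the shared count, as in __percpu_counter_sum(). */
        return percpu_counter_sum(&nr_items);
}

static void items_exit(void)
{
        percpu_counter_destroy(&nr_items);
}
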
nmi_backtrace.c
90 int cpu = smp_processor_id(); in nmi_cpu_backtrace() local
92 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { in nmi_cpu_backtrace()
95 cpu, (void *)instruction_pointer(regs)); in nmi_cpu_backtrace()
97 pr_warn("NMI backtrace for cpu %d\n", cpu); in nmi_cpu_backtrace()
103 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); in nmi_cpu_backtrace()
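
nmi_cpu_backtrace() is the per-CPU half; the usual entry point is one of the trigger helpers, e.g. from a stall detector. A minimal sketch, assuming a hypothetical report_stall() caller:

#include <linux/nmi.h>
#include <linux/printk.h>

static void report_stall(void)
{
        /* Ask every CPU (via NMI where supported) to print its backtrace;
         * each CPU ends up in nmi_cpu_backtrace(). */
        if (!trigger_all_cpu_backtrace())
                pr_warn("CPU backtrace not supported here\n");
}
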
dump_stack.c
93 int cpu; in dump_stack() local
101 cpu = smp_processor_id(); in dump_stack()
102 old = atomic_cmpxchg(&dump_lock, -1, cpu); in dump_stack()
105 } else if (old == cpu) { in dump_stack()
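
The dump_stack() hits show it recording the calling CPU and taking an atomic_cmpxchg()-based lock so dumps from different CPUs do not interleave; callers simply invoke it from an error path. A trivial hedged sketch (MAX_STATE and validate_state() are placeholders):

#include <linux/printk.h>

#define MAX_STATE 8     /* placeholder limit for this sketch */

static void validate_state(int state)
{
        if (state < 0 || state > MAX_STATE) {
                pr_err("unexpected state %d\n", state);
                dump_stack();   /* serialized across CPUs via the dump_lock cmpxchg */
        }
}
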
percpu-refcount.c
138 int cpu; in percpu_ref_switch_to_atomic_rcu() local
140 for_each_possible_cpu(cpu) in percpu_ref_switch_to_atomic_rcu()
141 count += *per_cpu_ptr(percpu_count, cpu); in percpu_ref_switch_to_atomic_rcu()
197 int cpu; in __percpu_ref_switch_to_percpu() local
215 for_each_possible_cpu(cpu) in __percpu_ref_switch_to_percpu()
216 *per_cpu_ptr(percpu_count, cpu) = 0; in __percpu_ref_switch_to_percpu()
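
The percpu-refcount.c hits are the mode switches that sum or zero each CPU's counter slot. Typical users only init, get/put and eventually kill the ref; a hedged sketch (req_ref and its helpers are illustrative names):

#include <linux/percpu-refcount.h>
#include <linux/printk.h>
#include <linux/gfp.h>

static struct percpu_ref req_ref;

static void req_ref_release(struct percpu_ref *ref)
{
        /* Called once the ref has been killed and all holders dropped it. */
        pr_info("last reference dropped\n");
}

static int req_ref_setup(void)
{
        return percpu_ref_init(&req_ref, req_ref_release, 0, GFP_KERNEL);
}

static void req_ref_teardown(void)
{
        /* Switches to atomic mode (summing the per-CPU slots) and drops the base ref. */
        percpu_ref_kill(&req_ref);
}
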
irq_poll.c
188 static int irq_poll_cpu_dead(unsigned int cpu) in irq_poll_cpu_dead() argument
195 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), in irq_poll_cpu_dead()
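
irq_poll_cpu_dead() splices a dead CPU's pending iopoll entries onto the current CPU's list; drivers only see the init/sched/complete API. A hedged sketch of that usage, with a placeholder poll budget of 32 and the device-specific completion handling omitted:

#include <linux/irq_poll.h>
#include <linux/interrupt.h>

static struct irq_poll my_iopoll;

static int my_poll(struct irq_poll *iop, int budget)
{
        int done = 0;

        /* Drain up to 'budget' completions here (device-specific, omitted). */

        if (done < budget)
                irq_poll_complete(iop); /* nothing left, re-enable interrupts */
        return done;
}

static void my_init(void)
{
        irq_poll_init(&my_iopoll, 32, my_poll);
}

static irqreturn_t my_irq(int irq, void *data)
{
        irq_poll_sched(&my_iopoll);     /* defer the heavy lifting to softirq context */
        return IRQ_HANDLED;
}
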
debugobjects.c
1002 int cpu, obj_percpu_free = 0; in debug_stats_show() local
1004 for_each_possible_cpu(cpu) in debug_stats_show()
1005 obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu); in debug_stats_show()
1351 int cpu, extras; in debug_objects_mem_init() local
1362 for_each_possible_cpu(cpu) in debug_objects_mem_init()
1363 INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu)); in debug_objects_mem_init()
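
Both debugobjects.c hits iterate every possible CPU to aggregate or initialize a field of a per-CPU structure via per_cpu(). A minimal generic sketch of that pattern (the 'frees' counter is hypothetical, not the debugobjects pool):

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU statistic. */
static DEFINE_PER_CPU(unsigned long, frees);

static void count_free(void)
{
        this_cpu_inc(frees);            /* fast, no locking needed */
}

static unsigned long total_frees(void)
{
        unsigned long sum = 0;
        int cpu;

        /* Walk every possible CPU's copy, as debug_stats_show() does. */
        for_each_possible_cpu(cpu)
                sum += per_cpu(frees, cpu);

        return sum;
}
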
sbitmap.c
577 unsigned int cpu) in sbitmap_queue_clear() argument
602 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; in sbitmap_queue_clear()
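
sbitmap_queue_clear() stores the freed bit number in the given CPU's alloc hint so the next allocation from that CPU starts nearby. A hedged sketch of the surrounding tag allocate/free pattern (the depth of 128 and the helper names are illustrative):

#include <linux/sbitmap.h>
#include <linux/smp.h>

static struct sbitmap_queue tags;

static int tags_init(void)
{
        /* 128 tags, default shift, no strict round-robin. */
        return sbitmap_queue_init_node(&tags, 128, -1, false,
                                       GFP_KERNEL, NUMA_NO_NODE);
}

static int tag_get(void)
{
        return __sbitmap_queue_get(&tags);      /* -1 if none free */
}

static void tag_put(unsigned int tag)
{
        /* Frees the bit and records it as this CPU's allocation hint. */
        sbitmap_queue_clear(&tags, tag, raw_smp_processor_id());
}
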
radix-tree.c
1587 static int radix_tree_cpu_dead(unsigned int cpu) in radix_tree_cpu_dead() argument
1593 rtp = &per_cpu(radix_tree_preloads, cpu); in radix_tree_cpu_dead()
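
radix_tree_cpu_dead() frees the preallocated nodes a dead CPU had stashed via radix_tree_preload(). A hedged sketch of the preload-then-insert pattern those per-CPU stashes exist for (my_tree, store_item() and the external lock are placeholders):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);         /* inserts done under a lock */

static int store_item(unsigned long index, void *item, spinlock_t *lock)
{
        int err;

        /* Preload this CPU's node stash with a sleeping allocation... */
        err = radix_tree_preload(GFP_KERNEL);
        if (err)
                return err;

        /* ...so the insert itself cannot fail on node allocation. */
        spin_lock(lock);
        err = radix_tree_insert(&my_tree, index, item);
        spin_unlock(lock);

        radix_tree_preload_end();       /* re-enables preemption */
        return err;
}
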
Kconfig.debug
388 bool "Force weak per-cpu definitions"
610 Depending on the cpu, kmemleak scan may be cpu intensive and can
1757 tristate "Per cpu operations test"
1760 Enable this option to build test module which validates per-cpu
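
The last Kconfig.debug hit refers to the per-cpu operations test module; the operations such a module exercises are the this_cpu_*() accessors. A tiny hedged sketch of that kind of check (the values and the preempt_disable() window are illustrative, not copied from lib/percpu_test.c):

#include <linux/percpu.h>

static DEFINE_PER_CPU(long, test_var);

static void percpu_ops_smoke_test(void)
{
        long seen;

        preempt_disable();      /* keep all three ops on one CPU's copy */
        this_cpu_write(test_var, 3);
        this_cpu_add(test_var, 4);
        seen = this_cpu_read(test_var);
        preempt_enable();

        WARN_ON(seen != 7);     /* per-CPU read-modify-write should be coherent */
}
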