Searched refs:cpu (Results 1 – 25 of 148) sorted by relevance

/kernel/
smpboot.c
30 struct task_struct *idle_thread_get(unsigned int cpu) in idle_thread_get() argument
32 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_thread_get()
50 static inline void idle_init(unsigned int cpu) in idle_init() argument
52 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_init()
55 tsk = fork_idle(cpu); in idle_init()
57 pr_err("SMP: fork_idle() failed for CPU %u\n", cpu); in idle_init()
59 per_cpu(idle_threads, cpu) = tsk; in idle_init()
68 unsigned int cpu, boot_cpu; in idle_threads_init() local
72 for_each_possible_cpu(cpu) { in idle_threads_init()
73 if (cpu != boot_cpu) in idle_threads_init()
[all …]
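
The smpboot.c hits above revolve around the per_cpu() accessor and the for_each_possible_cpu() iterator. As an illustration only (my_counter and init_my_counters are hypothetical names, not taken from the results), a minimal sketch of the same pattern:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    /* One instance of the counter exists for every possible CPU. */
    static DEFINE_PER_CPU(unsigned long, my_counter);

    static void init_my_counters(void)
    {
            unsigned int cpu;

            for_each_possible_cpu(cpu)
                    per_cpu(my_counter, cpu) = 0;   /* reset each CPU's copy */
    }
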
cpu.c
125 int (*single)(unsigned int cpu);
126 int (*multi)(unsigned int cpu,
130 int (*single)(unsigned int cpu);
131 int (*multi)(unsigned int cpu,
157 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, in cpuhp_invoke_callback() argument
161 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_callback()
163 int (*cbm)(unsigned int cpu, struct hlist_node *node); in cpuhp_invoke_callback()
164 int (*cb)(unsigned int cpu); in cpuhp_invoke_callback()
181 trace_cpuhp_enter(cpu, st->target, state, cb); in cpuhp_invoke_callback()
182 ret = cb(cpu); in cpuhp_invoke_callback()
[all …]
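
cpuhp_invoke_callback() above dispatches the "single"/"multi" hotplug callbacks for a given CPU and state. A hedged sketch of how such callbacks are typically registered from a driver, using the cpuhp_setup_state() API; my_online, my_offline, and the "example:online" name are made up for illustration:

    #include <linux/init.h>
    #include <linux/cpuhotplug.h>
    #include <linux/printk.h>

    static int my_online(unsigned int cpu)
    {
            pr_info("CPU %u coming online\n", cpu);
            return 0;
    }

    static int my_offline(unsigned int cpu)
    {
            pr_info("CPU %u going offline\n", cpu);
            return 0;
    }

    static int __init my_hotplug_init(void)
    {
            int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                        my_online, my_offline);

            return ret < 0 ? ret : 0;   /* dynamic states return a positive id */
    }
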
smp.c
46 int smpcfd_prepare_cpu(unsigned int cpu) in smpcfd_prepare_cpu() argument
48 struct call_function_data *cfd = &per_cpu(cfd_data, cpu); in smpcfd_prepare_cpu()
51 cpu_to_node(cpu))) in smpcfd_prepare_cpu()
54 cpu_to_node(cpu))) { in smpcfd_prepare_cpu()
68 int smpcfd_dead_cpu(unsigned int cpu) in smpcfd_dead_cpu() argument
70 struct call_function_data *cfd = &per_cpu(cfd_data, cpu); in smpcfd_dead_cpu()
78 int smpcfd_dying_cpu(unsigned int cpu) in smpcfd_dying_cpu() argument
146 int cpu = -1; in csd_lock_wait_toolong() local
156 cpu = csd_lock_wait_getcpu(csd); in csd_lock_wait_toolong()
158 *bug_id, raw_smp_processor_id(), cpu); in csd_lock_wait_toolong()
[all …]
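
The smp.c hits deal with the per-CPU call_function_data used by cross-CPU calls. A minimal sketch of the consumer side of that infrastructure, using the stable smp_call_function_single() API; remote_hello and poke_cpu are hypothetical names:

    #include <linux/smp.h>
    #include <linux/printk.h>

    static void remote_hello(void *info)
    {
            pr_info("hello from CPU %d\n", smp_processor_id());
    }

    static void poke_cpu(int cpu)
    {
            /* Run remote_hello() on @cpu and wait for it to complete. */
            smp_call_function_single(cpu, remote_hello, NULL, 1);
    }
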
stop_machine.c
68 static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) in cpu_stop_queue_work() argument
70 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_queue_work()
114 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) in stop_one_cpu() argument
120 if (!cpu_stop_queue_work(cpu, &work)) in stop_one_cpu()
182 int cpu = smp_processor_id(), err = 0; in multi_cpu_stop() local
195 is_active = cpu == cpumask_first(cpumask); in multi_cpu_stop()
198 is_active = cpumask_test_cpu(cpu, cpumask); in multi_cpu_stop()
358 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, in stop_one_cpu_nowait() argument
362 return cpu_stop_queue_work(cpu, work_buf); in stop_one_cpu_nowait()
381 int stop_one_cpu_async(unsigned int cpu, cpu_stop_fn_t fn, void *arg, in stop_one_cpu_async() argument
[all …]
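
stop_one_cpu(), whose signature appears above, runs a function on the target CPU from that CPU's stopper thread, excluding other work while it runs. A minimal, hypothetical caller (do_quiesce and quiesce_cpu are invented names for illustration):

    #include <linux/stop_machine.h>

    static int do_quiesce(void *arg)
    {
            /* Runs on the target CPU in its stopper thread. */
            return 0;
    }

    static int quiesce_cpu(unsigned int cpu)
    {
            return stop_one_cpu(cpu, do_quiesce, NULL);
    }
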
softirq.c
412 int cpu = smp_processor_id(); in tick_irq_exit() local
415 if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) { in tick_irq_exit()
641 int cpu; in softirq_init() local
643 for_each_possible_cpu(cpu) { in softirq_init()
644 per_cpu(tasklet_vec, cpu).tail = in softirq_init()
645 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
646 per_cpu(tasklet_hi_vec, cpu).tail = in softirq_init()
647 &per_cpu(tasklet_hi_vec, cpu).head; in softirq_init()
654 static int ksoftirqd_should_run(unsigned int cpu) in ksoftirqd_should_run() argument
659 static void run_ksoftirqd(unsigned int cpu) in run_ksoftirqd() argument
[all …]
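
ksoftirqd_should_run() and run_ksoftirqd() above are the per-CPU kthread hooks. Roughly how such a pair is wired into a per-CPU hotplug thread, sketched from memory against <linux/smpboot.h> (field names may differ between kernel versions; all my_* names are hypothetical):

    #include <linux/smpboot.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(struct task_struct *, my_kthread);

    static int my_should_run(unsigned int cpu)
    {
            return 0;               /* no work pending in this sketch */
    }

    static void my_thread_fn(unsigned int cpu)
    {
            /* per-CPU work would go here */
    }

    static struct smp_hotplug_thread my_threads = {
            .store                  = &my_kthread,
            .thread_should_run      = my_should_run,
            .thread_fn              = my_thread_fn,
            .thread_comm            = "mythread/%u",
    };

    /* smpboot_register_percpu_thread(&my_threads) would spawn one kthread
     * per online CPU, much as spawn_ksoftirqd() does for ksoftirqd. */
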
/kernel/time/
tick-broadcast.c
39 static void tick_broadcast_clear_oneshot(int cpu);
42 static void tick_broadcast_oneshot_offline(unsigned int cpu);
46 static inline void tick_broadcast_clear_oneshot(int cpu) { } in tick_broadcast_clear_oneshot() argument
49 static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { } in tick_broadcast_oneshot_offline() argument
66 static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu);
68 const struct clock_event_device *tick_get_wakeup_device(int cpu) in tick_get_wakeup_device() argument
70 return tick_get_oneshot_wakeup_device(cpu); in tick_get_wakeup_device()
101 static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu) in tick_get_oneshot_wakeup_device() argument
103 return per_cpu(tick_oneshot_wakeup_device, cpu); in tick_get_oneshot_wakeup_device()
116 int cpu) in tick_set_oneshot_wakeup_device() argument
[all …]
tick-common.c
62 struct tick_device *tick_get_device(int cpu) in tick_get_device() argument
64 return &per_cpu(tick_cpu_device, cpu); in tick_get_device()
84 static void tick_periodic(int cpu) in tick_periodic() argument
86 if (tick_do_timer_cpu == cpu) { in tick_periodic()
109 int cpu = smp_processor_id(); in tick_handle_periodic() local
112 tick_periodic(cpu); in tick_handle_periodic()
145 tick_periodic(cpu); in tick_handle_periodic()
185 int cpu = *(unsigned int *)info; in giveup_do_timer() local
189 tick_do_timer_cpu = cpu; in giveup_do_timer()
194 int cpu = smp_processor_id(); in tick_take_do_timer_from_boot() local
[all …]
tick-sched.c
41 struct tick_sched *tick_get_tick_sched(int cpu) in tick_get_tick_sched() argument
43 return &per_cpu(tick_cpu_sched, cpu); in tick_get_tick_sched()
156 int cpu = smp_processor_id(); in tick_sched_do_timer() local
173 tick_do_timer_cpu = cpu; in tick_sched_do_timer()
178 if (tick_do_timer_cpu == cpu) { in tick_sched_do_timer()
273 static bool can_stop_full_tick(int cpu, struct tick_sched *ts) in can_stop_full_tick() argument
277 if (unlikely(!cpu_online(cpu))) in can_stop_full_tick()
323 void tick_nohz_full_kick_cpu(int cpu) in tick_nohz_full_kick_cpu() argument
325 if (!tick_nohz_full_cpu(cpu)) in tick_nohz_full_kick_cpu()
328 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); in tick_nohz_full_kick_cpu()
[all …]
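
tick_nohz_full_kick_cpu() above queues a per-CPU irq_work on a remote CPU with irq_work_queue_on(). The same pattern in isolation, as a hedged sketch (my_kick_work, my_kick_fn, and the helper names are hypothetical):

    #include <linux/irq_work.h>
    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    static DEFINE_PER_CPU(struct irq_work, my_kick_work);

    static void my_kick_fn(struct irq_work *work)
    {
            /* Runs in interrupt context on the CPU the work was queued on. */
    }

    static void my_kick_init(void)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    init_irq_work(per_cpu_ptr(&my_kick_work, cpu), my_kick_fn);
    }

    static void my_kick_cpu(int cpu)
    {
            irq_work_queue_on(&per_cpu(my_kick_work, cpu), cpu);
    }
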
timer_list.c
21 int cpu; member
130 static void print_cpu(struct seq_file *m, int cpu, u64 now) in print_cpu() argument
132 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); in print_cpu()
135 SEQ_printf(m, "cpu: %d\n", cpu); in print_cpu()
166 struct tick_sched *ts = tick_get_tick_sched(cpu); in print_cpu()
193 print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) in print_tickdevice() argument
200 if (cpu < 0) in print_tickdevice()
203 SEQ_printf(m, "Per CPU device: %d\n", cpu); in print_tickdevice()
261 if (cpu >= 0) { in print_tickdevice()
262 const struct clock_event_device *wd = tick_get_wakeup_device(cpu); in print_tickdevice()
[all …]
/kernel/sched/
cpudeadline.c
30 int orig_cpu = cp->elements[idx].cpu; in cpudl_heapify_down()
58 cp->elements[idx].cpu = cp->elements[largest].cpu; in cpudl_heapify_down()
60 cp->elements[cp->elements[idx].cpu].idx = idx; in cpudl_heapify_down()
64 cp->elements[idx].cpu = orig_cpu; in cpudl_heapify_down()
66 cp->elements[cp->elements[idx].cpu].idx = idx; in cpudl_heapify_down()
73 int orig_cpu = cp->elements[idx].cpu; in cpudl_heapify_up()
84 cp->elements[idx].cpu = cp->elements[p].cpu; in cpudl_heapify_up()
86 cp->elements[cp->elements[idx].cpu].idx = idx; in cpudl_heapify_up()
90 cp->elements[idx].cpu = orig_cpu; in cpudl_heapify_up()
92 cp->elements[cp->elements[idx].cpu].idx = idx; in cpudl_heapify_up()
[all …]
topology.c
39 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, in sched_domain_debug_one() argument
52 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
53 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one()
55 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { in sched_domain_debug_one()
56 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one()
135 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug() argument
143 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug()
147 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug()
150 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) in sched_domain_debug()
161 # define sched_domain_debug(sd, cpu) do { } while (0) argument
[all …]
cpupri.c
52 unsigned int cpu = cpumask_first(lowest_mask); in drop_nopreempt_cpus() local
53 while (cpu < nr_cpu_ids) { in drop_nopreempt_cpus()
55 struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr); in drop_nopreempt_cpus()
56 if (task_may_not_preempt(task, cpu)) { in drop_nopreempt_cpus()
57 cpumask_clear_cpu(cpu, lowest_mask); in drop_nopreempt_cpus()
59 cpu = cpumask_next(cpu, lowest_mask); in drop_nopreempt_cpus()
149 bool (*fitness_fn)(struct task_struct *p, int cpu)) in cpupri_find_fitness()
152 int idx, cpu; in cpupri_find_fitness() local
169 for_each_cpu(cpu, lowest_mask) { in cpupri_find_fitness()
170 if (!fitness_fn(p, cpu)) in cpupri_find_fitness()
[all …]
cpuacct.c
95 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu, in cpuacct_cpuusage_read() argument
98 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_read()
99 u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat; in cpuacct_cpuusage_read()
112 raw_spin_lock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_read()
129 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_read()
135 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu) in cpuacct_cpuusage_write() argument
137 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_write()
138 u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat; in cpuacct_cpuusage_write()
148 raw_spin_lock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_write()
156 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_write()
[all …]
/kernel/bpf/
percpu_freelist.c
8 int cpu; in pcpu_freelist_init() local
14 for_each_possible_cpu(cpu) { in pcpu_freelist_init()
15 struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu); in pcpu_freelist_init()
59 int cpu, orig_cpu; in ___pcpu_freelist_push_nmi() local
61 orig_cpu = cpu = raw_smp_processor_id(); in ___pcpu_freelist_push_nmi()
65 head = per_cpu_ptr(s->freelist, cpu); in ___pcpu_freelist_push_nmi()
71 cpu = cpumask_next(cpu, cpu_possible_mask); in ___pcpu_freelist_push_nmi()
72 if (cpu >= nr_cpu_ids) in ___pcpu_freelist_push_nmi()
73 cpu = 0; in ___pcpu_freelist_push_nmi()
76 if (cpu == orig_cpu && in ___pcpu_freelist_push_nmi()
[all …]
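
The NMI-safe push in ___pcpu_freelist_push_nmi() above round-robins over the possible CPUs, wrapping when cpumask_next() runs past nr_cpu_ids (the original simply resets to CPU 0). That wrap-around idiom in isolation, with next_possible_cpu as a hypothetical helper:

    #include <linux/cpumask.h>

    static unsigned int next_possible_cpu(unsigned int cpu)
    {
            cpu = cpumask_next(cpu, cpu_possible_mask);
            if (cpu >= nr_cpu_ids)          /* walked off the end of the mask */
                    cpu = cpumask_first(cpu_possible_mask);
            return cpu;
    }
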
/kernel/irq/
matrix.c
132 unsigned int cpu, best_cpu, maxavl = 0; in matrix_find_best_cpu() local
137 for_each_cpu(cpu, msk) { in matrix_find_best_cpu()
138 cm = per_cpu_ptr(m->maps, cpu); in matrix_find_best_cpu()
143 best_cpu = cpu; in matrix_find_best_cpu()
153 unsigned int cpu, best_cpu, allocated = UINT_MAX; in matrix_find_best_cpu_managed() local
158 for_each_cpu(cpu, msk) { in matrix_find_best_cpu_managed()
159 cm = per_cpu_ptr(m->maps, cpu); in matrix_find_best_cpu_managed()
164 best_cpu = cpu; in matrix_find_best_cpu_managed()
212 unsigned int cpu, failed_cpu; in irq_matrix_reserve_managed() local
214 for_each_cpu(cpu, msk) { in irq_matrix_reserve_managed()
[all …]
ipi.c
162 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu) in ipi_get_hwirq() argument
167 if (!data || !ipimask || cpu >= nr_cpu_ids) in ipi_get_hwirq()
170 if (!cpumask_test_cpu(cpu, ipimask)) in ipi_get_hwirq()
180 data = irq_get_irq_data(irq + cpu - data->common->ipi_offset); in ipi_get_hwirq()
187 const struct cpumask *dest, unsigned int cpu) in ipi_send_verify() argument
197 if (cpu >= nr_cpu_ids) in ipi_send_verify()
204 if (!cpumask_test_cpu(cpu, ipimask)) in ipi_send_verify()
221 int __ipi_send_single(struct irq_desc *desc, unsigned int cpu) in __ipi_send_single() argument
232 if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu))) in __ipi_send_single()
236 chip->ipi_send_mask(data, cpumask_of(cpu)); in __ipi_send_single()
[all …]
/kernel/debug/
debug_core.c
249 int cpu; in kgdb_roundup_cpus() local
252 for_each_online_cpu(cpu) { in kgdb_roundup_cpus()
254 if (cpu == this_cpu) in kgdb_roundup_cpus()
257 csd = &per_cpu(kgdb_roundup_csd, cpu); in kgdb_roundup_cpus()
267 if (kgdb_info[cpu].rounding_up) in kgdb_roundup_cpus()
269 kgdb_info[cpu].rounding_up = true; in kgdb_roundup_cpus()
272 ret = smp_call_function_single_async(cpu, csd); in kgdb_roundup_cpus()
274 kgdb_info[cpu].rounding_up = false; in kgdb_roundup_cpus()
472 void kdb_dump_stack_on_cpu(int cpu) in kdb_dump_stack_on_cpu() argument
474 if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) { in kdb_dump_stack_on_cpu()
[all …]
/kernel/cgroup/
rstat.c
9 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
11 static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu) in cgroup_rstat_cpu() argument
13 return per_cpu_ptr(cgrp->rstat_cpu, cpu); in cgroup_rstat_cpu()
25 void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) in cgroup_rstat_updated() argument
27 raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu); in cgroup_rstat_updated()
43 if (cgroup_rstat_cpu(cgrp, cpu)->updated_next) in cgroup_rstat_updated()
51 struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu); in cgroup_rstat_updated()
52 struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu); in cgroup_rstat_updated()
84 struct cgroup *root, int cpu) in cgroup_rstat_cpu_pop_updated() argument
102 rstatc = cgroup_rstat_cpu(pos, cpu); in cgroup_rstat_cpu_pop_updated()
[all …]
/kernel/trace/
ring_buffer.c
279 #define for_each_buffer_cpu(buffer, cpu) \ argument
280 for_each_cpu(cpu, buffer->cpumask)
282 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
283 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
495 int cpu; member
767 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
769 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
779 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
785 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
786 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
[all …]
trace_functions_graph.c
37 int cpu; member
48 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
134 int cpu; in trace_graph_entry() local
173 cpu = raw_smp_processor_id(); in trace_graph_entry()
174 data = per_cpu_ptr(tr->array_buffer.data, cpu); in trace_graph_entry()
243 int cpu; in trace_graph_return() local
254 cpu = raw_smp_processor_id(); in trace_graph_return()
255 data = per_cpu_ptr(tr->array_buffer.data, cpu); in trace_graph_return()
333 static void print_graph_cpu(struct trace_seq *s, int cpu) in print_graph_cpu() argument
340 trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); in print_graph_cpu()
[all …]
trace_kdb.c
27 int cnt = 0, cpu; in ftrace_dump_buf() local
44 for_each_tracing_cpu(cpu) { in ftrace_dump_buf()
45 iter.buffer_iter[cpu] = in ftrace_dump_buf()
47 cpu, GFP_ATOMIC); in ftrace_dump_buf()
48 ring_buffer_read_start(iter.buffer_iter[cpu]); in ftrace_dump_buf()
49 tracing_iter_reset(&iter, cpu); in ftrace_dump_buf()
84 for_each_tracing_cpu(cpu) { in ftrace_dump_buf()
85 if (iter.buffer_iter[cpu]) { in ftrace_dump_buf()
86 ring_buffer_read_finish(iter.buffer_iter[cpu]); in ftrace_dump_buf()
87 iter.buffer_iter[cpu] = NULL; in ftrace_dump_buf()
[all …]
/kernel/events/
hw_breakpoint.c
52 static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type) in get_bp_info() argument
54 return per_cpu_ptr(bp_cpuinfo + type, cpu); in get_bp_info()
88 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) in max_task_bp_pinned() argument
90 unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned; in max_task_bp_pinned()
105 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) in task_bp_pinned() argument
114 (iter->cpu < 0 || cpu == iter->cpu)) in task_bp_pinned()
123 if (bp->cpu >= 0) in cpumask_of_bp()
124 return cpumask_of(bp->cpu); in cpumask_of_bp()
137 int cpu; in fetch_bp_busy_slots() local
139 for_each_cpu(cpu, cpumask) { in fetch_bp_busy_slots()
[all …]
/kernel/debug/kdb/
kdb_bt.c
107 kdb_bt_cpu(unsigned long cpu) in kdb_bt_cpu() argument
111 if (cpu >= num_possible_cpus() || !cpu_online(cpu)) { in kdb_bt_cpu()
112 kdb_printf("WARNING: no process for cpu %ld\n", cpu); in kdb_bt_cpu()
117 kdb_tsk = KDB_TSK(cpu); in kdb_bt_cpu()
119 kdb_printf("WARNING: no task for cpu %ld\n", cpu); in kdb_bt_cpu()
140 unsigned long cpu; in kdb_bt() local
146 for_each_online_cpu(cpu) { in kdb_bt()
147 p = kdb_curr_task(cpu); in kdb_bt()
181 unsigned long cpu = ~0; in kdb_bt() local
185 diag = kdbgetularg((char *)argv[1], &cpu); in kdb_bt()
[all …]
/kernel/rcu/
tree_exp.h
233 int cpu; in rcu_report_exp_cpu_mult() local
243 for_each_leaf_node_cpu_mask(rnp, cpu, mask) { in rcu_report_exp_cpu_mult()
244 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_exp_cpu_mult()
248 tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP); in rcu_report_exp_cpu_mult()
339 int cpu; in sync_rcu_exp_select_node_cpus() local
352 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { in sync_rcu_exp_select_node_cpus()
353 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in sync_rcu_exp_select_node_cpus()
357 if (raw_smp_processor_id() == cpu || in sync_rcu_exp_select_node_cpus()
380 for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) { in sync_rcu_exp_select_node_cpus()
381 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in sync_rcu_exp_select_node_cpus()
[all …]
tree_stall.h
329 int cpu; in rcu_dump_cpu_stacks() local
335 for_each_leaf_node_possible_cpu(rnp, cpu) in rcu_dump_cpu_stacks()
336 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) in rcu_dump_cpu_stacks()
337 if (!trigger_single_cpu_backtrace(cpu)) in rcu_dump_cpu_stacks()
338 dump_cpu_task(cpu); in rcu_dump_cpu_stacks()
345 static void print_cpu_stall_fast_no_hz(char *cp, int cpu) in print_cpu_stall_fast_no_hz() argument
347 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); in print_cpu_stall_fast_no_hz()
356 static void print_cpu_stall_fast_no_hz(char *cp, int cpu) in print_cpu_stall_fast_no_hz() argument
406 static void print_cpu_stall_info(int cpu) in print_cpu_stall_info() argument
411 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in print_cpu_stall_info()
[all …]
