/kernel/ |
D | smpboot.c |
    30 struct task_struct *idle_thread_get(unsigned int cpu)  in idle_thread_get() argument
    32 struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_thread_get()
    50 static __always_inline void idle_init(unsigned int cpu)  in idle_init() argument
    52 struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_init()
    55 tsk = fork_idle(cpu);  in idle_init()
    57 pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);  in idle_init()
    59 per_cpu(idle_threads, cpu) = tsk;  in idle_init()
    68 unsigned int cpu, boot_cpu;  in idle_threads_init() local
    72 for_each_possible_cpu(cpu) {  in idle_threads_init()
    73 if (cpu != boot_cpu)  in idle_threads_init()
    [all …]
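The idle_threads lines above follow the standard per-CPU variable pattern: a DEFINE_PER_CPU() slot read and written with per_cpu(var, cpu) while iterating for_each_possible_cpu(). Below is a minimal sketch of that pattern as a standalone module; the demo_* names are invented for illustration and are not part of smpboot.c.

#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>

/* One task pointer slot per CPU, analogous to idle_threads in smpboot.c. */
static DEFINE_PER_CPU(struct task_struct *, demo_threads);

static int __init demo_percpu_init(void)
{
	unsigned int cpu;

	/* Walk every possible CPU and read its private slot. */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = per_cpu(demo_threads, cpu);

		pr_info("cpu%u slot: %s\n", cpu, tsk ? tsk->comm : "<empty>");
	}
	return 0;
}

static void __exit demo_percpu_exit(void) { }

module_init(demo_percpu_init);
module_exit(demo_percpu_exit);
MODULE_LICENSE("GPL");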
|
D | cpu.c |
    126 int (*single)(unsigned int cpu);
    127 int (*multi)(unsigned int cpu,
    131 int (*single)(unsigned int cpu);
    132 int (*multi)(unsigned int cpu,
    167 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,  in cpuhp_invoke_callback() argument
    171 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_invoke_callback()
    173 int (*cbm)(unsigned int cpu, struct hlist_node *node);  in cpuhp_invoke_callback()
    174 int (*cb)(unsigned int cpu);  in cpuhp_invoke_callback()
    191 trace_cpuhp_enter(cpu, st->target, state, cb);  in cpuhp_invoke_callback()
    192 ret = cb(cpu);  in cpuhp_invoke_callback()
    [all …]
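cpuhp_invoke_callback() is the dispatcher that runs the single/multi callback pointers shown above as a CPU steps through hotplug states. From the consumer side, such callbacks are normally registered with cpuhp_setup_state(); the sketch below shows that public API in isolation and is not code from cpu.c, with all demo_* names invented.

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/module.h>

/* Called on each CPU as it is brought up. */
static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu%u is coming online\n", cpu);
	return 0;
}

/* Called on each CPU as it is torn down. */
static int demo_cpu_offline(unsigned int cpu)
{
	pr_info("demo: cpu%u is going offline\n", cpu);
	return 0;
}

static int demo_hp_state;

static int __init demo_hp_init(void)
{
	/* Dynamic state: the online callback also runs for CPUs already up. */
	demo_hp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
					  demo_cpu_online, demo_cpu_offline);
	return demo_hp_state < 0 ? demo_hp_state : 0;
}

static void __exit demo_hp_exit(void)
{
	cpuhp_remove_state(demo_hp_state);
}

module_init(demo_hp_init);
module_exit(demo_hp_exit);
MODULE_LICENSE("GPL");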
|
D | smp.c |
    101 int smpcfd_prepare_cpu(unsigned int cpu)  in smpcfd_prepare_cpu() argument
    103 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);  in smpcfd_prepare_cpu()
    106 cpu_to_node(cpu)))  in smpcfd_prepare_cpu()
    109 cpu_to_node(cpu))) {  in smpcfd_prepare_cpu()
    123 int smpcfd_dead_cpu(unsigned int cpu)  in smpcfd_dead_cpu() argument
    125 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);  in smpcfd_dead_cpu()
    133 int smpcfd_dying_cpu(unsigned int cpu)  in smpcfd_dying_cpu() argument
    289 static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)  in csd_lock_print_extended() argument
    291 struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);  in csd_lock_print_extended()
    294 struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);  in csd_lock_print_extended()
    [all …]
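smp.c implements the cross-call machinery behind the cfd_data/call_function_data lines above. The usual entry point for callers is smp_call_function_single(); a minimal, hypothetical caller (demo_* names invented) might look like this:

#include <linux/module.h>
#include <linux/smp.h>

static void demo_remote_func(void *info)
{
	/* Runs in IPI context on the target CPU with preemption disabled. */
	pr_info("demo: running on cpu%d\n", smp_processor_id());
}

static int __init demo_smp_init(void)
{
	/* Run demo_remote_func() on CPU 0 and wait (last argument) for it to finish. */
	return smp_call_function_single(0, demo_remote_func, NULL, 1);
}

static void __exit demo_smp_exit(void) { }

module_init(demo_smp_init);
module_exit(demo_smp_exit);
MODULE_LICENSE("GPL");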
|
D | stop_machine.c |
    93 static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)  in cpu_stop_queue_work() argument
    95 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);  in cpu_stop_queue_work()
    139 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)  in stop_one_cpu() argument
    145 if (!cpu_stop_queue_work(cpu, &work))  in stop_one_cpu()
    208 int cpu = smp_processor_id(), err = 0;  in multi_cpu_stop() local
    221 is_active = cpu == cpumask_first(cpumask);  in multi_cpu_stop()
    224 is_active = cpumask_test_cpu(cpu, cpumask);  in multi_cpu_stop()
    385 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,  in stop_one_cpu_nowait() argument
    389 return cpu_stop_queue_work(cpu, work_buf);  in stop_one_cpu_nowait()
    398 unsigned int cpu;  in queue_stop_cpus_work() local
    [all …]
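stop_one_cpu() above queues a cpu_stop_work for the target CPU's per-CPU stopper thread and waits for it to finish. The fragment below is a hedged sketch of a caller; it assumes built-in kernel code (the stop_* symbols are not necessarily exported to modules), and demo_* is an invented name.

#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>

/* Runs in the stopper thread on the target CPU, with that CPU otherwise quiesced. */
static int demo_stop_fn(void *arg)
{
	pr_info("demo: stopper callback on cpu%d\n", smp_processor_id());
	return 0;
}

/* Synchronously run demo_stop_fn() on @cpu via its cpu_stopper thread. */
static int demo_run_on_cpu(unsigned int cpu)
{
	return stop_one_cpu(cpu, demo_stop_fn, NULL);
}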
|
D | profile.c |
    226 int cpu = smp_processor_id();  in __profile_flip_buffers() local
    228 per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);  in __profile_flip_buffers()
    233 int i, j, cpu;  in profile_flip_buffers() local
    239 for_each_online_cpu(cpu) {  in profile_flip_buffers()
    240 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];  in profile_flip_buffers()
    256 int i, cpu;  in profile_discard_flip_buffers() local
    262 for_each_online_cpu(cpu) {  in profile_discard_flip_buffers()
    263 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];  in profile_discard_flip_buffers()
    272 int i, j, cpu;  in do_profile_hits() local
    278 cpu = get_cpu();  in do_profile_hits()
    [all …]
|
D | watchdog_hld.c |
    168 unsigned int cpu = smp_processor_id();  in hardlockup_detector_event_create() local
    176 evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,  in hardlockup_detector_event_create()
    179 pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,  in hardlockup_detector_event_create()
    225 int cpu;  in hardlockup_detector_perf_cleanup() local
    227 for_each_cpu(cpu, &dead_events_mask) {  in hardlockup_detector_perf_cleanup()
    228 struct perf_event *event = per_cpu(dead_event, cpu);  in hardlockup_detector_perf_cleanup()
    236 per_cpu(dead_event, cpu) = NULL;  in hardlockup_detector_perf_cleanup()
    248 int cpu;  in hardlockup_detector_perf_stop() local
    252 for_each_online_cpu(cpu) {  in hardlockup_detector_perf_stop()
    253 struct perf_event *event = per_cpu(watchdog_ev, cpu);  in hardlockup_detector_perf_stop()
    [all …]
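hardlockup_detector_event_create() builds the NMI watchdog on perf_event_create_kernel_counter(), creating one hardware counter bound to each CPU. The sketch below shows that API in isolation; the attribute values, sample period, and demo_* names are illustrative assumptions, not the watchdog's actual configuration.

#include <linux/perf_event.h>
#include <linux/smp.h>

/* Hypothetical overflow callback, invoked in NMI/IRQ context when the period elapses. */
static void demo_overflow(struct perf_event *event,
			  struct perf_sample_data *data, struct pt_regs *regs)
{
	pr_info("demo: cycle counter overflowed on cpu%d\n", smp_processor_id());
}

static struct perf_event *demo_create_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.pinned		= 1,
		.sample_period	= 10000000,
	};

	/* CPU-bound counter (task == NULL); caller keeps the returned event. */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, demo_overflow, NULL);
}

The returned pointer must be checked with IS_ERR() and eventually released with perf_event_release_kernel(), which is what the dead_event/watchdog_ev bookkeeping above takes care of.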
|
/kernel/time/ |
D | tick-broadcast.c |
    39 static void tick_broadcast_clear_oneshot(int cpu);
    42 static void tick_broadcast_oneshot_offline(unsigned int cpu);
    47 static inline void tick_broadcast_clear_oneshot(int cpu) { }  in tick_broadcast_clear_oneshot() argument
    50 static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }  in tick_broadcast_oneshot_offline() argument
    67 static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu);
    69 const struct clock_event_device *tick_get_wakeup_device(int cpu)  in tick_get_wakeup_device() argument
    71 return tick_get_oneshot_wakeup_device(cpu);  in tick_get_wakeup_device()
    102 static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)  in tick_get_oneshot_wakeup_device() argument
    104 return per_cpu(tick_oneshot_wakeup_device, cpu);  in tick_get_oneshot_wakeup_device()
    117 int cpu)  in tick_set_oneshot_wakeup_device() argument
    [all …]
|
D | tick-common.c |
    64 struct tick_device *tick_get_device(int cpu)  in tick_get_device() argument
    66 return &per_cpu(tick_cpu_device, cpu);  in tick_get_device()
    86 static void tick_periodic(int cpu)  in tick_periodic() argument
    88 if (tick_do_timer_cpu == cpu) {  in tick_periodic()
    111 int cpu = smp_processor_id();  in tick_handle_periodic() local
    114 tick_periodic(cpu);  in tick_handle_periodic()
    147 tick_periodic(cpu);  in tick_handle_periodic()
    187 int cpu = *(unsigned int *)info;  in giveup_do_timer() local
    191 tick_do_timer_cpu = cpu;  in giveup_do_timer()
    196 int cpu = smp_processor_id();  in tick_take_do_timer_from_boot() local
    [all …]
|
D | tick-sched.c |
    42 struct tick_sched *tick_get_tick_sched(int cpu)  in tick_get_tick_sched() argument
    44 return &per_cpu(tick_cpu_sched, cpu);  in tick_get_tick_sched()
    188 int cpu = smp_processor_id();  in tick_sched_do_timer() local
    205 tick_do_timer_cpu = cpu;  in tick_sched_do_timer()
    210 if (tick_do_timer_cpu == cpu) {  in tick_sched_do_timer()
    306 static bool can_stop_full_tick(int cpu, struct tick_sched *ts)  in can_stop_full_tick() argument
    310 if (unlikely(!cpu_online(cpu)))  in can_stop_full_tick()
    354 void tick_nohz_full_kick_cpu(int cpu)  in tick_nohz_full_kick_cpu() argument
    356 if (!tick_nohz_full_cpu(cpu))  in tick_nohz_full_kick_cpu()
    359 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);  in tick_nohz_full_kick_cpu()
    [all …]
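tick_nohz_full_kick_cpu() above pokes a remote nohz_full CPU by queueing that CPU's per-CPU irq_work with irq_work_queue_on(). A small, hypothetical use of the same API follows; demo_* names are invented and the static initializer assumes a kernel recent enough to provide IRQ_WORK_INIT().

#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void demo_kick_fn(struct irq_work *work)
{
	/* Runs in interrupt context on the CPU the work was queued on. */
	pr_info("demo: kicked cpu%d\n", smp_processor_id());
}

static DEFINE_PER_CPU(struct irq_work, demo_kick_work) =
	IRQ_WORK_INIT(demo_kick_fn);

/* Ask @cpu to run demo_kick_fn() from interrupt context, without waiting. */
static void demo_kick_cpu(int cpu)
{
	irq_work_queue_on(&per_cpu(demo_kick_work, cpu), cpu);
}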
|
D | timer_list.c |
    21 int cpu;  member
    115 static void print_cpu(struct seq_file *m, int cpu, u64 now)  in print_cpu() argument
    117 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);  in print_cpu()
    120 SEQ_printf(m, "cpu: %d\n", cpu);  in print_cpu()
    151 struct tick_sched *ts = tick_get_tick_sched(cpu);  in print_cpu()
    178 print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)  in print_tickdevice() argument
    185 if (cpu < 0)  in print_tickdevice()
    188 SEQ_printf(m, "Per CPU device: %d\n", cpu);  in print_tickdevice()
    233 if (cpu >= 0) {  in print_tickdevice()
    234 const struct clock_event_device *wd = tick_get_wakeup_device(cpu);  in print_tickdevice()
    [all …]
|
/kernel/sched/ |
D | cpudeadline.c |
    29 int orig_cpu = cp->elements[idx].cpu;  in cpudl_heapify_down()
    57 cp->elements[idx].cpu = cp->elements[largest].cpu;  in cpudl_heapify_down()
    59 cp->elements[cp->elements[idx].cpu].idx = idx;  in cpudl_heapify_down()
    63 cp->elements[idx].cpu = orig_cpu;  in cpudl_heapify_down()
    65 cp->elements[cp->elements[idx].cpu].idx = idx;  in cpudl_heapify_down()
    72 int orig_cpu = cp->elements[idx].cpu;  in cpudl_heapify_up()
    83 cp->elements[idx].cpu = cp->elements[p].cpu;  in cpudl_heapify_up()
    85 cp->elements[cp->elements[idx].cpu].idx = idx;  in cpudl_heapify_up()
    89 cp->elements[idx].cpu = orig_cpu;  in cpudl_heapify_up()
    91 cp->elements[cp->elements[idx].cpu].idx = idx;  in cpudl_heapify_up()
    [all …]
|
D | topology.c |
    38 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,  in sched_domain_debug_one() argument
    51 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {  in sched_domain_debug_one()
    52 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);  in sched_domain_debug_one()
    54 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {  in sched_domain_debug_one()
    55 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);  in sched_domain_debug_one()
    134 static void sched_domain_debug(struct sched_domain *sd, int cpu)  in sched_domain_debug() argument
    142 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);  in sched_domain_debug()
    146 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);  in sched_domain_debug()
    149 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))  in sched_domain_debug()
    160 # define sched_domain_debug(sd, cpu) do { } while (0)  argument
    [all …]
|
D | cpuacct.c |
    94 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,  in cpuacct_cpuusage_read() argument
    97 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);  in cpuacct_cpuusage_read()
    98 u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;  in cpuacct_cpuusage_read()
    112 raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_read()
    129 raw_spin_rq_unlock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_read()
    135 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu)  in cpuacct_cpuusage_write() argument
    137 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);  in cpuacct_cpuusage_write()
    138 u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;  in cpuacct_cpuusage_write()
    148 raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_write()
    156 raw_spin_rq_unlock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_write()
    [all …]
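ca->cpuusage above is dynamically allocated per-CPU storage, reached with per_cpu_ptr() rather than the static per_cpu() form. A minimal sketch of that allocate/iterate/free pattern follows; the demo_* names are invented and this is not cpuacct's actual accounting logic.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>

static u64 __percpu *demo_usage;

static int demo_usage_alloc(void)
{
	/* One u64 counter per possible CPU, zero-initialized. */
	demo_usage = alloc_percpu(u64);
	return demo_usage ? 0 : -ENOMEM;
}

static u64 demo_usage_total(void)
{
	u64 sum = 0;
	int cpu;

	/* Sum every CPU's private counter. */
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(demo_usage, cpu);
	return sum;
}

static void demo_usage_free(void)
{
	free_percpu(demo_usage);
}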
|
/kernel/irq/ |
D | matrix.c |
    132 unsigned int cpu, best_cpu, maxavl = 0;  in matrix_find_best_cpu() local
    137 for_each_cpu(cpu, msk) {  in matrix_find_best_cpu()
    138 cm = per_cpu_ptr(m->maps, cpu);  in matrix_find_best_cpu()
    143 best_cpu = cpu;  in matrix_find_best_cpu()
    153 unsigned int cpu, best_cpu, allocated = UINT_MAX;  in matrix_find_best_cpu_managed() local
    158 for_each_cpu(cpu, msk) {  in matrix_find_best_cpu_managed()
    159 cm = per_cpu_ptr(m->maps, cpu);  in matrix_find_best_cpu_managed()
    164 best_cpu = cpu;  in matrix_find_best_cpu_managed()
    212 unsigned int cpu, failed_cpu;  in irq_matrix_reserve_managed() local
    214 for_each_cpu(cpu, msk) {  in irq_matrix_reserve_managed()
    [all …]
|
D | ipi.c |
    163 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)  in ipi_get_hwirq() argument
    168 if (!data || cpu >= nr_cpu_ids)  in ipi_get_hwirq()
    172 if (!ipimask || !cpumask_test_cpu(cpu, ipimask))  in ipi_get_hwirq()
    182 data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);  in ipi_get_hwirq()
    189 const struct cpumask *dest, unsigned int cpu)  in ipi_send_verify() argument
    199 if (cpu >= nr_cpu_ids)  in ipi_send_verify()
    210 if (!cpumask_test_cpu(cpu, ipimask))  in ipi_send_verify()
    227 int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)  in __ipi_send_single() argument
    238 if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))  in __ipi_send_single()
    242 chip->ipi_send_mask(data, cpumask_of(cpu));  in __ipi_send_single()
    [all …]
|
/kernel/debug/ |
D | debug_core.c |
    245 int cpu;  in kgdb_roundup_cpus() local
    248 for_each_online_cpu(cpu) {  in kgdb_roundup_cpus()
    250 if (cpu == this_cpu)  in kgdb_roundup_cpus()
    253 csd = &per_cpu(kgdb_roundup_csd, cpu);  in kgdb_roundup_cpus()
    263 if (kgdb_info[cpu].rounding_up)  in kgdb_roundup_cpus()
    265 kgdb_info[cpu].rounding_up = true;  in kgdb_roundup_cpus()
    267 ret = smp_call_function_single_async(cpu, csd);  in kgdb_roundup_cpus()
    269 kgdb_info[cpu].rounding_up = false;  in kgdb_roundup_cpus()
    456 void kdb_dump_stack_on_cpu(int cpu)  in kdb_dump_stack_on_cpu() argument
    458 if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {  in kdb_dump_stack_on_cpu()
    [all …]
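kgdb_roundup_cpus() uses the asynchronous cross-call variant: each target CPU has a preallocated per-CPU call_single_data, and smp_call_function_single_async() returns without waiting. Below is a hedged sketch of the same pattern; the demo_* names are invented, the static CSD_INIT() initializer assumes a reasonably recent kernel, and a real caller must not reuse a csd until the previous call has completed (which is what the rounding_up flag above guards).

#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void demo_roundup_fn(void *info)
{
	pr_info("demo: async call on cpu%d\n", smp_processor_id());
}

/* One csd per CPU so concurrent kicks to different CPUs never share state. */
static DEFINE_PER_CPU(call_single_data_t, demo_csd) =
	CSD_INIT(demo_roundup_fn, NULL);

/* Fire-and-forget: queue demo_roundup_fn() on @cpu without waiting for it. */
static int demo_kick_async(int cpu)
{
	return smp_call_function_single_async(cpu, &per_cpu(demo_csd, cpu));
}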
|
/kernel/rcu/ |
D | tree_stall.h |
    367 int cpu;  in rcu_dump_cpu_stacks() local
    373 for_each_leaf_node_possible_cpu(rnp, cpu)  in rcu_dump_cpu_stacks()
    374 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {  in rcu_dump_cpu_stacks()
    375 if (cpu_is_offline(cpu))  in rcu_dump_cpu_stacks()
    376 pr_err("Offline CPU %d blocking current GP.\n", cpu);  in rcu_dump_cpu_stacks()
    378 dump_cpu_task(cpu);  in rcu_dump_cpu_stacks()
    418 int cpu;  in rcu_is_rcuc_kthread_starving() local
    426 cpu = task_cpu(rcuc);  in rcu_is_rcuc_kthread_starving()
    427 if (cpu_is_offline(cpu) || idle_cpu(cpu))  in rcu_is_rcuc_kthread_starving()
    448 static void print_cpu_stall_info(int cpu)  in print_cpu_stall_info() argument
    [all …]
|
D | tasks.h |
    45 int cpu;  member
    227 int cpu;  in cblist_init_generic() local
    249 for_each_possible_cpu(cpu) {  in cblist_init_generic()
    250 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);  in cblist_init_generic()
    253 if (cpu)  in cblist_init_generic()
    259 rtpcp->cpu = cpu;  in cblist_init_generic()
    353 int cpu;  in rcu_barrier_tasks_generic() local
    367 for_each_possible_cpu(cpu) {  in rcu_barrier_tasks_generic()
    368 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))  in rcu_barrier_tasks_generic()
    370 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);  in rcu_barrier_tasks_generic()
    [all …]
|
D | tree_nocb.h |
    104 WARN_ON_ONCE(smp_processor_id() != rdp->cpu);  in rcu_nocb_bypass_lock()
    123 WARN_ON_ONCE(smp_processor_id() != rdp->cpu);  in rcu_nocb_wait_contended()
    224 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,  in __wake_nocb_gp()
    240 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));  in __wake_nocb_gp()
    316 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);  in wake_nocb_gp_defer()
    473 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,  in rcu_nocb_try_bypass()
    492 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,  in rcu_nocb_try_bypass()
    524 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));  in rcu_nocb_try_bypass()
    541 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,  in rcu_nocb_try_bypass()
    545 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,  in rcu_nocb_try_bypass()
    [all …]
|
/kernel/cgroup/ |
D | rstat.c |
    13 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
    15 static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)  in cgroup_rstat_cpu() argument
    17 return per_cpu_ptr(cgrp->rstat_cpu, cpu);  in cgroup_rstat_cpu()
    29 void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)  in cgroup_rstat_updated() argument
    31 raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);  in cgroup_rstat_updated()
    42 if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))  in cgroup_rstat_updated()
    49 struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);  in cgroup_rstat_updated()
    66 prstatc = cgroup_rstat_cpu(parent, cpu);  in cgroup_rstat_updated()
    92 struct cgroup *root, int cpu)  in cgroup_rstat_cpu_pop_updated() argument
    107 if (!cgroup_rstat_cpu(pos, cpu)->updated_next)  in cgroup_rstat_cpu_pop_updated()
    [all …]
|
/kernel/trace/ |
D | ring_buffer.c |
    272 #define for_each_buffer_cpu(buffer, cpu) \  argument
    273 for_each_cpu(cpu, buffer->cpumask)
    275 #define for_each_online_buffer_cpu(buffer, cpu) \  argument
    276 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
    459 int cpu;  member
    814 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)  in ring_buffer_nr_pages() argument
    816 return buffer->buffers[cpu]->nr_pages;  in ring_buffer_nr_pages()
    826 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)  in ring_buffer_nr_dirty_pages() argument
    832 read = local_read(&buffer->buffers[cpu]->pages_read);  in ring_buffer_nr_dirty_pages()
    833 lost = local_read(&buffer->buffers[cpu]->pages_lost);  in ring_buffer_nr_dirty_pages()
    [all …]
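The for_each_online_buffer_cpu() macro above is simply for_each_cpu_and() over the buffer's cpumask and cpu_online_mask. A small, hypothetical illustration of that cpumask-intersection idiom (demo_* name invented):

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Report every CPU that is both in @mask and currently online. */
static void demo_walk_online_subset(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		pr_info("demo: cpu%d is in the mask and online\n", cpu);
}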
|
D | trace_functions_graph.c |
    37 int cpu;  member
    48 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
    134 int cpu;  in trace_graph_entry() local
    172 cpu = raw_smp_processor_id();  in trace_graph_entry()
    173 data = per_cpu_ptr(tr->array_buffer.data, cpu);  in trace_graph_entry()
    242 int cpu;  in trace_graph_return() local
    252 cpu = raw_smp_processor_id();  in trace_graph_return()
    253 data = per_cpu_ptr(tr->array_buffer.data, cpu);  in trace_graph_return()
    331 static void print_graph_cpu(struct trace_seq *s, int cpu)  in print_graph_cpu() argument
    338 trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);  in print_graph_cpu()
    [all …]
|
D | trace_kdb.c |
    27 int cnt = 0, cpu;  in ftrace_dump_buf() local
    44 for_each_tracing_cpu(cpu) {  in ftrace_dump_buf()
    45 iter.buffer_iter[cpu] =  in ftrace_dump_buf()
    47 cpu, GFP_ATOMIC);  in ftrace_dump_buf()
    48 ring_buffer_read_start(iter.buffer_iter[cpu]);  in ftrace_dump_buf()
    49 tracing_iter_reset(&iter, cpu);  in ftrace_dump_buf()
    84 for_each_tracing_cpu(cpu) {  in ftrace_dump_buf()
    85 if (iter.buffer_iter[cpu]) {  in ftrace_dump_buf()
    86 ring_buffer_read_finish(iter.buffer_iter[cpu]);  in ftrace_dump_buf()
    87 iter.buffer_iter[cpu] = NULL;  in ftrace_dump_buf()
    [all …]
|
/kernel/events/ |
D | hw_breakpoint.c |
    62 static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)  in get_bp_info() argument
    64 return per_cpu_ptr(bp_cpuinfo + type, cpu);  in get_bp_info()
    199 int i, cpu, err_cpu;  in init_breakpoint_slots() local
    204 for_each_possible_cpu(cpu) {  in init_breakpoint_slots()
    206 struct bp_cpuinfo *info = get_bp_info(cpu, i);  in init_breakpoint_slots()
    224 if (err_cpu == cpu)  in init_breakpoint_slots()
    302 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)  in max_task_bp_pinned() argument
    304 struct bp_slots_histogram *tsk_pinned = &get_bp_info(cpu, type)->tsk_pinned;  in max_task_bp_pinned()
    322 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)  in task_bp_pinned() argument
    342 if (iter->cpu >= 0) {  in task_bp_pinned()
    [all …]
|
/kernel/debug/kdb/ |
D | kdb_bt.c |
    107 kdb_bt_cpu(unsigned long cpu)  in kdb_bt_cpu() argument
    111 if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {  in kdb_bt_cpu()
    112 kdb_printf("WARNING: no process for cpu %ld\n", cpu);  in kdb_bt_cpu()
    117 kdb_tsk = KDB_TSK(cpu);  in kdb_bt_cpu()
    119 kdb_printf("WARNING: no task for cpu %ld\n", cpu);  in kdb_bt_cpu()
    140 unsigned long cpu;  in kdb_bt() local
    146 for_each_online_cpu(cpu) {  in kdb_bt()
    147 p = kdb_curr_task(cpu);  in kdb_bt()
    181 unsigned long cpu = ~0;  in kdb_bt() local
    185 diag = kdbgetularg((char *)argv[1], &cpu);  in kdb_bt()
    [all …]
|