/kernel/ |
D | smpboot.c |
    30  struct task_struct *idle_thread_get(unsigned int cpu)    in idle_thread_get() argument
    32  struct task_struct *tsk = per_cpu(idle_threads, cpu);    in idle_thread_get()
    50  static __always_inline void idle_init(unsigned int cpu)    in idle_init() argument
    52  struct task_struct *tsk = per_cpu(idle_threads, cpu);    in idle_init()
    55  tsk = fork_idle(cpu);    in idle_init()
    57  pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);    in idle_init()
    59  per_cpu(idle_threads, cpu) = tsk;    in idle_init()
    68  unsigned int cpu, boot_cpu;    in idle_threads_init() local
    72  for_each_possible_cpu(cpu) {    in idle_threads_init()
    73  if (cpu != boot_cpu)    in idle_threads_init()
  [all …]
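The smpboot.c hits show the basic per-CPU data idiom: a static per-CPU variable indexed with per_cpu() and walked with for_each_possible_cpu(). Below is a minimal sketch of that pattern; the variable and function names (demo_counter, demo_report) are hypothetical and not taken from smpboot.c.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical per-CPU counter: one private instance per possible CPU. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_report(void)
{
	unsigned int cpu;

	/* Walk every possible CPU and read its private copy. */
	for_each_possible_cpu(cpu)
		pr_info("cpu %u: counter=%lu\n",
			cpu, per_cpu(demo_counter, cpu));
}

per_cpu() takes the variable name and an explicit CPU index, which is why nearly every function above carries a cpu argument; code that only touches the local CPU's copy would use this_cpu_read()/this_cpu_write() instead.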
|
D | cpu.c |
   126  int (*single)(unsigned int cpu);
   127  int (*multi)(unsigned int cpu,
   131  int (*single)(unsigned int cpu);
   132  int (*multi)(unsigned int cpu,
   167  static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,    in cpuhp_invoke_callback() argument
   171  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);    in cpuhp_invoke_callback()
   173  int (*cbm)(unsigned int cpu, struct hlist_node *node);    in cpuhp_invoke_callback()
   174  int (*cb)(unsigned int cpu);    in cpuhp_invoke_callback()
   191  trace_cpuhp_enter(cpu, st->target, state, cb);    in cpuhp_invoke_callback()
   192  ret = cb(cpu);    in cpuhp_invoke_callback()
  [all …]
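These lines are from the CPU hotplug state machine: each state carries startup/teardown callbacks that receive the CPU number, and cpuhp_invoke_callback() dispatches them. A hedged sketch of how a subsystem typically hooks into this with cpuhp_setup_state() follows; the callback names and the choice of CPUHP_AP_ONLINE_DYN are illustrative assumptions, not code from cpu.c.

#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: CPU %u coming online\n", cpu);
	return 0;	/* a non-zero return aborts the hotplug operation */
}

static int demo_cpu_offline(unsigned int cpu)
{
	pr_info("demo: CPU %u going offline\n", cpu);
	return 0;
}

static int demo_register(void)
{
	int ret;

	/*
	 * Dynamically allocate an "online" state: the callbacks run on each
	 * CPU as it comes up or goes down, and once now for CPUs already up.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_offline);
	return ret < 0 ? ret : 0;
}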
|
D | smp.c |
   101  int smpcfd_prepare_cpu(unsigned int cpu)    in smpcfd_prepare_cpu() argument
   103  struct call_function_data *cfd = &per_cpu(cfd_data, cpu);    in smpcfd_prepare_cpu()
   106  cpu_to_node(cpu)))    in smpcfd_prepare_cpu()
   109  cpu_to_node(cpu))) {    in smpcfd_prepare_cpu()
   123  int smpcfd_dead_cpu(unsigned int cpu)    in smpcfd_dead_cpu() argument
   125  struct call_function_data *cfd = &per_cpu(cfd_data, cpu);    in smpcfd_dead_cpu()
   133  int smpcfd_dying_cpu(unsigned int cpu)    in smpcfd_dying_cpu() argument
   285  static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)    in csd_lock_print_extended() argument
   287  struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);    in csd_lock_print_extended()
   290  struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);    in csd_lock_print_extended()
  [all …]
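smp.c implements the cross-CPU function-call machinery behind helpers such as smp_call_function_single(). A minimal sketch of asking one specific CPU to run a function follows; the function and argument names are made up for illustration.

#include <linux/smp.h>
#include <linux/printk.h>

static void demo_remote_fn(void *info)
{
	/* Executes in IPI context on the target CPU. */
	pr_info("demo: running on CPU %d, arg=%p\n",
		smp_processor_id(), info);
}

static void demo_poke_cpu(int cpu)
{
	/* wait=1: block until the target CPU has run the function. */
	int ret = smp_call_function_single(cpu, demo_remote_fn, NULL, 1);

	if (ret)
		pr_warn("demo: call to CPU %d failed (%d)\n", cpu, ret);
}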
|
D | stop_machine.c |
    93  static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)    in cpu_stop_queue_work() argument
    95  struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);    in cpu_stop_queue_work()
   139  int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)    in stop_one_cpu() argument
   145  if (!cpu_stop_queue_work(cpu, &work))    in stop_one_cpu()
   208  int cpu = smp_processor_id(), err = 0;    in multi_cpu_stop() local
   221  is_active = cpu == cpumask_first(cpumask);    in multi_cpu_stop()
   224  is_active = cpumask_test_cpu(cpu, cpumask);    in multi_cpu_stop()
   385  bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,    in stop_one_cpu_nowait() argument
   389  return cpu_stop_queue_work(cpu, work_buf);    in stop_one_cpu_nowait()
   398  unsigned int cpu;    in queue_stop_cpus_work() local
  [all …]
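stop_one_cpu() hands a function to the target CPU's stopper thread and blocks until it has run, with that CPU doing nothing else meanwhile. A hedged sketch of calling it from process context; the work function and flag are hypothetical.

#include <linux/stop_machine.h>
#include <linux/printk.h>

/*
 * cpu_stop_fn_t callback: runs on the target CPU's stopper thread with
 * preemption disabled, so it must stay short and must not sleep.
 */
static int demo_stop_fn(void *arg)
{
	unsigned long *flagp = arg;

	*flagp = 1;
	return 0;
}

static void demo_run_on_cpu(unsigned int cpu)
{
	unsigned long done = 0;

	/* Blocks until demo_stop_fn() has completed on @cpu. */
	if (stop_one_cpu(cpu, demo_stop_fn, &done))
		pr_warn("demo: stop_one_cpu(%u) failed\n", cpu);
}

stop_one_cpu_nowait(), also listed above, takes a caller-provided cpu_stop_work buffer instead and returns without waiting.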
|
D | watchdog.c |
   100  int __weak watchdog_nmi_enable(unsigned int cpu)    in watchdog_nmi_enable() argument
   106  void __weak watchdog_nmi_disable(unsigned int cpu)    in watchdog_nmi_disable() argument
   291  int cpu;    in touch_all_softlockup_watchdogs() local
   302  for_each_cpu(cpu, &watchdog_allowed_mask) {    in touch_all_softlockup_watchdogs()
   303  per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;    in touch_all_softlockup_watchdogs()
   304  wq_watchdog_touch(cpu);    in touch_all_softlockup_watchdogs()
   460  static void watchdog_enable(unsigned int cpu)    in watchdog_enable() argument
   465  WARN_ON_ONCE(cpu != smp_processor_id());    in watchdog_enable()
   483  watchdog_nmi_enable(cpu);    in watchdog_enable()
   486  static void watchdog_disable(unsigned int cpu)    in watchdog_disable() argument
  [all …]
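The soft-lockup watchdog keeps a per-CPU timestamp (watchdog_report_ts above) that code performing a legitimately long, non-preemptible wait can refresh so it is not flagged as a lockup. A small illustrative sketch, assuming a hypothetical flag polled in a tight loop:

#include <linux/nmi.h>		/* touch_softlockup_watchdog() */
#include <linux/processor.h>	/* cpu_relax() */
#include <linux/compiler.h>

/* Hypothetical completion flag set elsewhere, e.g. by an interrupt handler. */
static int demo_done;

static void demo_long_polling_loop(void)
{
	while (!READ_ONCE(demo_done)) {
		cpu_relax();
		/*
		 * Refresh this CPU's soft-lockup timestamp so a deliberate
		 * long poll is not reported as a stuck CPU.
		 */
		touch_softlockup_watchdog();
	}
}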
|
/kernel/time/ |
D | tick-broadcast.c |
    39  static void tick_broadcast_clear_oneshot(int cpu);
    42  static void tick_broadcast_oneshot_offline(unsigned int cpu);
    47  static inline void tick_broadcast_clear_oneshot(int cpu) { }    in tick_broadcast_clear_oneshot() argument
    50  static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }    in tick_broadcast_oneshot_offline() argument
    67  static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu);
    69  const struct clock_event_device *tick_get_wakeup_device(int cpu)    in tick_get_wakeup_device() argument
    71  return tick_get_oneshot_wakeup_device(cpu);    in tick_get_wakeup_device()
   102  static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)    in tick_get_oneshot_wakeup_device() argument
   104  return per_cpu(tick_oneshot_wakeup_device, cpu);    in tick_get_oneshot_wakeup_device()
   117  int cpu)    in tick_set_oneshot_wakeup_device() argument
  [all …]
|
D | tick-common.c |
    64  struct tick_device *tick_get_device(int cpu)    in tick_get_device() argument
    66  return &per_cpu(tick_cpu_device, cpu);    in tick_get_device()
    86  static void tick_periodic(int cpu)    in tick_periodic() argument
    88  if (tick_do_timer_cpu == cpu) {    in tick_periodic()
   111  int cpu = smp_processor_id();    in tick_handle_periodic() local
   114  tick_periodic(cpu);    in tick_handle_periodic()
   147  tick_periodic(cpu);    in tick_handle_periodic()
   187  int cpu = *(unsigned int *)info;    in giveup_do_timer() local
   191  tick_do_timer_cpu = cpu;    in giveup_do_timer()
   196  int cpu = smp_processor_id();    in tick_take_do_timer_from_boot() local
  [all …]
|
D | tick-sched.c |
    42  struct tick_sched *tick_get_tick_sched(int cpu)    in tick_get_tick_sched() argument
    44  return &per_cpu(tick_cpu_sched, cpu);    in tick_get_tick_sched()
   188  int cpu = smp_processor_id();    in tick_sched_do_timer() local
   205  tick_do_timer_cpu = cpu;    in tick_sched_do_timer()
   210  if (tick_do_timer_cpu == cpu) {    in tick_sched_do_timer()
   306  static bool can_stop_full_tick(int cpu, struct tick_sched *ts)    in can_stop_full_tick() argument
   310  if (unlikely(!cpu_online(cpu)))    in can_stop_full_tick()
   354  void tick_nohz_full_kick_cpu(int cpu)    in tick_nohz_full_kick_cpu() argument
   356  if (!tick_nohz_full_cpu(cpu))    in tick_nohz_full_kick_cpu()
   359  irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);    in tick_nohz_full_kick_cpu()
  [all …]
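tick_nohz_full_kick_cpu() above kicks a remote nohz_full CPU by queueing a per-CPU irq_work item on it. A hedged sketch of the general irq_work-on-a-remote-CPU pattern; the item and handler names are invented, not taken from tick-sched.c.

#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct irq_work, demo_irq_work);

static void demo_irq_work_fn(struct irq_work *work)
{
	/* Runs in hard-IRQ context on the CPU the work was queued on. */
	pr_info("demo: irq_work on CPU %d\n", smp_processor_id());
}

static void demo_irq_work_setup(void)
{
	int cpu;

	/* Initialize each CPU's work item once, before first use. */
	for_each_possible_cpu(cpu)
		init_irq_work(&per_cpu(demo_irq_work, cpu), demo_irq_work_fn);
}

static void demo_kick_cpu(int cpu)
{
	/* Queue @cpu's item; the handler runs on that CPU shortly after. */
	irq_work_queue_on(&per_cpu(demo_irq_work, cpu), cpu);
}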
|
D | timer_list.c |
    21  int cpu;    member
   115  static void print_cpu(struct seq_file *m, int cpu, u64 now)    in print_cpu() argument
   117  struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);    in print_cpu()
   120  SEQ_printf(m, "cpu: %d\n", cpu);    in print_cpu()
   151  struct tick_sched *ts = tick_get_tick_sched(cpu);    in print_cpu()
   178  print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)    in print_tickdevice() argument
   185  if (cpu < 0)    in print_tickdevice()
   188  SEQ_printf(m, "Per CPU device: %d\n", cpu);    in print_tickdevice()
   233  if (cpu >= 0) {    in print_tickdevice()
   234  const struct clock_event_device *wd = tick_get_wakeup_device(cpu);    in print_tickdevice()
  [all …]
|
/kernel/sched/ |
D | cpudeadline.c |
    30  int orig_cpu = cp->elements[idx].cpu;    in cpudl_heapify_down()
    58  cp->elements[idx].cpu = cp->elements[largest].cpu;    in cpudl_heapify_down()
    60  cp->elements[cp->elements[idx].cpu].idx = idx;    in cpudl_heapify_down()
    64  cp->elements[idx].cpu = orig_cpu;    in cpudl_heapify_down()
    66  cp->elements[cp->elements[idx].cpu].idx = idx;    in cpudl_heapify_down()
    73  int orig_cpu = cp->elements[idx].cpu;    in cpudl_heapify_up()
    84  cp->elements[idx].cpu = cp->elements[p].cpu;    in cpudl_heapify_up()
    86  cp->elements[cp->elements[idx].cpu].idx = idx;    in cpudl_heapify_up()
    90  cp->elements[idx].cpu = orig_cpu;    in cpudl_heapify_up()
    92  cp->elements[cp->elements[idx].cpu].idx = idx;    in cpudl_heapify_up()
  [all …]
|
D | topology.c |
    39  static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,    in sched_domain_debug_one() argument
    52  if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {    in sched_domain_debug_one()
    53  printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);    in sched_domain_debug_one()
    55  if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {    in sched_domain_debug_one()
    56  printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);    in sched_domain_debug_one()
   135  static void sched_domain_debug(struct sched_domain *sd, int cpu)    in sched_domain_debug() argument
   143  printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);    in sched_domain_debug()
   147  printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);    in sched_domain_debug()
   150  if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))    in sched_domain_debug()
   161  # define sched_domain_debug(sd, cpu) do { } while (0)    argument
  [all …]
|
D | cpupri.c |
    76  unsigned int cpu = cpumask_first(lowest_mask);    in drop_nopreempt_cpus() local
    77  while (cpu < nr_cpu_ids) {    in drop_nopreempt_cpus()
    79  struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);    in drop_nopreempt_cpus()
    80  if (task_may_not_preempt(task, cpu)) {    in drop_nopreempt_cpus()
    81  cpumask_clear_cpu(cpu, lowest_mask);    in drop_nopreempt_cpus()
    83  cpu = cpumask_next(cpu, lowest_mask);    in drop_nopreempt_cpus()
   173  bool (*fitness_fn)(struct task_struct *p, int cpu))    in cpupri_find_fitness()
   176  int idx, cpu;    in cpupri_find_fitness() local
   193  for_each_cpu(cpu, lowest_mask) {    in cpupri_find_fitness()
   194  if (!fitness_fn(p, cpu))    in cpupri_find_fitness()
  [all …]
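Both functions above walk a candidate cpumask and drop CPUs that fail a predicate, either manually with cpumask_first()/cpumask_next() or with for_each_cpu(). A standalone sketch of that filtering idiom; the mask source and the predicate are made up (here: keep only even-numbered CPUs).

#include <linux/cpumask.h>

/* Hypothetical predicate deciding whether a CPU stays in the result. */
static bool demo_cpu_ok(unsigned int cpu)
{
	return (cpu % 2) == 0;
}

static unsigned int demo_filter_online_cpus(struct cpumask *out)
{
	unsigned int cpu;

	/* Start from the online CPUs, then clear the ones the predicate rejects. */
	cpumask_copy(out, cpu_online_mask);
	for_each_cpu(cpu, out) {
		if (!demo_cpu_ok(cpu))
			cpumask_clear_cpu(cpu, out);
	}
	return cpumask_weight(out);
}

Clearing the current bit inside for_each_cpu() is safe because the iterator always advances via cpumask_next() from the current position, which is exactly what drop_nopreempt_cpus() relies on.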
|
D | cpuacct.c |
    95  static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,    in cpuacct_cpuusage_read() argument
    98  u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);    in cpuacct_cpuusage_read()
    99  u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;    in cpuacct_cpuusage_read()
   112  raw_spin_rq_lock_irq(cpu_rq(cpu));    in cpuacct_cpuusage_read()
   129  raw_spin_rq_unlock_irq(cpu_rq(cpu));    in cpuacct_cpuusage_read()
   135  static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu)    in cpuacct_cpuusage_write() argument
   137  u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);    in cpuacct_cpuusage_write()
   138  u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;    in cpuacct_cpuusage_write()
   148  raw_spin_rq_lock_irq(cpu_rq(cpu));    in cpuacct_cpuusage_write()
   156  raw_spin_rq_unlock_irq(cpu_rq(cpu));    in cpuacct_cpuusage_write()
  [all …]
|
/kernel/bpf/ |
D | percpu_freelist.c |
     8  int cpu;    in pcpu_freelist_init() local
    14  for_each_possible_cpu(cpu) {    in pcpu_freelist_init()
    15  struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);    in pcpu_freelist_init()
    59  int cpu, orig_cpu;    in ___pcpu_freelist_push_nmi() local
    61  orig_cpu = cpu = raw_smp_processor_id();    in ___pcpu_freelist_push_nmi()
    65  head = per_cpu_ptr(s->freelist, cpu);    in ___pcpu_freelist_push_nmi()
    71  cpu = cpumask_next(cpu, cpu_possible_mask);    in ___pcpu_freelist_push_nmi()
    72  if (cpu >= nr_cpu_ids)    in ___pcpu_freelist_push_nmi()
    73  cpu = 0;    in ___pcpu_freelist_push_nmi()
    76  if (cpu == orig_cpu &&    in ___pcpu_freelist_push_nmi()
  [all …]
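pcpu_freelist_init() initializes one list head per possible CPU through per_cpu_ptr() on a dynamically allocated per-CPU area, and the NMI push path scans CPUs starting from the local one and wrapping around. A hedged sketch of both idioms with a hypothetical per-CPU slot structure; the "free" test is purely illustrative.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct demo_pcpu_slot {
	int value;
};

static struct demo_pcpu_slot __percpu *demo_slots;

static int demo_slots_init(void)
{
	int cpu;

	demo_slots = alloc_percpu(struct demo_pcpu_slot);
	if (!demo_slots)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(demo_slots, cpu)->value = 0;
	return 0;
}

/* Scan all possible CPUs, starting with the local one and wrapping around. */
static struct demo_pcpu_slot *demo_find_slot(void)
{
	int cpu, orig_cpu;

	orig_cpu = cpu = raw_smp_processor_id();
	do {
		struct demo_pcpu_slot *slot = per_cpu_ptr(demo_slots, cpu);

		if (slot->value == 0)	/* hypothetical "free" test */
			return slot;
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
	} while (cpu != orig_cpu);
	return NULL;
}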
|
/kernel/irq/ |
D | matrix.c |
   132  unsigned int cpu, best_cpu, maxavl = 0;    in matrix_find_best_cpu() local
   137  for_each_cpu(cpu, msk) {    in matrix_find_best_cpu()
   138  cm = per_cpu_ptr(m->maps, cpu);    in matrix_find_best_cpu()
   143  best_cpu = cpu;    in matrix_find_best_cpu()
   153  unsigned int cpu, best_cpu, allocated = UINT_MAX;    in matrix_find_best_cpu_managed() local
   158  for_each_cpu(cpu, msk) {    in matrix_find_best_cpu_managed()
   159  cm = per_cpu_ptr(m->maps, cpu);    in matrix_find_best_cpu_managed()
   164  best_cpu = cpu;    in matrix_find_best_cpu_managed()
   212  unsigned int cpu, failed_cpu;    in irq_matrix_reserve_managed() local
   214  for_each_cpu(cpu, msk) {    in irq_matrix_reserve_managed()
  [all …]
|
D | ipi.c |
   162  irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)    in ipi_get_hwirq() argument
   167  if (!data || !ipimask || cpu >= nr_cpu_ids)    in ipi_get_hwirq()
   170  if (!cpumask_test_cpu(cpu, ipimask))    in ipi_get_hwirq()
   180  data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);    in ipi_get_hwirq()
   187  const struct cpumask *dest, unsigned int cpu)    in ipi_send_verify() argument
   197  if (cpu >= nr_cpu_ids)    in ipi_send_verify()
   204  if (!cpumask_test_cpu(cpu, ipimask))    in ipi_send_verify()
   221  int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)    in __ipi_send_single() argument
   232  if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))    in __ipi_send_single()
   236  chip->ipi_send_mask(data, cpumask_of(cpu));    in __ipi_send_single()
  [all …]
|
/kernel/debug/ |
D | debug_core.c |
   246  int cpu;    in kgdb_roundup_cpus() local
   249  for_each_online_cpu(cpu) {    in kgdb_roundup_cpus()
   251  if (cpu == this_cpu)    in kgdb_roundup_cpus()
   254  csd = &per_cpu(kgdb_roundup_csd, cpu);    in kgdb_roundup_cpus()
   264  if (kgdb_info[cpu].rounding_up)    in kgdb_roundup_cpus()
   266  kgdb_info[cpu].rounding_up = true;    in kgdb_roundup_cpus()
   268  ret = smp_call_function_single_async(cpu, csd);    in kgdb_roundup_cpus()
   270  kgdb_info[cpu].rounding_up = false;    in kgdb_roundup_cpus()
   468  void kdb_dump_stack_on_cpu(int cpu)    in kdb_dump_stack_on_cpu()
   470  if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {    in kdb_dump_stack_on_cpu()
  [all …]
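kgdb_roundup_cpus() uses a per-CPU call_single_data structure with smp_call_function_single_async() so the debugger never blocks waiting for the remote CPU. A hedged sketch of that asynchronous pattern; the structure and handler names are invented, and the one-time INIT_CSD() setup is my assumption rather than the exact debug_core.c flow.

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void demo_async_fn(void *info)
{
	pr_info("demo: async call on CPU %d\n", smp_processor_id());
}

static DEFINE_PER_CPU(call_single_data_t, demo_csd);

static void demo_async_setup(void)
{
	int cpu;

	/* Bind each CPU's csd to the handler once, before first use. */
	for_each_possible_cpu(cpu)
		INIT_CSD(&per_cpu(demo_csd, cpu), demo_async_fn, NULL);
}

static void demo_poke_async(int cpu)
{
	/*
	 * Fire-and-forget: returns immediately. A given csd must not be
	 * reused until its previous call has completed on the target CPU.
	 */
	smp_call_function_single_async(cpu, &per_cpu(demo_csd, cpu));
}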
|
/kernel/cgroup/ |
D | rstat.c |
     9  static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
    11  static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)    in cgroup_rstat_cpu() argument
    13  return per_cpu_ptr(cgrp->rstat_cpu, cpu);    in cgroup_rstat_cpu()
    25  void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)    in cgroup_rstat_updated() argument
    27  raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);    in cgroup_rstat_updated()
    38  if (cgroup_rstat_cpu(cgrp, cpu)->updated_next)    in cgroup_rstat_updated()
    45  struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);    in cgroup_rstat_updated()
    62  prstatc = cgroup_rstat_cpu(parent, cpu);    in cgroup_rstat_updated()
    88  struct cgroup *root, int cpu)    in cgroup_rstat_cpu_pop_updated() argument
   106  rstatc = cgroup_rstat_cpu(pos, cpu);    in cgroup_rstat_cpu_pop_updated()
  [all …]
|
/kernel/trace/ |
D | ring_buffer.c |
   280  #define for_each_buffer_cpu(buffer, cpu) \    argument
   281  for_each_cpu(cpu, buffer->cpumask)
   283  #define for_each_online_buffer_cpu(buffer, cpu) \    argument
   284  for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
   488  int cpu;    member
   809  size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)    in ring_buffer_nr_pages() argument
   811  return buffer->buffers[cpu]->nr_pages;    in ring_buffer_nr_pages()
   821  size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)    in ring_buffer_nr_dirty_pages() argument
   827  read = local_read(&buffer->buffers[cpu]->pages_read);    in ring_buffer_nr_dirty_pages()
   828  lost = local_read(&buffer->buffers[cpu]->pages_lost);    in ring_buffer_nr_dirty_pages()
  [all …]
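ring_buffer.c wraps its per-instance cpumask in iteration macros built on for_each_cpu() and for_each_cpu_and(). A minimal sketch of the same wrapper pattern for a hypothetical structure; names are invented.

#include <linux/cpumask.h>

struct demo_buffer {
	cpumask_var_t cpumask;	/* CPUs this buffer instance covers */
};

/* Iterate every CPU the buffer covers. */
#define for_each_demo_cpu(buf, cpu) \
	for_each_cpu(cpu, (buf)->cpumask)

/* Iterate only the covered CPUs that are currently online. */
#define for_each_online_demo_cpu(buf, cpu) \
	for_each_cpu_and(cpu, (buf)->cpumask, cpu_online_mask)

static unsigned int demo_count_online(struct demo_buffer *buf)
{
	unsigned int cpu, n = 0;

	for_each_online_demo_cpu(buf, cpu)
		n++;
	return n;
}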
|
D | trace_functions_graph.c |
    37  int cpu;    member
    48  { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
   134  int cpu;    in trace_graph_entry() local
   172  cpu = raw_smp_processor_id();    in trace_graph_entry()
   173  data = per_cpu_ptr(tr->array_buffer.data, cpu);    in trace_graph_entry()
   242  int cpu;    in trace_graph_return() local
   252  cpu = raw_smp_processor_id();    in trace_graph_return()
   253  data = per_cpu_ptr(tr->array_buffer.data, cpu);    in trace_graph_return()
   331  static void print_graph_cpu(struct trace_seq *s, int cpu)    in print_graph_cpu() argument
   338  trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);    in print_graph_cpu()
  [all …]
|
D | trace_kdb.c |
    27  int cnt = 0, cpu;    in ftrace_dump_buf() local
    44  for_each_tracing_cpu(cpu) {    in ftrace_dump_buf()
    45  iter.buffer_iter[cpu] =    in ftrace_dump_buf()
    47  cpu, GFP_ATOMIC);    in ftrace_dump_buf()
    48  ring_buffer_read_start(iter.buffer_iter[cpu]);    in ftrace_dump_buf()
    49  tracing_iter_reset(&iter, cpu);    in ftrace_dump_buf()
    84  for_each_tracing_cpu(cpu) {    in ftrace_dump_buf()
    85  if (iter.buffer_iter[cpu]) {    in ftrace_dump_buf()
    86  ring_buffer_read_finish(iter.buffer_iter[cpu]);    in ftrace_dump_buf()
    87  iter.buffer_iter[cpu] = NULL;    in ftrace_dump_buf()
  [all …]
|
/kernel/rcu/ |
D | tree_stall.h |
   339  int cpu;    in rcu_dump_cpu_stacks() local
   345  for_each_leaf_node_possible_cpu(rnp, cpu)    in rcu_dump_cpu_stacks()
   346  if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {    in rcu_dump_cpu_stacks()
   347  if (cpu_is_offline(cpu))    in rcu_dump_cpu_stacks()
   348  pr_err("Offline CPU %d blocking current GP.\n", cpu);    in rcu_dump_cpu_stacks()
   349  else if (!trigger_single_cpu_backtrace(cpu))    in rcu_dump_cpu_stacks()
   350  dump_cpu_task(cpu);    in rcu_dump_cpu_stacks()
   358  static void print_cpu_stall_fast_no_hz(char *cp, int cpu)    in print_cpu_stall_fast_no_hz() argument
   360  struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);    in print_cpu_stall_fast_no_hz()
   369  static void print_cpu_stall_fast_no_hz(char *cp, int cpu)    in print_cpu_stall_fast_no_hz() argument
  [all …]
|
D | tree_nocb.h |
   101  WARN_ON_ONCE(smp_processor_id() != rdp->cpu);    in rcu_nocb_bypass_lock()
   120  WARN_ON_ONCE(smp_processor_id() != rdp->cpu);    in rcu_nocb_wait_contended()
   213  bool rcu_is_nocb_cpu(int cpu)    in rcu_is_nocb_cpu() argument
   216  return cpumask_test_cpu(cpu, rcu_nocb_mask);    in rcu_is_nocb_cpu()
   229  trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,    in __wake_nocb_gp()
   245  trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));    in __wake_nocb_gp()
   292  trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);    in wake_nocb_gp_defer()
   429  trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,    in rcu_nocb_try_bypass()
   444  trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,    in rcu_nocb_try_bypass()
   467  trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));    in rcu_nocb_try_bypass()
  [all …]
|
/kernel/events/ |
D | hw_breakpoint.c |
    52  static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)    in get_bp_info() argument
    54  return per_cpu_ptr(bp_cpuinfo + type, cpu);    in get_bp_info()
    88  static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)    in max_task_bp_pinned() argument
    90  unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;    in max_task_bp_pinned()
   105  static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)    in task_bp_pinned() argument
   114  (iter->cpu < 0 || cpu == iter->cpu))    in task_bp_pinned()
   123  if (bp->cpu >= 0)    in cpumask_of_bp()
   124  return cpumask_of(bp->cpu);    in cpumask_of_bp()
   137  int cpu;    in fetch_bp_busy_slots() local
   139  for_each_cpu(cpu, cpumask) {    in fetch_bp_busy_slots()
  [all …]
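hw_breakpoint.c manages the per-CPU breakpoint slot accounting consumed by consumers such as register_wide_hw_breakpoint(), which pins one breakpoint slot on every CPU. A hedged sketch of that consumer side, loosely modelled on the in-tree sample module; the handler name and the watched address parameter are hypothetical.

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/err.h>

static struct perf_event * __percpu *demo_hbp;

static void demo_hbp_handler(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	pr_info("demo: watched location written on CPU %d\n",
		smp_processor_id());
}

static int demo_hbp_setup(void *watch_addr)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)watch_addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	/* One breakpoint per CPU; consumes a pinned slot on each of them. */
	demo_hbp = register_wide_hw_breakpoint(&attr, demo_hbp_handler, NULL);
	if (IS_ERR((void __force *)demo_hbp))
		return PTR_ERR((void __force *)demo_hbp);
	return 0;
}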
|
/kernel/debug/kdb/ |
D | kdb_bt.c |
   107  kdb_bt_cpu(unsigned long cpu)    in kdb_bt_cpu() argument
   111  if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {    in kdb_bt_cpu()
   112  kdb_printf("WARNING: no process for cpu %ld\n", cpu);    in kdb_bt_cpu()
   117  kdb_tsk = KDB_TSK(cpu);    in kdb_bt_cpu()
   119  kdb_printf("WARNING: no task for cpu %ld\n", cpu);    in kdb_bt_cpu()
   140  unsigned long cpu;    in kdb_bt() local
   146  for_each_online_cpu(cpu) {    in kdb_bt()
   147  p = kdb_curr_task(cpu);    in kdb_bt()
   181  unsigned long cpu = ~0;    in kdb_bt() local
   185  diag = kdbgetularg((char *)argv[1], &cpu);    in kdb_bt()
  [all …]
|