
Searched refs:cpu (Results 1 – 25 of 143) sorted by relevance


/include/linux/
topology.h
94 static inline int cpu_to_node(int cpu) in cpu_to_node() argument
96 return per_cpu(numa_node, cpu); in cpu_to_node()
108 static inline void set_cpu_numa_node(int cpu, int node) in set_cpu_numa_node() argument
110 per_cpu(numa_node, cpu) = node; in set_cpu_numa_node()
151 static inline int cpu_to_mem(int cpu) in cpu_to_mem() argument
153 return per_cpu(_numa_mem_, cpu); in cpu_to_mem()
158 static inline void set_cpu_numa_mem(int cpu, int node) in set_cpu_numa_mem() argument
160 per_cpu(_numa_mem_, cpu) = node; in set_cpu_numa_mem()
175 static inline int cpu_to_mem(int cpu) in cpu_to_mem() argument
177 return cpu_to_node(cpu); in cpu_to_mem()
[all …]
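
The per-CPU NUMA helpers above are what NUMA-aware code uses to keep data close to the CPU that touches it. A minimal sketch of that pattern, with a hypothetical helper name:

    #include <linux/slab.h>
    #include <linux/topology.h>

    /* Allocate a buffer on the memory node nearest to @cpu. */
    static void *alloc_buf_near_cpu(int cpu, size_t size)
    {
            return kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
    }
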
arch_topology.h
16 bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
20 static inline unsigned long topology_get_cpu_scale(int cpu) in topology_get_cpu_scale() argument
22 return per_cpu(cpu_scale, cpu); in topology_get_cpu_scale()
25 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
29 static inline unsigned long topology_get_freq_scale(int cpu) in topology_get_freq_scale() argument
31 return per_cpu(freq_scale, cpu); in topology_get_freq_scale()
42 static inline unsigned long topology_get_thermal_pressure(int cpu) in topology_get_thermal_pressure() argument
44 return per_cpu(thermal_pressure, cpu); in topology_get_thermal_pressure()
65 #define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) argument
66 #define topology_core_id(cpu) (cpu_topology[cpu].core_id) argument
[all …]
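
A small sketch of reading the per-CPU capacity and topology accessors above together; the dump function is hypothetical and only prints what the helpers report:

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>
    #include <linux/printk.h>

    static void dump_cpu_topology(void)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    pr_info("cpu%d: package %d core %d capacity %lu\n",
                            cpu, topology_physical_package_id(cpu),
                            topology_core_id(cpu), topology_get_cpu_scale(cpu));
    }
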
cpumask.h
117 #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) argument
118 #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) argument
119 #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) argument
120 #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) argument
126 #define cpu_online(cpu) ((cpu) == 0) argument
127 #define cpu_possible(cpu) ((cpu) == 0) argument
128 #define cpu_present(cpu) ((cpu) == 0) argument
129 #define cpu_active(cpu) ((cpu) == 0) argument
134 static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) in cpu_max_bits_warn() argument
137 WARN_ON_ONCE(cpu >= bits); in cpu_max_bits_warn()
[all …]
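
The cpu_online()/cpu_possible()/cpu_present()/cpu_active() tests above are thin wrappers around cpumask_test_cpu() on SMP and collapse to CPU 0 on UP builds. A minimal sketch of querying them, with a hypothetical reporting function:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    static void report_cpu_states(void)
    {
            unsigned int cpu;

            for_each_possible_cpu(cpu)
                    pr_info("cpu%u: online=%d present=%d active=%d\n",
                            cpu, cpu_online(cpu), cpu_present(cpu), cpu_active(cpu));
    }
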
ring_buffer.h
100 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
101 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
103 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
109 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);
124 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
127 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
131 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
143 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
145 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
151 struct trace_buffer *buffer_b, int cpu);
[all …]
kernel_stat.h
49 #define kstat_cpu(cpu) per_cpu(kstat, cpu) argument
50 #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) argument
54 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
62 static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) in kstat_softirqs_cpu() argument
64 return kstat_cpu(cpu).softirqs[irq]; in kstat_softirqs_cpu()
76 static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu) in kstat_cpu_irqs_sum() argument
78 return kstat_cpu(cpu).irqs_sum; in kstat_cpu_irqs_sum()
83 enum cpu_usage_stat usage, int cpu);
84 extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
87 enum cpu_usage_stat usage, int cpu) in kcpustat_field() argument
[all …]
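
A short sketch that sums the per-CPU interrupt counters exposed above across all online CPUs; the wrapper function is hypothetical:

    #include <linux/kernel_stat.h>
    #include <linux/cpumask.h>

    static unsigned long total_irqs_serviced(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_online_cpu(cpu)
                    sum += kstat_cpu_irqs_sum(cpu);

            return sum;
    }
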
tick.h
22 extern void tick_cleanup_dead_cpu(int cpu);
28 static inline void tick_cleanup_dead_cpu(int cpu) { } in tick_cleanup_dead_cpu() argument
72 extern void tick_offline_cpu(unsigned int cpu);
74 static inline void tick_offline_cpu(unsigned int cpu) { } in tick_offline_cpu() argument
128 extern bool tick_nohz_tick_stopped_cpu(int cpu);
139 extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
140 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
141 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
153 static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; } in tick_nohz_tick_stopped_cpu() argument
170 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } in get_cpu_idle_time_us() argument
[all …]
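
get_cpu_idle_time_us() above reports NO_HZ idle accounting. A hedged sketch of reading it; the function name is hypothetical and the -1 check assumes the "accounting unavailable" convention shown in the stub above:

    #include <linux/tick.h>
    #include <linux/printk.h>

    static void report_idle_time(int cpu)
    {
            u64 wall;
            u64 idle = get_cpu_idle_time_us(cpu, &wall);

            if (idle == (u64)-1)
                    pr_info("cpu%d: idle accounting unavailable\n", cpu);
            else
                    pr_info("cpu%d: %llu us idle (updated at %llu us)\n",
                            cpu, idle, wall);
    }
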
smpboot.h
34 int (*thread_should_run)(unsigned int cpu);
35 void (*thread_fn)(unsigned int cpu);
36 void (*create)(unsigned int cpu);
37 void (*setup)(unsigned int cpu);
38 void (*cleanup)(unsigned int cpu, bool online);
39 void (*park)(unsigned int cpu);
40 void (*unpark)(unsigned int cpu);
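
The hooks above belong to struct smp_hotplug_thread, which smpboot_register_percpu_thread() uses to spawn one kthread per CPU. A minimal sketch with hypothetical names; unused hooks are left NULL:

    #include <linux/smpboot.h>
    #include <linux/percpu.h>
    #include <linux/sched.h>

    static DEFINE_PER_CPU(struct task_struct *, demo_task);

    static int demo_should_run(unsigned int cpu)
    {
            return 0;               /* nothing pending in this sketch */
    }

    static void demo_fn(unsigned int cpu)
    {
            /* per-CPU work would go here */
    }

    static struct smp_hotplug_thread demo_threads = {
            .store                  = &demo_task,
            .thread_should_run      = demo_should_run,
            .thread_fn              = demo_fn,
            .thread_comm            = "demo/%u",
    };

    /* smpboot_register_percpu_thread(&demo_threads) starts one thread per CPU. */
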
rcutree.h
30 static inline void rcu_virt_note_context_switch(int cpu) in rcu_virt_note_context_switch() argument
39 bool rcu_eqs_special_set(int cpu);
72 int rcutree_prepare_cpu(unsigned int cpu);
73 int rcutree_online_cpu(unsigned int cpu);
74 int rcutree_offline_cpu(unsigned int cpu);
75 int rcutree_dead_cpu(unsigned int cpu);
76 int rcutree_dying_cpu(unsigned int cpu);
77 void rcu_cpu_starting(unsigned int cpu);
cpu.h
26 struct cpu { struct
37 extern int register_cpu(struct cpu *cpu, int num); argument
38 extern struct device *get_cpu_device(unsigned cpu);
39 extern bool cpu_is_hotpluggable(unsigned cpu);
40 extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
42 int cpu, unsigned int *thread);
83 extern void unregister_cpu(struct cpu *cpu);
101 int add_cpu(unsigned int cpu);
103 void notify_cpu_starting(unsigned int cpu);
132 void clear_tasks_mm_cpumask(int cpu);
[all …]
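
get_cpu_device() and cpu_is_hotpluggable() above give the device-model view of a CPU. A small sketch with a hypothetical function name:

    #include <linux/cpu.h>
    #include <linux/device.h>
    #include <linux/printk.h>

    static void describe_cpu(unsigned int cpu)
    {
            struct device *dev = get_cpu_device(cpu);

            if (!dev)
                    return;         /* CPU not registered */

            pr_info("%s: hotpluggable=%d\n",
                    dev_name(dev), cpu_is_hotpluggable(cpu));
    }
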
cacheinfo.h
82 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
83 int init_cache_level(unsigned int cpu);
84 int populate_cache_leaves(unsigned int cpu);
85 int cache_setup_acpi(unsigned int cpu);
95 static inline int acpi_find_last_cache_level(unsigned int cpu) in acpi_find_last_cache_level() argument
100 int acpi_find_last_cache_level(unsigned int cpu);
109 static inline int get_cpu_cacheinfo_id(int cpu, int level) in get_cpu_cacheinfo_id() argument
111 struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); in get_cpu_cacheinfo_id()
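
A hedged sketch of walking the cache leaves behind get_cpu_cacheinfo(); it assumes the usual cpu_cacheinfo fields (info_list, num_leaves) and cacheinfo fields (level, size), and the dump function is hypothetical:

    #include <linux/cacheinfo.h>
    #include <linux/printk.h>

    static void dump_cache_leaves(unsigned int cpu)
    {
            struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
            unsigned int i;

            if (!ci->info_list)
                    return;         /* leaves not populated yet */

            for (i = 0; i < ci->num_leaves; i++) {
                    struct cacheinfo *leaf = &ci->info_list[i];

                    pr_info("cpu%u: L%u cache, %u bytes\n",
                            cpu, leaf->level, leaf->size);
            }
    }
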
irq_cpustat.h
22 #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat.member, cpu)) argument
26 #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */ argument
stop_machine.h
41 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
43 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
45 void stop_machine_park(int cpu);
46 void stop_machine_unpark(int cpu);
48 int stop_one_cpu_async(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
63 static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) in stop_one_cpu() argument
67 if (cpu == smp_processor_id()) in stop_one_cpu()
82 static inline bool stop_one_cpu_nowait(unsigned int cpu, in stop_one_cpu_nowait() argument
86 if (cpu == smp_processor_id()) { in stop_one_cpu_nowait()
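
stop_one_cpu() above runs a callback on the stopper thread of one CPU and waits for it to finish. A minimal sketch with a hypothetical callback:

    #include <linux/stop_machine.h>
    #include <linux/smp.h>
    #include <linux/printk.h>

    static int probe_fn(void *arg)
    {
            /* Runs on the target CPU with other stopper work excluded. */
            pr_info("probe running on cpu %d\n", smp_processor_id());
            return 0;
    }

    /* stop_one_cpu(cpu, probe_fn, NULL) blocks until probe_fn has run on @cpu. */
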
hypervisor.h
15 static inline void hypervisor_pin_vcpu(int cpu) in hypervisor_pin_vcpu() argument
17 x86_platform.hyper.pin_vcpu(cpu); in hypervisor_pin_vcpu()
24 static inline void hypervisor_pin_vcpu(int cpu) in hypervisor_pin_vcpu() argument
smp.h
18 typedef bool (*smp_cond_func_t)(int cpu, void *info);
46 extern void __smp_call_single_queue(int cpu, struct llist_node *node);
77 int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
100 extern void smp_send_reschedule(int cpu);
171 static inline void smp_send_reschedule(int cpu) { } in smp_send_reschedule() argument
255 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
259 int smpcfd_prepare_cpu(unsigned int cpu);
260 int smpcfd_dead_cpu(unsigned int cpu);
261 int smpcfd_dying_cpu(unsigned int cpu);
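
Alongside the async and conditional variants listed above, smp.h also provides the synchronous smp_call_function_single(). A sketch with hypothetical names; the callback runs in IPI context, so it must stay short and atomic:

    #include <linux/smp.h>
    #include <linux/types.h>

    static void read_remote(void *info)
    {
            *(u64 *)info = 42;      /* stand-in for a per-CPU register read */
    }

    static u64 read_remote_on(int cpu)
    {
            u64 val = 0;

            smp_call_function_single(cpu, read_remote, &val, 1 /* wait */);
            return val;
    }
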
cpuhotplug.h
206 int (*startup)(unsigned int cpu),
207 int (*teardown)(unsigned int cpu), bool multi_instance);
211 int (*startup)(unsigned int cpu),
212 int (*teardown)(unsigned int cpu),
226 int (*startup)(unsigned int cpu), in cpuhp_setup_state() argument
227 int (*teardown)(unsigned int cpu)) in cpuhp_setup_state() argument
234 int (*startup)(unsigned int cpu), in cpuhp_setup_state_cpuslocked() argument
235 int (*teardown)(unsigned int cpu)) in cpuhp_setup_state_cpuslocked() argument
254 int (*startup)(unsigned int cpu), in cpuhp_setup_state_nocalls() argument
255 int (*teardown)(unsigned int cpu)) in cpuhp_setup_state_nocalls() argument
[all …]
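
A minimal sketch of cpuhp_setup_state() with a dynamic state; the callback and state names are hypothetical. The startup callback runs for every CPU already online and every CPU that comes online later, the teardown callback on the way down:

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>
    #include <linux/printk.h>

    static int demo_cpu_online(unsigned int cpu)
    {
            pr_info("cpu%u coming online\n", cpu);
            return 0;
    }

    static int demo_cpu_offline(unsigned int cpu)
    {
            pr_info("cpu%u going offline\n", cpu);
            return 0;
    }

    static int __init demo_init(void)
    {
            int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                        demo_cpu_online, demo_cpu_offline);

            return ret < 0 ? ret : 0;       /* _DYN returns the allocated state */
    }
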
nmi.h
49 extern int lockup_detector_online_cpu(unsigned int cpu);
50 extern int lockup_detector_offline_cpu(unsigned int cpu);
122 int watchdog_nmi_enable(unsigned int cpu);
123 void watchdog_nmi_disable(unsigned int cpu);
164 static inline bool trigger_single_cpu_backtrace(int cpu) in trigger_single_cpu_backtrace() argument
166 arch_trigger_cpumask_backtrace(cpumask_of(cpu), false); in trigger_single_cpu_backtrace()
189 static inline bool trigger_single_cpu_backtrace(int cpu) in trigger_single_cpu_backtrace() argument
cpu_rmap.h
42 static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu) in cpu_rmap_lookup_index() argument
44 return rmap->near[cpu].index; in cpu_rmap_lookup_index()
47 static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu) in cpu_rmap_lookup_obj() argument
49 return rmap->obj[rmap->near[cpu].index]; in cpu_rmap_lookup_obj()
/include/trace/events/
cpuhp.h
12 TP_PROTO(unsigned int cpu,
17 TP_ARGS(cpu, target, idx, fun),
20 __field( unsigned int, cpu )
27 __entry->cpu = cpu;
34 __entry->cpu, __entry->target, __entry->idx, __entry->fun)
39 TP_PROTO(unsigned int cpu,
45 TP_ARGS(cpu, target, idx, fun, node),
48 __field( unsigned int, cpu )
55 __entry->cpu = cpu;
62 __entry->cpu, __entry->target, __entry->idx, __entry->fun)
[all …]
irq_matrix.h
67 TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix,
70 TP_ARGS(bit, cpu, matrix, cmap),
74 __field( unsigned int, cpu )
87 __entry->cpu = cpu;
99 __entry->bit, __entry->cpu, __entry->online,
143 TP_PROTO(int bit, unsigned int cpu,
146 TP_ARGS(bit, cpu, matrix, cmap)
151 TP_PROTO(int bit, unsigned int cpu,
154 TP_ARGS(bit, cpu, matrix, cmap)
159 TP_PROTO(int bit, unsigned int cpu,
[all …]
/include/linux/clk/
tegra.h
33 void (*wait_for_reset)(u32 cpu);
34 void (*put_in_reset)(u32 cpu);
35 void (*out_of_reset)(u32 cpu);
36 void (*enable_clock)(u32 cpu);
37 void (*disable_clock)(u32 cpu);
47 static inline void tegra_wait_cpu_in_reset(u32 cpu) in tegra_wait_cpu_in_reset() argument
52 tegra_cpu_car_ops->wait_for_reset(cpu); in tegra_wait_cpu_in_reset()
55 static inline void tegra_put_cpu_in_reset(u32 cpu) in tegra_put_cpu_in_reset() argument
60 tegra_cpu_car_ops->put_in_reset(cpu); in tegra_put_cpu_in_reset()
63 static inline void tegra_cpu_out_of_reset(u32 cpu) in tegra_cpu_out_of_reset() argument
[all …]
/include/linux/sched/
hotplug.h
9 extern int sched_cpu_starting(unsigned int cpu);
10 extern int sched_cpu_activate(unsigned int cpu);
12 extern int sched_cpu_deactivate(unsigned int cpu);
14 extern int sched_cpu_drain_rq(unsigned int cpu);
15 extern void sched_cpu_drain_rq_wait(unsigned int cpu);
18 extern int sched_cpu_dying(unsigned int cpu);
nohz.h
10 extern void nohz_balance_enter_idle(int cpu);
13 static inline void nohz_balance_enter_idle(int cpu) { } in nohz_balance_enter_idle() argument
27 extern void wake_up_nohz_cpu(int cpu);
29 static inline void wake_up_nohz_cpu(int cpu) { } in wake_up_nohz_cpu() argument
clock.h
21 extern u64 sched_clock_cpu(int cpu);
43 static inline u64 cpu_clock(int cpu) in cpu_clock() argument
77 static inline u64 cpu_clock(int cpu) in cpu_clock() argument
79 return sched_clock_cpu(cpu); in cpu_clock()
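
cpu_clock() above is the per-CPU nanosecond clock. A small sketch of timing a section with it; the function is hypothetical and assumes the measurement stays on one CPU:

    #include <linux/sched/clock.h>
    #include <linux/smp.h>
    #include <linux/printk.h>

    static void time_something(void)
    {
            int cpu = raw_smp_processor_id();
            u64 t0 = cpu_clock(cpu);

            /* ... work to be measured ... */

            pr_info("took %llu ns\n", cpu_clock(cpu) - t0);
    }
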
/include/asm-generic/
topology.h
35 #define cpu_to_node(cpu) ((void)(cpu),0) argument
41 #define set_cpu_numa_node(cpu, node) argument
44 #define cpu_to_mem(cpu) ((void)(cpu),0) argument
72 #define set_cpu_numa_mem(cpu, node) argument
/include/trace/hooks/
cpu.h
3 #define TRACE_SYSTEM cpu
14 TP_PROTO(unsigned int cpu),
15 TP_ARGS(cpu));
