/*
 * include/linux/cpu.h - generic cpu definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct cpu' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/cpu.c
 *
 * CPUs are exported via sysfs in the devices/system/cpu
 * directory.
 */
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>

struct device;
struct device_node;
struct attribute_group;

struct cpu {
	int node_id;		/* The node which contains the CPU */
	int hotpluggable;	/* creates sysfs control file if hotpluggable */
	struct device dev;
};

extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread);

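/*
 * Illustrative sketch (not part of the original header; the names
 * 'example_cpu' and 'example_topology_init' are hypothetical): architecture
 * code typically keeps one 'struct cpu' per possible CPU and registers each
 * of them at boot so that the devices/system/cpu/cpuN sysfs nodes appear.
 * Setting ->hotpluggable before registration creates the sysfs "online"
 * control file.
 *
 *	static DEFINE_PER_CPU(struct cpu, example_cpu);
 *
 *	static int __init example_topology_init(void)
 *	{
 *		unsigned int i;
 *
 *		for_each_possible_cpu(i) {
 *			struct cpu *c = &per_cpu(example_cpu, i);
 *
 *			c->hotpluggable = 1;
 *			register_cpu(c, i);
 *		}
 *		return 0;
 *	}
 *	subsys_initcall(example_topology_init);
 */
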
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);

extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);

extern ssize_t cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
			    struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
					struct device_attribute *attr,
					char *buf);
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
				      struct device_attribute *attr, char *buf);

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...);
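
/*
 * Illustrative sketch (assumption, not taken from this header): a driver can
 * attach an auxiliary device underneath a CPU's sysfs node with the
 * printf-style cpu_device_create() helper.  'example_groups' is a
 * hypothetical attribute_group array; the return value should be checked
 * with IS_ERR().
 *
 *	struct device *parent = get_cpu_device(cpu);
 *	struct device *dev;
 *
 *	if (parent)
 *		dev = cpu_device_create(parent, NULL, example_groups,
 *					"example%u", cpu);
 */
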
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;

/*
 * CPU notifier priorities.
 */
enum {
	/*
	 * SCHED_ACTIVE marks a cpu which is coming up active during
	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
	 * cpu_active mask right after SCHED_ACTIVE.  During
	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
	 * ordered in a similar way.
	 *
	 * This ordering guarantees consistent cpu_active mask and
	 * migration behavior to all cpu notifiers.
	 */
	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,

	/* migration should happen before other stuff but after perf */
	CPU_PRI_PERF		= 20,
	CPU_PRI_MIGRATION	= 10,
	CPU_PRI_SMPBOOT		= 9,
	/* bring up workqueues before normal notifiers and down after */
	CPU_PRI_WORKQUEUE_UP	= 5,
	CPU_PRI_WORKQUEUE_DOWN	= -5,
};

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead.
					* Called on the dying cpu, interrupts
					* are already disabled. Must not
					* sleep, must not fail */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */
#define CPU_STARTING		0x000A /* CPU (unsigned)v soon running.
					* Called on the new cpu, just before
					* enabling interrupts. Must not sleep,
					* must not fail */
#define CPU_DYING_IDLE		0x000B /* CPU (unsigned)v dying, reached
					* idle loop. */
#define CPU_BROKEN		0x000C /* CPU (unsigned)v did not die properly,
					* perhaps due to preemption. */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)

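/*
 * Illustrative note (not part of the original header): CPU_TASKS_FROZEN is
 * simply OR'ed into the event code, so a notifier that treats suspend-time
 * and normal transitions identically can mask the bit off before switching
 * on the action:
 *
 *	switch (action & ~CPU_TASKS_FROZEN) {
 *	case CPU_ONLINE:	// covers CPU_ONLINE and CPU_ONLINE_FROZEN
 *		...
 *	}
 */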

#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
#define cpu_notifier(fn, pri) {					\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_cpu_notifier(&fn##_nb);			\
}

#define __cpu_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	__register_cpu_notifier(&fn##_nb);			\
}

extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);

#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif

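/*
 * Illustrative sketch (the names 'example_cpu_callback' and 'example_init'
 * are hypothetical): registering a hotplug callback with the cpu_notifier()
 * convenience macro.  The hcpu argument carries the CPU number cast to a
 * pointer, and the priority argument orders the callback relative to the
 * CPU_PRI_* values above.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			pr_info("cpu %u came online\n", cpu);
 *			break;
 *		case CPU_DEAD:
 *			pr_info("cpu %u went away\n", cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		cpu_notifier(example_cpu_callback, 0);
 *		return 0;
 *	}
 */
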
void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);

#define cpu_notifier_register_begin	cpu_maps_update_begin
#define cpu_notifier_register_done	cpu_maps_update_done

#else	/* CONFIG_SMP */

#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void cpu_maps_update_begin(void)
{
}

static inline void cpu_maps_update_done(void)
{
}

static inline void cpu_notifier_register_begin(void)
{
}

static inline void cpu_notifier_register_done(void)
{
}

static inline void smpboot_thread_init(void)
{
}

#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;

#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */

extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
#define __register_hotcpu_notifier(nb)	__register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
#define __unregister_hotcpu_notifier(nb)	__unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);

#else		/* CONFIG_HOTPLUG_CPU */

static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus()	do { } while (0)
#define put_online_cpus()	do { } while (0)
#define cpu_hotplug_disable()	do { } while (0)
#define cpu_hotplug_enable()	do { } while (0)
#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define __register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#define __unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#endif		/* CONFIG_HOTPLUG_CPU */

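/*
 * Illustrative sketch ('example_walk_online_cpus' is a hypothetical name):
 * get_online_cpus()/put_online_cpus() pin the current set of online CPUs so
 * it cannot change while being walked.
 *
 *	static void example_walk_online_cpus(void)
 *	{
 *		unsigned int cpu;
 *
 *		get_online_cpus();
 *		for_each_online_cpu(cpu)
 *			pr_info("cpu %u is online\n", cpu);
 *		put_online_cpus();
 *	}
 */
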
#ifdef CONFIG_PM_SLEEP_SMP
extern int disable_nonboot_cpus(void);
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */

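/*
 * Illustrative sketch (assumption about the typical caller): the suspend code
 * uses this pair to take every CPU except the boot CPU offline before
 * entering the sleep state and to bring them back afterwards, roughly:
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		// ... enter the sleep state ...
 *		enable_nonboot_cpus();
 *	}
 */
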
enum cpuhp_state {
	CPUHP_OFFLINE,
	CPUHP_ONLINE,
};

void cpu_startup_entry(enum cpuhp_state state);

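/*
 * Illustrative sketch (assumption about typical arch code; the 'example_'
 * name is hypothetical): the routine that finishes bringing up a secondary
 * CPU does not return; after announcing itself it enters the idle loop via
 * cpu_startup_entry().
 *
 *	void example_secondary_start_kernel(void)
 *	{
 *		// ... arch-specific bring-up ...
 *		notify_cpu_starting(smp_processor_id());
 *		// ... mark the CPU online, enable interrupts ...
 *		cpu_startup_entry(CPUHP_ONLINE);
 *	}
 */
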
void cpu_idle_poll_ctrl(bool enable);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);

DECLARE_PER_CPU(bool, cpu_dead_idle);

int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

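/*
 * Illustrative sketch (assumption about typical usage): during hot-unplug the
 * CPU driving the operation waits, with a timeout in seconds, for the dying
 * CPU to report its own demise from the idle loop.
 *
 *	// on the CPU tearing another one down:
 *	if (!cpu_wait_death(cpu, 5))
 *		pr_err("cpu %u refused to die\n", cpu);
 *
 *	// on the dying CPU, shortly before arch_cpu_idle_dead():
 *	(void)cpu_report_death();
 */
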
/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
};

extern enum cpu_mitigations cpu_mitigations;

/* mitigations=off */
static inline bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}

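/*
 * Illustrative sketch ('example_select_mitigation' is a hypothetical name):
 * code that would enable an optional speculation mitigation can honour
 * "mitigations=off" by bailing out early.
 *
 *	static void __init example_select_mitigation(void)
 *	{
 *		if (cpu_mitigations_off())
 *			return;
 *		// ... otherwise pick and enable the default mitigation ...
 *	}
 */
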
#define IDLE_START 1
#define IDLE_END 2

void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);
void idle_notifier_call_chain(unsigned long val);

#endif /* _LINUX_CPU_H_ */