/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/cpu.h - generic cpu definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct cpu' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/cpu.c
 *
 * CPUs are exported via sysfs in the devices/system/cpu
 * directory.
 */
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>

struct device;
struct device_node;
struct attribute_group;

struct cpu {
        int node_id;            /* The node which contains the CPU */
        int hotpluggable;       /* creates sysfs control file if hotpluggable */
        struct device dev;
};
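
/*
 * Example (illustrative sketch only, not part of this header): an
 * architecture typically wraps 'struct cpu' in its own per-CPU structure
 * and registers it from its topology setup code; the device core in
 * drivers/base/cpu.c does the rest. The names "my_arch_cpu" and
 * "my_arch_register_cpus" below are hypothetical.
 *
 *	struct my_arch_cpu {
 *		struct cpu cpu;
 *	};
 *	static DEFINE_PER_CPU(struct my_arch_cpu, my_arch_cpus);
 *
 *	static int __init my_arch_register_cpus(void)
 *	{
 *		int i;
 *
 *		for_each_possible_cpu(i) {
 *			struct cpu *c = &per_cpu(my_arch_cpus, i).cpu;
 *
 *			c->hotpluggable = 1;
 *			if (register_cpu(c, i))
 *				pr_warn("failed to register CPU %d\n", i);
 *		}
 *		return 0;
 *	}
 *	subsys_initcall(my_arch_register_cpus);
 */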

extern void boot_cpu_init(void);
extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);

extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
                                              int cpu, unsigned int *thread);

extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);

extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);

extern ssize_t cpu_show_meltdown(struct device *dev,
                                 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
                                   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
                                   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
                                          struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
                             struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
                            struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf);
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
                                      struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf);
extern ssize_t cpu_show_retbleed(struct device *dev,
                                 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
                                             struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_gds(struct device *dev,
                            struct device_attribute *attr, char *buf);
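
/*
 * Example (sketch only, not any architecture's actual implementation): the
 * cpu_show_*() hooks above back the files in
 * /sys/devices/system/cpu/vulnerabilities, with weak "Not affected"
 * defaults provided by drivers/base/cpu.c. An architecture overrides one
 * roughly like this; the helper "my_arch_meltdown_mitigated" is made up.
 *
 *	ssize_t cpu_show_meltdown(struct device *dev,
 *				  struct device_attribute *attr, char *buf)
 *	{
 *		if (my_arch_meltdown_mitigated())
 *			return sysfs_emit(buf, "Mitigation: PTI\n");
 *		return sysfs_emit(buf, "Vulnerable\n");
 *	}
 */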

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
                                 const struct attribute_group **groups,
                                 const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif

/*
 * These states are not related to the core CPU hotplug mechanism. They are
 * used by various (sub)architectures to track internal state
 */
#define CPU_ONLINE              0x0002 /* CPU is up */
#define CPU_UP_PREPARE          0x0003 /* CPU coming up */
#define CPU_DEAD                0x0007 /* CPU dead */
#define CPU_DEAD_FROZEN         0x0008 /* CPU timed out on unplug */
#define CPU_POST_DEAD           0x0009 /* CPU successfully unplugged */
#define CPU_BROKEN              0x000B /* CPU did not die properly */

#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
int add_cpu(unsigned int cpu);
int cpu_device_up(struct device *dev);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
int bringup_hibernate_cpu(unsigned int sleep_cpu);
void bringup_nonboot_cpus(unsigned int setup_max_cpus);

#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen      0

static inline void cpu_maps_update_begin(void)
{
}

static inline void cpu_maps_update_done(void)
{
}

#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;

#ifdef CONFIG_HOTPLUG_CPU
extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
extern int cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int remove_cpu(unsigned int cpu);
int pause_cpus(struct cpumask *cpumask);
int resume_cpus(struct cpumask *cpumask);
int cpu_device_down(struct device *dev);
extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);

#else /* CONFIG_HOTPLUG_CPU */

static inline void cpus_write_lock(void) { }
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
static inline int pause_cpus(struct cpumask *cpumask) { return -ENODEV; }
static inline int resume_cpus(struct cpumask *cpumask) { return -ENODEV; }
static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif  /* !CONFIG_HOTPLUG_CPU */

/* Wrappers which go away once all code is converted */
static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
static inline void get_online_cpus(void) { cpus_read_lock(); }
static inline void put_online_cpus(void) { cpus_read_unlock(); }
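
/*
 * Example (sketch only, not code from this file): a typical caller takes the
 * CPU hotplug read lock to keep the set of online CPUs stable while walking
 * it. The function name "my_driver_count_online" is hypothetical.
 *
 *	static unsigned int my_driver_count_online(void)
 *	{
 *		unsigned int cpu, n = 0;
 *
 *		cpus_read_lock();
 *		for_each_online_cpu(cpu)
 *			n++;
 *		cpus_read_unlock();
 *		return n;
 *	}
 *
 * get_online_cpus()/put_online_cpus() above are legacy wrappers around the
 * same read-side lock.
 */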

#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
extern void thaw_secondary_cpus(void);

static inline int suspend_disable_secondary_cpus(void)
{
        int cpu = 0;

        if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
                cpu = -1;

        return freeze_secondary_cpus(cpu);
}
static inline void suspend_enable_secondary_cpus(void)
{
        return thaw_secondary_cpus();
}

#else /* !CONFIG_PM_SLEEP_SMP */
static inline void thaw_secondary_cpus(void) {}
static inline int suspend_disable_secondary_cpus(void) { return 0; }
static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */

void cpu_startup_entry(enum cpuhp_state state);

void cpu_idle_poll_ctrl(bool enable);

/* Attach to any functions which should be considered cpuidle. */
#define __cpuidle       __section(".cpuidle.text")

bool cpu_in_idle(unsigned long pc);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
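
/*
 * Example (illustrative sketch): an architecture's low-level idle entry is
 * marked __cpuidle so it lands in the .cpuidle.text section, which is what
 * lets cpu_in_idle() recognise a program counter inside the idle path. The
 * body below is hypothetical, x86-flavoured pseudocode.
 *
 *	void __cpuidle arch_cpu_idle(void)
 *	{
 *		// enable interrupts and halt until the next wakeup event
 *		raw_safe_halt();
 *	}
 */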

#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
void arch_cpu_finalize_init(void);
#else
static inline void arch_cpu_finalize_init(void) { }
#endif

int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
void play_idle_precise(u64 duration_ns, u64 latency_ns);

static inline void play_idle(unsigned long duration_us)
{
        play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
}
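
/*
 * Example (sketch, not taken from an in-tree user): idle injection forces the
 * calling task's CPU into the idle loop for a fixed window, e.g. to bound
 * thermal output. A hypothetical cooling loop might do:
 *
 *	while (my_zone_too_hot())	// "my_zone_too_hot" is made up
 *		play_idle(5000);	// inject 5 ms of forced idle
 *
 * play_idle_precise() additionally lets the caller bound the allowed wakeup
 * latency in nanoseconds instead of accepting U64_MAX.
 */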

#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

enum cpuhp_smt_control {
        CPU_SMT_ENABLED,
        CPU_SMT_DISABLED,
        CPU_SMT_FORCE_DISABLED,
        CPU_SMT_NOT_SUPPORTED,
        CPU_SMT_NOT_IMPLEMENTED,
};

#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology(void);
extern bool cpu_smt_possible(void);
extern int cpuhp_smt_enable(void);
extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
#else
# define cpu_smt_control                (CPU_SMT_NOT_IMPLEMENTED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology(void) { }
static inline bool cpu_smt_possible(void) { return false; }
static inline int cpuhp_smt_enable(void) { return 0; }
static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
#endif
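
/*
 * Example (sketch only): code outside the hotplug core usually consults
 * cpu_smt_possible() rather than reading cpu_smt_control directly, e.g. to
 * decide whether SMT siblings could ever come online. The helper
 * "my_feature_wants_smt_off" is hypothetical.
 *
 *	if (cpu_smt_possible() && my_feature_wants_smt_off())
 *		pr_info("SMT may become active; consider booting with nosmt\n");
 */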

extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);

#endif /* _LINUX_CPU_H_ */