// SPDX-License-Identifier: GPL-2.0
/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
#include <linux/sched/isolation.h>

#include "base.h"

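/* Per-CPU pointer to the sysfs device registered for each logical CPU. */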
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
	/* ACPI style match is the only one that may succeed. */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
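/*
 * Move a CPU's node linkage when its NUMA node changes: remove the sysfs
 * link under the old node and recreate it under the new one.
 */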
static void change_cpu_under_node(struct cpu *cpu,
			unsigned int from_nid, unsigned int to_nid)
{
	int cpuid = cpu->dev.id;
	unregister_cpu_under_node(cpuid, from_nid);
	register_cpu_under_node(cpuid, to_nid);
	cpu->node_id = to_nid;
}

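/*
 * Bus ->online() callback: bring the CPU up and fix up its NUMA node
 * linkage if the node number changed in the process.
 */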
static int cpu_subsys_online(struct device *dev)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = dev->id;
	int from_nid, to_nid;
	int ret;

	from_nid = cpu_to_node(cpuid);
	if (from_nid == NUMA_NO_NODE)
		return -ENODEV;

	ret = cpu_device_up(dev);
	/*
	 * When hot adding memory to a memoryless node and enabling a CPU
	 * on that node, the node number of the CPU may change internally.
	 */
	to_nid = cpu_to_node(cpuid);
	if (from_nid != to_nid)
		change_cpu_under_node(cpu, from_nid, to_nid);

	return ret;
}

static int cpu_subsys_offline(struct device *dev)
{
	return cpu_device_down(dev);
}

void unregister_cpu(struct cpu *cpu)
{
	int logical_cpu = cpu->dev.id;

	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

	device_unregister(&cpu->dev);
	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
	return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
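/*
 * Optional arch-specific "probe" and "release" control files; writes are
 * forwarded to arch_cpu_probe()/arch_cpu_release() under the device
 * hotplug lock.
 */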
static ssize_t cpu_probe_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	ssize_t cnt;
	int ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	cnt = arch_cpu_probe(buf, count);

	unlock_device_hotplug();
	return cnt;
}

static ssize_t cpu_release_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	ssize_t cnt;
	int ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	cnt = arch_cpu_release(buf, count);

	unlock_device_hotplug();
	return cnt;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */

struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
	.match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
	.online = cpu_subsys_online,
	.offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

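/*
 * Expose the physical address (crash_notes) and size (crash_notes_size) of
 * this CPU's per-cpu crash notes buffer, consumed by kexec/kdump userspace.
 */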
static ssize_t crash_notes_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	unsigned long long addr;
	int cpunum;

	cpunum = cpu->dev.id;

	/*
	 * Might be reading other cpu's data based on which cpu read thread
	 * has been scheduled. But cpu data (memory) is allocated once during
	 * boot up and this data does not change thereafter. Hence this
	 * operation should be safe. No locking required.
	 */
	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));

	return sysfs_emit(buf, "%llx\n", addr);
}
static DEVICE_ATTR_ADMIN_RO(crash_notes);

static ssize_t crash_notes_size_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	return sysfs_emit(buf, "%zu\n", sizeof(note_buf_t));
}
static DEVICE_ATTR_ADMIN_RO(crash_notes_size);

static struct attribute *crash_note_cpu_attrs[] = {
	&dev_attr_crash_notes.attr,
	&dev_attr_crash_notes_size.attr,
	NULL
};

static const struct attribute_group crash_note_cpu_attr_group = {
	.attrs = crash_note_cpu_attrs,
};
#endif

static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};

static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
	struct device_attribute attr;
	const struct cpumask *const map;
};

static ssize_t show_cpus_attr(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);

	return cpumap_print_to_pagebuf(true, buf, ca->map);
}

#define _CPU_ATTR(name, map) \
	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &__cpu_online_mask),
	_CPU_ATTR(possible, &__cpu_possible_mask),
	_CPU_ATTR(present, &__cpu_present_mask),
};

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", NR_CPUS - 1);
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int len = 0;
	cpumask_var_t offline;

	/* display offline cpus < nr_cpu_ids */
	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;
	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
	len += sysfs_emit_at(buf, len, "%*pbl", cpumask_pr_args(offline));
	free_cpumask_var(offline);

	/* display offline cpus >= nr_cpu_ids */
	if (total_cpus && nr_cpu_ids < total_cpus) {
		len += sysfs_emit_at(buf, len, ",");

		if (nr_cpu_ids == total_cpus-1)
			len += sysfs_emit_at(buf, len, "%u", nr_cpu_ids);
		else
			len += sysfs_emit_at(buf, len, "%u-%d",
					     nr_cpu_ids, total_cpus - 1);
	}

	len += sysfs_emit_at(buf, len, "\n");

	return len;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

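/* CPUs excluded from the scheduler's housekeeping (HK_TYPE_DOMAIN) mask. */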
static ssize_t print_cpus_isolated(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int len;
	cpumask_var_t isolated;

	if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
		return -ENOMEM;

	cpumask_andnot(isolated, cpu_possible_mask,
		       housekeeping_cpumask(HK_TYPE_DOMAIN));
	len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));

	free_cpumask_var(isolated);

	return len;
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);

#ifdef CONFIG_NO_HZ_FULL
static ssize_t print_cpus_nohz_full(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif

static void cpu_device_release(struct device *dev)
{
	/*
	 * This is an empty function to prevent the driver core from spitting a
	 * warning at us.  Yes, I know this is directly opposite of what the
	 * documentation for the driver core and kobjects say, and the author
	 * of this code has already been publicly ridiculed for doing
	 * something as foolish as this.  However, at this point in time, it is
	 * the only way to handle the issue of statically allocated cpu
	 * devices.  The different architectures will have their cpu device
	 * code reworked to properly handle this in the near future, so this
	 * function will then be changed to correctly free up the memory held
	 * by the cpu device.
	 *
	 * Never copy this way of doing things, or you too will be made fun of
	 * on the linux-kernel list, you have been warned.
	 */
}

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static ssize_t print_cpu_modalias(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int len = 0;
	u32 i;

	len += sysfs_emit_at(buf, len,
			     "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
			     CPU_FEATURE_TYPEVAL);

	for (i = 0; i < MAX_CPU_FEATURES; i++)
		if (cpu_have_feature(i)) {
			if (len + sizeof(",XXXX\n") >= PAGE_SIZE) {
				WARN(1, "CPU features overflow page\n");
				break;
			}
			len += sysfs_emit_at(buf, len, ",%04X", i);
		}
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}

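/*
 * Add a MODALIAS= variable built from the CPU feature bits to uevents so
 * that userspace can autoload modules matching the cpu:type:...:feature:
 * aliases.
 */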
static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf) {
		print_cpu_modalias(NULL, NULL, buf);
		add_uevent_var(env, "MODALIAS=%s", buf);
		kfree(buf);
	}
	return 0;
}
#endif

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *	  sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int register_cpu(struct cpu *cpu, int num)
{
	int error;

	cpu->node_id = cpu_to_node(num);
	memset(&cpu->dev, 0x00, sizeof(struct device));
	cpu->dev.id = num;
	cpu->dev.bus = &cpu_subsys;
	cpu->dev.release = cpu_device_release;
	cpu->dev.offline_disabled = !cpu->hotpluggable;
	cpu->dev.offline = !cpu_online(num);
	cpu->dev.of_node = of_get_cpu_node(num, NULL);
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
	cpu->dev.bus->uevent = cpu_uevent;
#endif
	cpu->dev.groups = common_cpu_attr_groups;
	if (cpu->hotpluggable)
		cpu->dev.groups = hotplugable_cpu_attr_groups;
	error = device_register(&cpu->dev);
	if (error) {
		put_device(&cpu->dev);
		return error;
	}

	per_cpu(cpu_sys_devices, num) = &cpu->dev;
	register_cpu_under_node(num, cpu_to_node(num));
	dev_pm_qos_expose_latency_limit(&cpu->dev,
					PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);

	return 0;
}

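/* Return the device registered for a possible CPU, or NULL if there is none. */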
struct device *get_cpu_device(unsigned int cpu)
{
	if (cpu < nr_cpu_ids && cpu_possible(cpu))
		return per_cpu(cpu_sys_devices, cpu);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

static void device_create_release(struct device *dev)
{
	kfree(dev);
}

__printf(4, 0)
static struct device *
__cpu_device_create(struct device *parent, void *drvdata,
		    const struct attribute_group **groups,
		    const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENOMEM;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		goto error;

	device_initialize(dev);
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	device_set_pm_not_required(dev);
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}

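/*
 * Create and register an auxiliary device parented to a CPU device, carrying
 * the given drvdata and attribute groups. The device is marked as not
 * requiring PM handling since it only acts as a sysfs container.
 */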
struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(cpu_device_create);

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&dev_attr_probe.attr,
	&dev_attr_release.attr,
#endif
	&cpu_attrs[0].attr.attr,
	&cpu_attrs[1].attr.attr,
	&cpu_attrs[2].attr.attr,
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
	&dev_attr_isolated.attr,
#ifdef CONFIG_NO_HZ_FULL
	&dev_attr_nohz_full.attr,
#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
	&dev_attr_modalias.attr,
#endif
	NULL
};

static const struct attribute_group cpu_root_attr_group = {
	.attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&cpu_root_attr_group,
	NULL,
};

bool cpu_is_hotpluggable(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);
	return dev && container_of(dev, struct cpu, dev)->hotpluggable
		&& tick_nohz_cpu_hotpluggable(cpu);
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

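/*
 * On architectures that use the generic CPU devices, register a statically
 * allocated struct cpu for every possible CPU.
 */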
static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
	int i;

	for_each_possible_cpu(i) {
		if (register_cpu(&per_cpu(cpu_devices, i), i))
			panic("Failed to register CPU device");
	}
#endif
}

#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES

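/*
 * Default vulnerability reporting: each of these __weak helpers returns
 * "Not affected" and is overridden by architectures that implement the
 * corresponding mitigation reporting.
 */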
ssize_t __weak cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_mds(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_srbds(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
					struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_retbleed(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_gds(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev,
					     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "Not affected\n");
}

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_meltdown.attr,
	&dev_attr_spectre_v1.attr,
	&dev_attr_spectre_v2.attr,
	&dev_attr_spec_store_bypass.attr,
	&dev_attr_l1tf.attr,
	&dev_attr_mds.attr,
	&dev_attr_tsx_async_abort.attr,
	&dev_attr_itlb_multihit.attr,
	&dev_attr_srbds.attr,
	&dev_attr_mmio_stale_data.attr,
	&dev_attr_retbleed.attr,
	&dev_attr_gather_data_sampling.attr,
	&dev_attr_spec_rstack_overflow.attr,
	NULL
};

static const struct attribute_group cpu_root_vulnerabilities_group = {
	.name  = "vulnerabilities",
	.attrs = cpu_root_vulnerabilities_attrs,
};

static void __init cpu_register_vulnerabilities(void)
{
	if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
			       &cpu_root_vulnerabilities_group))
		pr_err("Unable to register CPU vulnerabilities\n");
}

#else
static inline void cpu_register_vulnerabilities(void) { }
#endif

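/*
 * Register the CPU subsystem, the generic per-CPU devices (where used) and
 * the vulnerabilities sysfs group during early boot.
 */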
void __init cpu_dev_init(void)
{
	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
		panic("Failed to register CPU subsystem");

	cpu_dev_register_generic();
	cpu_register_vulnerabilities();
}