// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

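/* Plain accessor so callers need not know the per-CPU storage layout. */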
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

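/*
 * Two leaves are considered shared if both carry a firmware-assigned
 * cache ID and the IDs match; otherwise fall back to comparing the
 * firmware tokens (the DT node or ACPI token) describing the cache.
 */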
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
		return (this_leaf->level != 1) && (sib_leaf->level != 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

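/*
 * The info_list leaves are ordered by cache level, so the last entry is
 * the last-level cache (LLC). It is only considered valid if firmware
 * actually described it, i.e. it has a cache ID or a fw_token.
 */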
bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

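/*
 * Map a cache type to an index into cache_type_info[]: unified caches use
 * the plain "cache-*" properties (index 0), while the instruction and data
 * cache types index the "i-cache-*" and "d-cache-*" entries directly.
 */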
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

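/*
 * Derive the associativity from the other properties:
 *
 *	ways = size / (number_of_sets * line_size)
 *
 * e.g. a 32 KiB cache with 128 sets of 64-byte lines is
 * 32768 / (128 * 64) = 4-way set-associative.
 */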
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level() must set up the cache levels correctly,
	 * overriding the architecturally specified levels, so if the
	 * type is still NOCACHE at this stage the node must describe
	 * a unified cache.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

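/*
 * Walk the DT cache hierarchy for a CPU: level 1 leaves use the CPU node
 * itself, higher levels follow the "next-level-cache" (or legacy
 * "l2-cache") phandle via of_find_next_cache_node(). An illustrative
 * fragment this walk consumes (node names and values are examples only):
 *
 *	cpu@0 {
 *		d-cache-size = <0x8000>;
 *		d-cache-line-size = <64>;
 *		d-cache-sets = <128>;
 *		next-level-cache = <&l2>;
 *	};
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;
 *	};
 */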
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
#endif

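/*
 * Weak default; on ACPI systems this is overridden by the PPTT parser
 * (drivers/acpi/pptt.c) when that support is built in.
 */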
int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	return ret;
}

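/*
 * Build the shared_cpu_map for every leaf of @cpu by comparing each leaf
 * against the leaves of all other online CPUs at the same level and type,
 * and record the largest coherency line size seen in coherency_max_size.
 */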
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * Skip setting up the cache properties if the LLC is already valid;
	 * we then only need to update the shared cpu_map, as the cache
	 * attributes were populated early, before all CPUs were brought
	 * online.
	 */
	if (!last_level_cache_is_valid(cpu)) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

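/*
 * Mirror of cache_shared_cpu_map_setup(): drop @cpu from its siblings'
 * shared_cpu_map (and vice versa) and release the DT node references
 * taken for fw_token.
 */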
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
	cache_leaves(cpu) = 0;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

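/*
 * Detect and populate the cache attributes for @cpu: ask the architecture
 * for the number of levels/leaves (init_cache_level()), allocate the
 * info_list, let the architecture fill in the leaves
 * (populate_cache_leaves()) and finally set up the shared CPU maps.
 */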
int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	/*
	 * Early detection of the cacheinfo is allowed via this function,
	 * but it is also called as a CPU hotplug callback via
	 * cacheinfo_cpu_online(). In the latter case the initialisation
	 * can be skipped and only the CPU maps need updating, since the
	 * CPU online status will have changed on that path.
	 */
	if (per_cpu_cacheinfo(cpu))
		goto update_cpu_map;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (per_cpu_cacheinfo(cpu) == NULL) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	/*
	 * populate_cache_leaves() may completely set up the cache leaves
	 * and shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;

update_cpu_map:
	/*
	 * For systems using DT for the cache hierarchy, fw_token and
	 * shared_cpu_map will be set up here only if they have not been
	 * populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}
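/*
 * For instance, show_one(level, level) expands to roughly:
 *
 *	static ssize_t level_show(struct device *dev,
 *				  struct device_attribute *attr, char *buf)
 *	{
 *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *		return sysfs_emit(buf, "%u\n", this_leaf->level);
 *	}
 */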

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

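/*
 * shared_cpu_map prints the mask as hex words, shared_cpu_list as a
 * human-readable range list (e.g. "0-3"), via the %*pb/%*pbl printk
 * extensions.
 */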
static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

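/*
 * Hide attributes for which firmware provided no data, rather than
 * exposing files that would read back as zero or empty.
 */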
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

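/*
 * Create one device per leaf under the per-CPU "cache" device, i.e.
 * /sys/devices/system/cpu/cpuX/cache/indexY, attaching any private
 * attribute group the architecture provides.
 */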
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

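/*
 * Register the CPU hotplug callbacks: cacheinfo is detected and the sysfs
 * interface added when a CPU comes online, and torn down before it goes
 * offline.
 */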
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);