/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 * 		2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 * 	Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 * 	Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>		/* kzalloc() */
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

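/*
 * Record the physical package (slot) id for a CPU, but only if the
 * firmware has not already supplied one.
 */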
void arch_fix_phys_package_id(int num, u32 slot)
{
#ifdef CONFIG_SMP
	if (cpu_data(num)->socket_id == -1)
		cpu_data(num)->socket_id = slot;
#endif
}
EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);


#ifdef CONFIG_HOTPLUG_CPU
int __ref arch_register_cpu(int num)
{
#ifdef CONFIG_ACPI
	/*
	 * If CPEI can be retargeted, or if this CPU is not the
	 * current CPEI target, then it is hotpluggable.
	 */
	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.hotpluggable = 1;
	map_cpu_to_node(num, node_cpuid[num].nid);
#endif
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);

void __ref arch_unregister_cpu(int num)
{
	unregister_cpu(&sysfs_cpus[num].cpu);
#ifdef CONFIG_ACPI
	unmap_cpu_from_node(num, cpu_to_node(num));
#endif
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
static int __init arch_register_cpu(int num)
{
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
#endif /* CONFIG_HOTPLUG_CPU */


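/*
 * Boot-time registration: add each online NUMA node and every present
 * CPU to sysfs.  The per-CPU objects live in the kzalloc'ed sysfs_cpus
 * array, sized for NR_CPUS.
 */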
static int __init topology_init(void)
{
	int i, err = 0;

#ifdef CONFIG_NUMA
	/*
	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
	 */
	for_each_online_node(i) {
		if ((err = register_one_node(i)))
			goto out;
	}
#endif

	sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
	if (!sysfs_cpus)
		panic("kzalloc in topology_init failed - NR_CPUS too big?");

	for_each_present_cpu(i) {
		if ((err = arch_register_cpu(i)))
			goto out;
	}
out:
	return err;
}

subsys_initcall(topology_init);


/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays used to pretty-print the PAL cache information
 */
static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Unified"		/* unified */
};

static const char *cache_mattrib[] = {
	"WriteThrough",
	"WriteBack",
	"",			/* reserved */
	""			/* reserved */
};

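/*
 * One cache_info per cache leaf; a cpu_cache_info gathers all leaves of
 * a CPU under a common "cache" kobject.
 */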
struct cache_info {
	pal_cache_config_info_t cci;
	cpumask_t shared_cpu_map;
	int level;
	int type;
	struct kobject kobj;
};

struct cpu_cache_info {
	struct cache_info *cache_leaves;
	int num_cache_leaves;
	struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata;
#define LEAF_KOBJECT_PTR(x,y)	(&all_cpu_cache_info[x].cache_leaves[y])

#ifdef CONFIG_SMP
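/*
 * Build the shared_cpu_map for a cache leaf.  A single-thread,
 * single-core socket can only share the cache with itself; otherwise
 * PAL is asked for every logical processor that shares the cache.
 */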
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	pal_cache_shared_info_t	csi;
	int num_shared, i = 0;
	unsigned int j;

	if (cpu_data(cpu)->threads_per_core <= 1 &&
		cpu_data(cpu)->cores_per_socket <= 1) {
		cpu_set(cpu, this_leaf->shared_cpu_map);
		return;
	}

	if (ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					0,
					&csi) != PAL_STATUS_SUCCESS)
		return;

	num_shared = (int) csi.num_shared;
	do {
		for_each_possible_cpu(j)
			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
				&& cpu_data(j)->core_id == csi.log1_cid
				&& cpu_data(j)->thread_id == csi.log1_tid)
				cpu_set(j, this_leaf->shared_cpu_map);

		i++;
	} while (i < num_shared &&
		ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					i,
					&csi) == PAL_STATUS_SUCCESS);
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	cpu_set(cpu, this_leaf->shared_cpu_map);
	return;
}
#endif

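/*
 * sysfs show routines for the cache attributes.  PAL reports the line
 * size as a log2 value (hence the 1 << pcci_line_size below), and the
 * number of sets follows as size / (associativity * line size).
 */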
static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf,
			"%s\n",
			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
	number_of_sets /= this_leaf->cci.pcci_assoc;
	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

	return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
	ssize_t	len;
	cpumask_t shared_cpu_map;

	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
	len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
	len += sprintf(buf+len, "\n");
	return len;
}

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
	int type = this_leaf->type + this_leaf->cci.pcci_unified;
	return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct cache_info *, char *);
	ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
	#undef define_one_ro
#endif
#define define_one_ro(_name) \
	static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&ways_of_associativity.attr,
	&attributes.attr,
	&size.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

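/* Dispatch a sysfs read to the show routine of the matching attribute. */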
static ssize_t cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cache_attr *fattr = to_attr(attr);
	struct cache_info *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
	return ret;
}

static struct sysfs_ops cache_sysfs_ops = {
	.show = cache_show
};

static struct kobj_type cache_ktype = {
	.sysfs_ops = &cache_sysfs_ops,
	.default_attrs = cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
	.sysfs_ops = &cache_sysfs_ops,
};

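/* Free a CPU's cache leaves and reset its bookkeeping. */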
static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
	kfree(all_cpu_cache_info[cpu].cache_leaves);
	all_cpu_cache_info[cpu].cache_leaves = NULL;
	all_cpu_cache_info[cpu].num_cache_leaves = 0;
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
	return;
}

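/*
 * Query PAL for every cache level and record a leaf for each
 * instruction (j == 1) and data/unified (j == 2) cache found.  The
 * caller is expected to run this on the CPU being queried, since the
 * PAL calls report the current CPU's caches.
 */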
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
	u64 i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j;
	s64 status;
	struct cache_info *this_cache;
	int num_cache_leaves = 0;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return -1;
	}

	this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
			GFP_KERNEL);
	if (this_cache == NULL)
		return -ENOMEM;

	for (i = 0; i < levels; i++) {
		for (j = 2; j > 0; j--) {
			if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
					PAL_STATUS_SUCCESS)
				continue;

			this_cache[num_cache_leaves].cci = cci;
			this_cache[num_cache_leaves].level = i + 1;
			this_cache[num_cache_leaves].type = j;

			cache_shared_cpu_map_setup(cpu,
					&this_cache[num_cache_leaves]);
			num_cache_leaves++;
		}
	}

	all_cpu_cache_info[cpu].cache_leaves = this_cache;
	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

	return 0;
}

/* Add cache interface for CPU device */
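/*
 * The task is temporarily pinned to the target CPU so that the PAL
 * queries in cpu_cache_sysfs_init() are executed there.
 */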
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct cache_info *this_object;
	int retval = 0;
	cpumask_t oldmask;

	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (unlikely(retval))
		return retval;

	retval = cpu_cache_sysfs_init(cpu);
	set_cpus_allowed(current, oldmask);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
				      &cache_ktype_percpu_entry, &sys_dev->kobj,
				      "%s", "cache");

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
		this_object = LEAF_KOBJECT_PTR(cpu,i);
		retval = kobject_init_and_add(&(this_object->kobj),
					      &cache_ktype,
					      &all_cpu_cache_info[cpu].kobj,
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
			}
			kobject_put(&all_cpu_cache_info[cpu].kobj);
			cpu_cache_sysfs_exit(cpu);
			break;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
	return retval;
}

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
		kobject_put(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));

	if (all_cpu_cache_info[cpu].kobj.parent) {
		kobject_put(&all_cpu_cache_info[cpu].kobj);
		memset(&all_cpu_cache_info[cpu].kobj,
		       0,
		       sizeof(struct kobject));
	}

	cpu_cache_sysfs_exit(cpu);

	return 0;
}

/*
 * When a cpu is hot-plugged, add or remove its cache kobject
 * as appropriate.
 */
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
	.notifier_call = cache_cpu_callback
};

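/*
 * Create cache directories for all CPUs already online and register a
 * hotplug notifier to handle CPUs that come and go later.
 */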
static int __init cache_sysfs_init(void)
{
	int i;

	for_each_online_cpu(i) {
		struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
		cache_add_dev(sys_dev);
	}

	register_hotcpu_notifier(&cache_cpu_notifier);

	return 0;
}

device_initcall(cache_sysfs_init);