// SPDX-License-Identifier: GPL-2.0-only
/*
 * palinfo.c
 *
 * Prints processor specific information reported by PAL.
 * This code is based on the specification of PAL as of the
 * Intel IA-64 Architecture Software Developer's Manual v1.0.
 *
 *
 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2004 Intel Corporation
 *  Ashok Raj <ashok.raj@intel.com>
 *
 * 05/26/2000	S.Eranian	initial release
 * 08/21/2000	S.Eranian	updated to July 2000 PAL specs
 * 02/05/2001   S.Eranian	fixed module support
 * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
 * 03/24/2004	Ashok Raj	updated to work with CPU Hotplug
 * 10/26/2006   Russ Anderson	updated processor features to rev 2.2 spec
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/smp.h>

MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
MODULE_LICENSE("GPL");

#define PALINFO_VERSION "0.5"

typedef int (*palinfo_func_t)(struct seq_file *);

typedef struct {
	const char		*name;		/* name of the proc entry */
	palinfo_func_t		proc_read;	/* function to call for reading */
	struct proc_dir_entry	*entry;		/* registered entry (removal) */
} palinfo_entry_t;


/*
 *  A bunch of string arrays used for pretty-printing
 */

static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Data/Instruction"	/* unified */
};

static const char *cache_mattrib[]={
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

static const char *cache_st_hints[]={
	"Temporal, level 1",
	"Reserved",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *cache_ld_hints[]={
	"Temporal, level 1",
	"Non-temporal, level 1",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *rse_hints[]={
	"enforced lazy",
	"eager stores",
	"eager loads",
	"eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)

static const char *mem_attrib[]={
	"WB",		/* 000 */
	"SW",		/* 001 */
	"010",		/* 010 */
	"011",		/* 011 */
	"UC",		/* 100 */
	"UCE",		/* 101 */
	"WC",		/* 110 */
	"NaTPage"	/* 111 */
};

/*
 * Takes a 64-bit vector and, for every bit n that is set, prints 2^n
 * in clear text, scaled to the appropriate unit.
 *
 * Input:
 *	- the seq_file to print to
 *	- a 64-bit vector
 * Output:
 *	- the text is emitted into the seq_file
 *
 */
static void bitvector_process(struct seq_file *m, u64 vector)
{
	int i,j;
	static const char *units[]={ "", "K", "M", "G", "T" };

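	/* every 10 bits step up one unit: bit i prints as 2^(i%10) followed by units[i/10] */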
	for (i=0, j=0; i < 64; i++ , j=i/10) {
		if (vector & 0x1)
			seq_printf(m, "%d%s ", 1 << (i-j*10), units[j]);
		vector >>= 1;
	}
}

/*
 * Takes a bit vector and prints register n for every bit n that is set.
 * Consecutive registers are collapsed and printed as ranges.
 *
 * Input:
 *	- the seq_file to print to
 *	- a pointer to the bit vector
 *	- the number of bits in the vector
 * Output:
 *	- the text is emitted into the seq_file
 *
 */
static void bitregister_process(struct seq_file *m, u64 *reg_info, int max)
{
	int i, begin, skip = 0;
	u64 value = reg_info[0];

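	/* skip the initial run of clear bits: i and begin start at the lowest set bit of the first word */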
	value >>= i = begin = ffs(value) - 1;

	for(; i < max; i++ ) {

		if (i != 0 && (i%64) == 0) value = *++reg_info;

		if ((value & 0x1) == 0 && skip == 0) {
			if (begin  <= i - 2)
				seq_printf(m, "%d-%d ", begin, i-1);
			else
				seq_printf(m, "%d ", i-1);
			skip  = 1;
			begin = -1;
		} else if ((value & 0x1) && skip == 1) {
			skip = 0;
			begin = i;
		}
		value >>=1;
	}
	if (begin > -1) {
		if (begin < 127)
			seq_printf(m, "%d-127", begin);
		else
			seq_puts(m, "127");
	}
}

static int power_info(struct seq_file *m)
{
	s64 status;
	u64 halt_info_buffer[8];
	pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
	int i;

	status = ia64_pal_halt_info(halt_info);
	if (status != 0) return 0;

	for (i=0; i < 8 ; i++ ) {
		if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
			seq_printf(m,
				   "Power level %d:\n"
				   "\tentry_latency       : %d cycles\n"
				   "\texit_latency        : %d cycles\n"
				   "\tpower consumption   : %d mW\n"
				   "\tCache+TLB coherency : %s\n", i,
				   halt_info[i].pal_power_mgmt_info_s.entry_latency,
				   halt_info[i].pal_power_mgmt_info_s.exit_latency,
				   halt_info[i].pal_power_mgmt_info_s.power_consumption,
				   halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
		} else {
			seq_printf(m,"Power level %d: not implemented\n", i);
		}
	}
	return 0;
}

static int cache_info(struct seq_file *m)
{
	unsigned long i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j, k;
	long status;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return 0;
	}

	seq_printf(m, "Cache levels  : %ld\nUnique caches : %ld\n\n",
		   levels, unique_caches);

	for (i=0; i < levels; i++) {
		for (j=2; j >0 ; j--) {
			/* even without unification, some levels may not be present */
			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0)
				continue;

			seq_printf(m,
				   "%s Cache level %lu:\n"
				   "\tSize           : %u bytes\n"
				   "\tAttributes     : ",
				   cache_types[j+cci.pcci_unified], i+1,
				   cci.pcci_cache_size);

			if (cci.pcci_unified)
				seq_puts(m, "Unified ");

			seq_printf(m, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

			seq_printf(m,
				   "\tAssociativity  : %d\n"
				   "\tLine size      : %d bytes\n"
				   "\tStride         : %d bytes\n",
				   cci.pcci_assoc,
				   1<<cci.pcci_line_size,
				   1<<cci.pcci_stride);
			if (j == 1)
				seq_puts(m, "\tStore latency  : N/A\n");
			else
				seq_printf(m, "\tStore latency  : %d cycle(s)\n",
					   cci.pcci_st_latency);

			seq_printf(m,
				   "\tLoad latency   : %d cycle(s)\n"
				   "\tStore hints    : ", cci.pcci_ld_latency);

			for(k=0; k < 8; k++ ) {
				if ( cci.pcci_st_hints & 0x1)
					seq_printf(m, "[%s]", cache_st_hints[k]);
				cci.pcci_st_hints >>=1;
			}
			seq_puts(m, "\n\tLoad hints     : ");

			for(k=0; k < 8; k++ ) {
				if (cci.pcci_ld_hints & 0x1)
					seq_printf(m, "[%s]", cache_ld_hints[k]);
				cci.pcci_ld_hints >>=1;
			}
			seq_printf(m,
				   "\n\tAlias boundary : %d byte(s)\n"
				   "\tTag LSB        : %d\n"
				   "\tTag MSB        : %d\n",
				   1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
				   cci.pcci_tag_msb);

			/* when unified, data(j=2) is enough */
			if (cci.pcci_unified)
				break;
		}
	}
	return 0;
}


static int vm_info(struct seq_file *m)
{
	u64 tr_pages =0, vw_pages=0, tc_pages;
	u64 attrib;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	pal_tc_info_u_t	tc_info;
	ia64_ptce_info_t ptce;
	const char *sep;
	int i, j;
	long status;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
	} else {

		seq_printf(m,
		     "Physical Address Space         : %d bits\n"
		     "Virtual Address Space          : %d bits\n"
		     "Protection Key Registers(PKR)  : %d\n"
		     "Implemented bits in PKR.key    : %d\n"
		     "Hash Tag ID                    : 0x%x\n"
		     "Size of RR.rid                 : %d\n"
		     "Max Purges                     : ",
		     vm_info_1.pal_vm_info_1_s.phys_add_size,
		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
		     vm_info_1.pal_vm_info_1_s.max_pkr+1,
		     vm_info_1.pal_vm_info_1_s.key_size,
		     vm_info_1.pal_vm_info_1_s.hash_tag_id,
		     vm_info_2.pal_vm_info_2_s.rid_size);
		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
			seq_puts(m, "unlimited\n");
		else
			seq_printf(m, "%d\n",
		     		vm_info_2.pal_vm_info_2_s.max_purges ?
				vm_info_2.pal_vm_info_2_s.max_purges : 1);
	}

	if (ia64_pal_mem_attrib(&attrib) == 0) {
		seq_puts(m, "Supported memory attributes    : ");
		sep = "";
		for (i = 0; i < 8; i++) {
			if (attrib & (1 << i)) {
				seq_printf(m, "%s%s", sep, mem_attrib[i]);
				sep = ", ";
			}
		}
		seq_putc(m, '\n');
	}

	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
	} else {

		seq_printf(m,
			   "\nTLB walker                     : %simplemented\n"
			   "Number of DTR                  : %d\n"
			   "Number of ITR                  : %d\n"
			   "TLB insertable page sizes      : ",
			   vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
			   vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
			   vm_info_1.pal_vm_info_1_s.max_itr_entry+1);

		bitvector_process(m, tr_pages);

		seq_puts(m, "\nTLB purgeable page sizes       : ");

		bitvector_process(m, vw_pages);
	}

	if ((status = ia64_get_ptce(&ptce)) != 0) {
		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
	} else {
		seq_printf(m,
		     "\nPurge base address             : 0x%016lx\n"
		     "Purge outer loop count         : %d\n"
		     "Purge inner loop count         : %d\n"
		     "Purge outer loop stride        : %d\n"
		     "Purge inner loop stride        : %d\n",
		     ptce.base, ptce.count[0], ptce.count[1],
		     ptce.stride[0], ptce.stride[1]);

		seq_printf(m,
		     "TC Levels                      : %d\n"
		     "Unique TC(s)                   : %d\n",
		     vm_info_1.pal_vm_info_1_s.num_tc_levels,
		     vm_info_1.pal_vm_info_1_s.max_unique_tcs);

		for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
			for (j=2; j>0 ; j--) {
				tc_pages = 0; /* just in case */

				/* even without unification, some levels may not be present */
				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0)
					continue;

				seq_printf(m,
				     "\n%s Translation Cache Level %d:\n"
				     "\tHash sets           : %d\n"
				     "\tAssociativity       : %d\n"
				     "\tNumber of entries   : %d\n"
				     "\tFlags               : ",
				     cache_types[j+tc_info.tc_unified], i+1,
				     tc_info.tc_num_sets,
				     tc_info.tc_associativity,
				     tc_info.tc_num_entries);

				if (tc_info.tc_pf)
					seq_puts(m, "PreferredPageSizeOptimized ");
				if (tc_info.tc_unified)
					seq_puts(m, "Unified ");
				if (tc_info.tc_reduce_tr)
					seq_puts(m, "TCReduction");

				seq_puts(m, "\n\tSupported page sizes: ");

				bitvector_process(m, tc_pages);

				/* when unified, data (j=2) is enough */
				if (tc_info.tc_unified)
					break;
			}
		}
	}

	seq_putc(m, '\n');
	return 0;
}


static int register_info(struct seq_file *m)
{
	u64 reg_info[2];
	u64 info;
	unsigned long phys_stacked;
	pal_hints_u_t hints;
	unsigned long iregs, dregs;
	static const char * const info_type[] = {
		"Implemented AR(s)",
		"AR(s) with read side-effects",
		"Implemented CR(s)",
		"CR(s) with read side-effects",
	};

	for(info=0; info < 4; info++) {
		if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0)
			return 0;
		seq_printf(m, "%-32s : ", info_type[info]);
		bitregister_process(m, reg_info, 128);
		seq_putc(m, '\n');
	}

	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0)
		seq_printf(m,
			   "RSE stacked physical registers   : %ld\n"
			   "RSE load/store hints             : %ld (%s)\n",
			   phys_stacked, hints.ph_data,
			   hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");

	if (ia64_pal_debug_info(&iregs, &dregs))
		return 0;

	seq_printf(m,
		   "Instruction debug register pairs : %ld\n"
		   "Data debug register pairs        : %ld\n", iregs, dregs);

	return 0;
}

static const char *const proc_features_0[]={		/* Feature set 0 */
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
	"Unimplemented instruction address fault",
	"INIT, PMI, and LINT pins",
	"Simple unimplemented instr addresses",
	"Variable P-state performance",
	"Virtual machine features implemented",
	"XIP,XPSR,XFS implemented",
	"XR1-XR3 implemented",
	"Disable dynamic predicate prediction",
	"Disable processor physical number",
	"Disable dynamic data cache prefetch",
	"Disable dynamic inst cache prefetch",
	"Disable dynamic branch prediction",
	NULL, NULL, NULL, NULL,
	"Disable P-states",
	"Enable MCA on Data Poisoning",
	"Enable vmsw instruction",
	"Enable extern environmental notification",
	"Disable BINIT on processor time-out",
	"Disable dynamic power management (DPM)",
	"Disable coherency",
	"Disable cache",
	"Enable CMCI promotion",
	"Enable MCA to BINIT promotion",
	"Enable MCA promotion",
	"Enable BERR promotion"
};

static const char *const proc_features_16[]={		/* Feature set 16 */
	"Disable ETM",
	"Enable ETM",
	"Enable MCA on half-way timer",
	"Enable snoop WC",
	NULL,
	"Enable Fast Deferral",
	"Disable MCA on memory aliasing",
	"Enable RSB",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	"DP system processor",
	"Low Voltage",
	"HT supported",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL
};

static const char *const *const proc_features[]={
	proc_features_0,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	proc_features_16,
	NULL, NULL, NULL, NULL,
};

static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control,
			     unsigned long set)
{
	const char *const *vf, *const *v;
	int i;

	vf = v = proc_features[set];
	for(i=0; i < 64; i++, avail >>=1, status >>=1, control >>=1) {

		if (!(control))		/* No remaining bits set */
			break;
		if (!(avail & 0x1))	/* Print only bits that are available */
			continue;
		if (vf)
			v = vf + i;
		if ( v && *v ) {
			seq_printf(m, "%-40s : %s %s\n", *v,
				avail & 0x1 ? (status & 0x1 ?
					      "On " : "Off"): "",
				avail & 0x1 ? (control & 0x1 ?
						"Ctrl" : "NoCtrl"): "");
		} else {
			seq_printf(m, "Feature set %2ld bit %2d\t\t\t"
					" : %s %s\n",
				set, i,
				avail & 0x1 ? (status & 0x1 ?
						"On " : "Off"): "",
				avail & 0x1 ? (control & 0x1 ?
						"Ctrl" : "NoCtrl"): "");
		}
	}
}

static int processor_info(struct seq_file *m)
{
	u64 avail=1, status=1, control=1, feature_set=0;
	s64 ret;

	do {
		ret = ia64_pal_proc_get_features(&avail, &status, &control,
						feature_set);
		if (ret < 0)
			return 0;

		if (ret == 1) {
			feature_set++;
			continue;
		}

		feature_set_info(m, avail, status, control, feature_set);
		feature_set++;
	} while(1);

	return 0;
}

static const char *const bus_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,
	"Request  Bus Parking",
	"Bus Lock Mask",
	"Enable Half Transfer",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	"Enable Cache Line Repl. Shared",
	"Enable Cache Line Repl. Exclusive",
	"Disable Transaction Queuing",
	"Disable Response Error Checking",
	"Disable Bus Error Checking",
	"Disable Bus Requester Internal Error Signalling",
	"Disable Bus Requester Error Signalling",
	"Disable Bus Initialization Event Checking",
	"Disable Bus Initialization Event Signalling",
	"Disable Bus Address Error Checking",
	"Disable Bus Address Error Signalling",
	"Disable Bus Data Error Checking"
};


static int bus_info(struct seq_file *m)
{
	const char *const *v = bus_features;
	pal_bus_features_u_t av, st, ct;
	u64 avail, status, control;
	int i;
	s64 ret;

	if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0)
		return 0;

	avail   = av.pal_bus_features_val;
	status  = st.pal_bus_features_val;
	control = ct.pal_bus_features_val;

	for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
		if ( ! *v )
			continue;
		seq_printf(m, "%-48s : %s%s %s\n", *v,
			   avail & 0x1 ? "" : "NotImpl",
			   avail & 0x1 ? (status  & 0x1 ? "On" : "Off"): "",
			   avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
	}
	return 0;
}

static int version_info(struct seq_file *m)
{
	pal_version_u_t min_ver, cur_ver;

	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
		return 0;

	seq_printf(m,
		   "PAL_vendor : 0x%02x (min=0x%02x)\n"
		   "PAL_A      : %02x.%02x (min=%02x.%02x)\n"
		   "PAL_B      : %02x.%02x (min=%02x.%02x)\n",
		   cur_ver.pal_version_s.pv_pal_vendor,
		   min_ver.pal_version_s.pv_pal_vendor,
		   cur_ver.pal_version_s.pv_pal_a_model,
		   cur_ver.pal_version_s.pv_pal_a_rev,
		   min_ver.pal_version_s.pv_pal_a_model,
		   min_ver.pal_version_s.pv_pal_a_rev,
		   cur_ver.pal_version_s.pv_pal_b_model,
		   cur_ver.pal_version_s.pv_pal_b_rev,
		   min_ver.pal_version_s.pv_pal_b_model,
		   min_ver.pal_version_s.pv_pal_b_rev);
	return 0;
}

static int perfmon_info(struct seq_file *m)
{
	u64 pm_buffer[16];
	pal_perf_mon_info_u_t pm_info;

	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0)
		return 0;

	seq_printf(m,
		   "PMC/PMD pairs                 : %d\n"
		   "Counter width                 : %d bits\n"
		   "Cycle event number            : %d\n"
		   "Retired event number          : %d\n"
		   "Implemented PMC               : ",
		   pm_info.pal_perf_mon_info_s.generic,
		   pm_info.pal_perf_mon_info_s.width,
		   pm_info.pal_perf_mon_info_s.cycles,
		   pm_info.pal_perf_mon_info_s.retired);

	bitregister_process(m, pm_buffer, 256);
	seq_puts(m, "\nImplemented PMD               : ");
	bitregister_process(m, pm_buffer+4, 256);
	seq_puts(m, "\nCycles count capable          : ");
	bitregister_process(m, pm_buffer+8, 256);
	seq_puts(m, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
	/*
	 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
	 * which is wrong, both PMC4 and PMD5 support it.
	 */
	if (pm_buffer[12] == 0x10)
		pm_buffer[12]=0x30;
#endif

	bitregister_process(m, pm_buffer+12, 256);
	seq_putc(m, '\n');
	return 0;
}

static int frequency_info(struct seq_file *m)
{
	struct pal_freq_ratio proc, itc, bus;
	unsigned long base;

	if (ia64_pal_freq_base(&base) == -1)
		seq_puts(m, "Output clock            : not implemented\n");
	else
		seq_printf(m, "Output clock            : %ld ticks/s\n", base);

	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;

	seq_printf(m,
		     "Processor/Clock ratio   : %d/%d\n"
		     "Bus/Clock ratio         : %d/%d\n"
		     "ITC/Clock ratio         : %d/%d\n",
		     proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
	return 0;
}

static int tr_info(struct seq_file *m)
{
	long status;
	pal_tr_valid_u_t tr_valid;
	u64 tr_buffer[4];
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	unsigned long i, j;
	unsigned long max[3], pgm;
	struct ifa_reg {
		unsigned long valid:1;
		unsigned long ig:11;
		unsigned long vpn:52;
	} *ifa_reg;
	struct itir_reg {
		unsigned long rv1:2;
		unsigned long ps:6;
		unsigned long key:24;
		unsigned long rv2:32;
	} *itir_reg;
	struct gr_reg {
		unsigned long p:1;
		unsigned long rv1:1;
		unsigned long ma:3;
		unsigned long a:1;
		unsigned long d:1;
		unsigned long pl:2;
		unsigned long ar:3;
		unsigned long ppn:38;
		unsigned long rv2:2;
		unsigned long ed:1;
		unsigned long ig:11;
	} *gr_reg;
	struct rid_reg {
		unsigned long ig1:1;
		unsigned long rv1:1;
		unsigned long ig2:6;
		unsigned long rid:24;
		unsigned long rv2:32;
	} *rid_reg;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		return 0;
	}
	max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

	for (i=0; i < 2; i++ ) {
		for (j=0; j < max[i]; j++) {

		status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
		if (status != 0) {
			printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
			       i, j, status);
			continue;
		}

		ifa_reg  = (struct ifa_reg *)&tr_buffer[2];

		if (ifa_reg->valid == 0)
			continue;

		gr_reg   = (struct gr_reg *)tr_buffer;
		itir_reg = (struct itir_reg *)&tr_buffer[1];
		rid_reg  = (struct rid_reg *)&tr_buffer[3];

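		/* pgm masks off the in-page offset bits of ppn/vpn for the page size given by itir.ps */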
		pgm	 = -1 << (itir_reg->ps - 12);
		seq_printf(m,
			   "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
			   "\tppn  : 0x%lx\n"
			   "\tvpn  : 0x%lx\n"
			   "\tps   : ",
			   "ID"[i], j,
			   tr_valid.pal_tr_valid_s.access_rights_valid,
			   tr_valid.pal_tr_valid_s.priv_level_valid,
			   tr_valid.pal_tr_valid_s.dirty_bit_valid,
			   tr_valid.pal_tr_valid_s.mem_attr_valid,
			   (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);

		bitvector_process(m, 1<< itir_reg->ps);

		seq_printf(m,
			   "\n\tpl   : %d\n"
			   "\tar   : %d\n"
			   "\trid  : %x\n"
			   "\tp    : %d\n"
			   "\tma   : %d\n"
			   "\td    : %d\n",
			   gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
			   gr_reg->d);
		}
	}
	return 0;
}



/*
 * List {name,function} pairs for every entry in /proc/pal/cpu*
 */
static const palinfo_entry_t palinfo_entries[]={
	{ "version_info",	version_info, },
	{ "vm_info",		vm_info, },
	{ "cache_info",		cache_info, },
	{ "power_info",		power_info, },
	{ "register_info",	register_info, },
	{ "processor_info",	processor_info, },
	{ "perfmon_info",	perfmon_info, },
	{ "frequency_info",	frequency_info, },
	{ "bus_info",		bus_info },
	{ "tr_info",		tr_info, }
};

#define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries)

static struct proc_dir_entry *palinfo_dir;

/*
 * This data structure is used to pass which cpu/function is being requested.
 * It must fit in a 64-bit quantity to be passed to the proc callback routine.
 *
 * In SMP mode, when we get a request for another CPU, we must call that
 * other CPU using IPI and wait for the result before returning.
 */
typedef union {
	u64 value;
	struct {
		unsigned	req_cpu: 32;	/* for which CPU this info is */
		unsigned	func_id: 32;	/* which function is requested */
	} pal_func_cpu;
} pal_func_cpu_u_t;

#define req_cpu	pal_func_cpu.req_cpu
#define func_id pal_func_cpu.func_id

#ifdef CONFIG_SMP

/*
 * used to hold information about the final function to call
 */
typedef struct {
	palinfo_func_t	func;	/* pointer to function to call */
	struct seq_file *m;	/* buffer to store results */
	int		ret;	/* return value from call */
} palinfo_smp_data_t;


/*
 * this function does the actual final call and is called from the smp
 * code, i.e., this is the palinfo callback routine
 */
static void
palinfo_smp_call(void *info)
{
	palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
	data->ret = (*data->func)(data->m);
}

/*
 * function called to trigger the IPI when we need to access a remote CPU
 * Return:
 *	0 : error or nothing to output
 *	otherwise the return value of the requested info function
 */
static
int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
{
	palinfo_smp_data_t ptr;
	int ret;

	ptr.func = palinfo_entries[f->func_id].proc_read;
	ptr.m = m;
	ptr.ret  = 0; /* just in case */


	/* will send IPI to other CPU and wait for completion of remote call */
	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
		return 0;
	}
	return ptr.ret;
}
#else /* ! CONFIG_SMP */
static
int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
{
	printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
	return 0;
}
#endif /* CONFIG_SMP */

/*
 * Entry point routine: all calls go through this function
 */
static int proc_palinfo_show(struct seq_file *m, void *v)
{
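	/* m->private carries the packed {req_cpu, func_id} value set up in palinfo_add_proc */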
	pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&m->private;

	/*
	 * in SMP mode, we may need to call another CPU to get correct
	 * information. PAL, by definition, is processor specific
	 */
	if (f->req_cpu == get_cpu())
		(*palinfo_entries[f->func_id].proc_read)(m);
	else
		palinfo_handle_smp(m, f);

	put_cpu();
	return 0;
}

static int palinfo_add_proc(unsigned int cpu)
{
	pal_func_cpu_u_t f;
	struct proc_dir_entry *cpu_dir;
	int j;
	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */
	sprintf(cpustr, "cpu%d", cpu);

	cpu_dir = proc_mkdir(cpustr, palinfo_dir);
	if (!cpu_dir)
		return -EINVAL;

	f.req_cpu = cpu;

	for (j=0; j < NR_PALINFO_ENTRIES; j++) {
		f.func_id = j;
		proc_create_single_data(palinfo_entries[j].name, 0, cpu_dir,
				proc_palinfo_show, (void *)f.value);
	}
	return 0;
}

static int palinfo_del_proc(unsigned int hcpu)
{
	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */

	sprintf(cpustr, "cpu%d", hcpu);
	remove_proc_subtree(cpustr, palinfo_dir);
	return 0;
}

static enum cpuhp_state hp_online;

static int __init palinfo_init(void)
{
	int i = 0;

	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
	palinfo_dir = proc_mkdir("pal", NULL);
	if (!palinfo_dir)
		return -ENOMEM;

	i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/palinfo:online",
			      palinfo_add_proc, palinfo_del_proc);
	if (i < 0) {
		remove_proc_subtree("pal", NULL);
		return i;
	}
	hp_online = i;
	return 0;
}

static void __exit palinfo_exit(void)
{
	cpuhp_remove_state(hp_online);
	remove_proc_subtree("pal", NULL);
}

module_init(palinfo_init);
module_exit(palinfo_exit);