1 /*
2 * turbostat -- show CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors.
4 *
5 * Copyright (c) 2012 Intel Corporation.
6 * Len Brown <len.brown@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22 #define _GNU_SOURCE
23 #include <stdio.h>
24 #include <unistd.h>
25 #include <sys/types.h>
26 #include <sys/wait.h>
27 #include <sys/stat.h>
28 #include <sys/resource.h>
29 #include <fcntl.h>
30 #include <signal.h>
31 #include <sys/time.h>
32 #include <stdlib.h>
33 #include <dirent.h>
34 #include <string.h>
35 #include <ctype.h>
36 #include <sched.h>
37
38 #define MSR_TSC 0x10
39 #define MSR_NEHALEM_PLATFORM_INFO 0xCE
40 #define MSR_NEHALEM_TURBO_RATIO_LIMIT 0x1AD
41 #define MSR_APERF 0xE8
42 #define MSR_MPERF 0xE7
43 #define MSR_PKG_C2_RESIDENCY 0x60D /* SNB only */
44 #define MSR_PKG_C3_RESIDENCY 0x3F8
45 #define MSR_PKG_C6_RESIDENCY 0x3F9
46 #define MSR_PKG_C7_RESIDENCY 0x3FA /* SNB only */
47 #define MSR_CORE_C3_RESIDENCY 0x3FC
48 #define MSR_CORE_C6_RESIDENCY 0x3FD
49 #define MSR_CORE_C7_RESIDENCY 0x3FE /* SNB only */
50
char *proc_stat = "/proc/stat";
unsigned int interval_sec = 5;	/* set with -i interval_sec */
unsigned int verbose;		/* set with -v */
unsigned int summary_only;	/* set with -s */
unsigned int skip_c0;		/* set when MPERF deltas are unusable */
unsigned int skip_c1;		/* set when MPERF deltas are unusable */
unsigned int do_nhm_cstates;	/* CPU has Nehalem-style C-state residency MSRs */
unsigned int do_snb_cstates;	/* CPU has Sandy Bridge C2/C7 residency MSRs */
unsigned int has_aperf;		/* CPUID.06H:ECX[0]: APERF/MPERF available */
unsigned int units = 1000000000;	/* Ghz etc */
unsigned int genuine_intel;	/* CPUID vendor string is "GenuineIntel" */
unsigned int has_invariant_tsc;	/* CPUID.80000007H:EDX[8] */
unsigned int do_nehalem_platform_info;
unsigned int do_nehalem_turbo_ratio_limit;
unsigned int extra_msr_offset;	/* set with -M MSR#; dumped raw each interval */
double bclk;			/* base clock in MHz (100.00 SNB, 133.33 NHM) */
unsigned int show_pkg;		/* print the package column */
unsigned int show_core;		/* print the core column */
unsigned int show_cpu;		/* print the cpu column */

int aperf_mperf_unstable;	/* APERF/MPERF went backwards at least once */
int backwards_count;
char *progname;

int num_cpus;
cpu_set_t *cpu_mask;		/* affinity mask used to migrate to each cpu */
size_t cpu_mask_size;

/*
 * One set of counters per on-line cpu, kept on a sorted linked list.
 * Some MSRs are per-thread, some per-core, some per-package; all are
 * read on every cpu regardless (duplicates are harmless).
 */
struct counters {
	unsigned long long tsc;		/* per thread */
	unsigned long long aperf;	/* per thread */
	unsigned long long mperf;	/* per thread */
	unsigned long long c1;	/* per thread (calculated) */
	unsigned long long c3;	/* per core */
	unsigned long long c6;	/* per core */
	unsigned long long c7;	/* per core */
	unsigned long long pc2;	/* per package */
	unsigned long long pc3;	/* per package */
	unsigned long long pc6;	/* per package */
	unsigned long long pc7;	/* per package */
	unsigned long long extra_msr;	/* per thread */
	int pkg;
	int core;
	int cpu;
	struct counters *next;
};

/* even/odd snapshot buffers alternate as "before" and "after" */
struct counters *cnt_even;
struct counters *cnt_odd;
struct counters *cnt_delta;	/* after - before */
struct counters *cnt_average;	/* single struct: average over all cpus */
struct timeval tv_even;
struct timeval tv_odd;
struct timeval tv_delta;
105
106 /*
107 * cpu_mask_init(ncpus)
108 *
109 * allocate and clear cpu_mask
110 * set cpu_mask_size
111 */
cpu_mask_init(int ncpus)112 void cpu_mask_init(int ncpus)
113 {
114 cpu_mask = CPU_ALLOC(ncpus);
115 if (cpu_mask == NULL) {
116 perror("CPU_ALLOC");
117 exit(3);
118 }
119 cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
120 CPU_ZERO_S(cpu_mask_size, cpu_mask);
121 }
122
cpu_mask_uninit()123 void cpu_mask_uninit()
124 {
125 CPU_FREE(cpu_mask);
126 cpu_mask = NULL;
127 cpu_mask_size = 0;
128 }
129
cpu_migrate(int cpu)130 int cpu_migrate(int cpu)
131 {
132 CPU_ZERO_S(cpu_mask_size, cpu_mask);
133 CPU_SET_S(cpu, cpu_mask_size, cpu_mask);
134 if (sched_setaffinity(0, cpu_mask_size, cpu_mask) == -1)
135 return -1;
136 else
137 return 0;
138 }
139
/*
 * get_msr(cpu, offset, msr)
 *
 * Read the 64-bit MSR at 'offset' on 'cpu' into *msr via the msr
 * driver's /dev/cpu/CPU/msr interface.
 *
 * Returns 0 on success, -1 if the device can not be opened (e.g. the
 * msr module is not loaded) or the read comes up short.
 */
int get_msr(int cpu, off_t offset, unsigned long long *msr)
{
	ssize_t retval;
	char pathname[32];
	int fd;

	/* snprintf: never overrun pathname, whatever 'cpu' is */
	snprintf(pathname, sizeof(pathname), "/dev/cpu/%d/msr", cpu);
	fd = open(pathname, O_RDONLY);
	if (fd < 0)
		return -1;

	retval = pread(fd, msr, sizeof *msr, offset);
	close(fd);

	if (retval != sizeof *msr)
		return -1;

	return 0;
}
159
print_header(void)160 void print_header(void)
161 {
162 if (show_pkg)
163 fprintf(stderr, "pk");
164 if (show_pkg)
165 fprintf(stderr, " ");
166 if (show_core)
167 fprintf(stderr, "cor");
168 if (show_cpu)
169 fprintf(stderr, " CPU");
170 if (show_pkg || show_core || show_cpu)
171 fprintf(stderr, " ");
172 if (do_nhm_cstates)
173 fprintf(stderr, " %%c0");
174 if (has_aperf)
175 fprintf(stderr, " GHz");
176 fprintf(stderr, " TSC");
177 if (do_nhm_cstates)
178 fprintf(stderr, " %%c1");
179 if (do_nhm_cstates)
180 fprintf(stderr, " %%c3");
181 if (do_nhm_cstates)
182 fprintf(stderr, " %%c6");
183 if (do_snb_cstates)
184 fprintf(stderr, " %%c7");
185 if (do_snb_cstates)
186 fprintf(stderr, " %%pc2");
187 if (do_nhm_cstates)
188 fprintf(stderr, " %%pc3");
189 if (do_nhm_cstates)
190 fprintf(stderr, " %%pc6");
191 if (do_snb_cstates)
192 fprintf(stderr, " %%pc7");
193 if (extra_msr_offset)
194 fprintf(stderr, " MSR 0x%x ", extra_msr_offset);
195
196 putc('\n', stderr);
197 }
198
dump_cnt(struct counters * cnt)199 void dump_cnt(struct counters *cnt)
200 {
201 if (!cnt)
202 return;
203 if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg);
204 if (cnt->core) fprintf(stderr, "core:: %d ", cnt->core);
205 if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu);
206 if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
207 if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3);
208 if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6);
209 if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7);
210 if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
211 if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
212 if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
213 if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
214 if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
215 if (cnt->extra_msr) fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
216 }
217
dump_list(struct counters * cnt)218 void dump_list(struct counters *cnt)
219 {
220 printf("dump_list 0x%p\n", cnt);
221
222 for (; cnt; cnt = cnt->next)
223 dump_cnt(cnt);
224 }
225
226 /*
227 * column formatting convention & formats
228 * package: "pk" 2 columns %2d
229 * core: "cor" 3 columns %3d
230 * CPU: "CPU" 3 columns %3d
231 * GHz: "GHz" 3 columns %3.2
232 * TSC: "TSC" 3 columns %3.2
233 * percentage " %pc3" %6.2
234 */
/*
 * print_cnt(p)
 *
 * Print one row of statistics for counter-set 'p' to stderr, using the
 * column layout documented above.  'p' holds deltas over the previous
 * measurement interval; residency counters are shown as a percentage of
 * the TSC delta.  When p == cnt_average (the summary row), the topology
 * columns are printed as blanks.
 */
void print_cnt(struct counters *p)
{
	double interval_float;

	/* measurement interval length in seconds */
	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;

	/* topology columns, print blanks on 1st (average) line */
	if (p == cnt_average) {
		if (show_pkg)
			fprintf(stderr, " ");
		if (show_pkg && show_core)
			fprintf(stderr, " ");
		if (show_core)
			fprintf(stderr, "   ");
		if (show_cpu)
			fprintf(stderr, " " "    ");
	} else {
		if (show_pkg)
			fprintf(stderr, "%2d", p->pkg);
		if (show_pkg && show_core)
			fprintf(stderr, " ");
		if (show_core)
			fprintf(stderr, "%3d", p->core);
		if (show_cpu)
			fprintf(stderr, " %3d", p->cpu);
	}

	/* %c0: non-halted cycles (MPERF) as a fraction of TSC */
	if (do_nhm_cstates) {
		if (show_pkg || show_core || show_cpu)
			fprintf(stderr, " ");
		if (!skip_c0)
			fprintf(stderr, "%6.2f", 100.0 * p->mperf/p->tsc);
		else
			fprintf(stderr, "  ****");
	}

	/* GHz: average non-halted frequency = TSC-rate * APERF/MPERF */
	if (has_aperf) {
		if (!aperf_mperf_unstable) {
			fprintf(stderr, " %3.2f",
				1.0 * p->tsc / units * p->aperf /
				p->mperf / interval_float);
		} else {
			/* counters went backwards: flag the estimate */
			if (p->aperf > p->tsc || p->mperf > p->tsc) {
				fprintf(stderr, " ***");
			} else {
				fprintf(stderr, "%3.1f*",
					1.0 * p->tsc /
					units * p->aperf /
					p->mperf / interval_float);
			}
		}
	}

	/* TSC: average clock rate over the interval, in GHz */
	fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float);

	if (do_nhm_cstates) {
		if (!skip_c1)
			fprintf(stderr, " %6.2f", 100.0 * p->c1/p->tsc);
		else
			fprintf(stderr, "  ****");
	}
	if (do_nhm_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->c3/p->tsc);
	if (do_nhm_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->c6/p->tsc);
	if (do_snb_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc);
	if (do_snb_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->pc2/p->tsc);
	if (do_nhm_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->pc3/p->tsc);
	if (do_nhm_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->pc6/p->tsc);
	if (do_snb_cstates)
		fprintf(stderr, " %6.2f", 100.0 * p->pc7/p->tsc);
	if (extra_msr_offset)
		fprintf(stderr, "  0x%016llx", p->extra_msr);
	putc('\n', stderr);
}
317
print_counters(struct counters * counters)318 void print_counters(struct counters *counters)
319 {
320 struct counters *cnt;
321 static int printed;
322
323
324 if (!printed || !summary_only)
325 print_header();
326
327 if (num_cpus > 1)
328 print_cnt(cnt_average);
329
330 printed = 1;
331
332 if (summary_only)
333 return;
334
335 for (cnt = counters; cnt != NULL; cnt = cnt->next)
336 print_cnt(cnt);
337
338 }
339
/*
 * SUBTRACT_COUNTER(after, before, delta)
 *
 * Store (after - before) into delta and evaluate to true when the
 * counter went backwards (before > after).  Note: assigns to 'delta'
 * as a side effect.
 */
#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))

/*
 * compute_delta(after, before, delta)
 *
 * Walk the three parallel per-cpu lists, computing delta = after - before
 * for every counter.  Counters that go backwards are reported; a
 * backwards APERF/MPERF additionally sets skip_c0/skip_c1 and marks the
 * frequency results unstable for the rest of the run.  c1 residency is
 * derived, not read: tsc - mperf - c3 - c6 - c7.
 *
 * Returns 0 on success, -1 if the cpu topology changed mid-run.
 */
int compute_delta(struct counters *after,
	struct counters *before, struct counters *delta)
{
	int errors = 0;
	int perf_err = 0;

	skip_c0 = skip_c1 = 0;

	for ( ; after && before && delta;
		after = after->next, before = before->next, delta = delta->next) {
		if (before->cpu != after->cpu) {
			printf("cpu configuration changed: %d != %d\n",
				before->cpu, after->cpu);
			return -1;
		}

		if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) {
			fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n",
				before->cpu, before->tsc, after->tsc);
			errors++;
		}
		/* check for TSC < 1 Mcycles over interval */
		if (delta->tsc < (1000 * 1000)) {
			fprintf(stderr, "Insanely slow TSC rate,"
				" TSC stops in idle?\n");
			fprintf(stderr, "You can disable all c-states"
				" by booting with \"idle=poll\"\n");
			fprintf(stderr, "or just the deep ones with"
				" \"processor.max_cstate=1\"\n");
			exit(-3);
		}
		if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) {
			fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n",
				before->cpu, before->c3, after->c3);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) {
			fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n",
				before->cpu, before->c6, after->c6);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) {
			fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n",
				before->cpu, before->c7, after->c7);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) {
			fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n",
				before->cpu, before->pc2, after->pc2);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) {
			fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n",
				before->cpu, before->pc3, after->pc3);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) {
			fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n",
				before->cpu, before->pc6, after->pc6);
			errors++;
		}
		if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) {
			fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n",
				before->cpu, before->pc7, after->pc7);
			errors++;
		}

		/* APERF/MPERF failures are tracked separately: they taint
		 * frequency and c0/c1 reporting but are otherwise tolerated */
		perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf);
		if (perf_err) {
			fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n",
				before->cpu, before->aperf, after->aperf);
		}
		perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf);
		if (perf_err) {
			fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n",
				before->cpu, before->mperf, after->mperf);
		}
		if (perf_err) {
			if (!aperf_mperf_unstable) {
				fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
				fprintf(stderr, "* Frequency results do not cover entire interval *\n");
				fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");

				aperf_mperf_unstable = 1;
			}
			/*
			 * mperf delta is likely a huge "positive" number
			 * can not use it for calculating c0 time
			 */
			skip_c0 = 1;
			skip_c1 = 1;
		}

		/*
		 * As mperf and tsc collection are not atomic,
		 * it is possible for mperf's non-halted cycles
		 * to exceed TSC's all cycles: show c1 = 0% in that case.
		 */
		if (delta->mperf > delta->tsc)
			delta->c1 = 0;
		else /* normal case, derive c1 */
			delta->c1 = delta->tsc - delta->mperf
				- delta->c3 - delta->c6 - delta->c7;

		if (delta->mperf == 0)
			delta->mperf = 1;	/* divide by 0 protection */

		/*
		 * for "extra msr", just copy the latest w/o subtracting
		 */
		delta->extra_msr = after->extra_msr;
		if (errors) {
			/* dump both snapshots to aid debugging, then carry on */
			fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
			dump_cnt(before);
			fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
			dump_cnt(after);
			errors = 0;
		}
	}
	return 0;
}
463
compute_average(struct counters * delta,struct counters * avg)464 void compute_average(struct counters *delta, struct counters *avg)
465 {
466 struct counters *sum;
467
468 sum = calloc(1, sizeof(struct counters));
469 if (sum == NULL) {
470 perror("calloc sum");
471 exit(1);
472 }
473
474 for (; delta; delta = delta->next) {
475 sum->tsc += delta->tsc;
476 sum->c1 += delta->c1;
477 sum->c3 += delta->c3;
478 sum->c6 += delta->c6;
479 sum->c7 += delta->c7;
480 sum->aperf += delta->aperf;
481 sum->mperf += delta->mperf;
482 sum->pc2 += delta->pc2;
483 sum->pc3 += delta->pc3;
484 sum->pc6 += delta->pc6;
485 sum->pc7 += delta->pc7;
486 }
487 avg->tsc = sum->tsc/num_cpus;
488 avg->c1 = sum->c1/num_cpus;
489 avg->c3 = sum->c3/num_cpus;
490 avg->c6 = sum->c6/num_cpus;
491 avg->c7 = sum->c7/num_cpus;
492 avg->aperf = sum->aperf/num_cpus;
493 avg->mperf = sum->mperf/num_cpus;
494 avg->pc2 = sum->pc2/num_cpus;
495 avg->pc3 = sum->pc3/num_cpus;
496 avg->pc6 = sum->pc6/num_cpus;
497 avg->pc7 = sum->pc7/num_cpus;
498
499 free(sum);
500 }
501
get_counters(struct counters * cnt)502 int get_counters(struct counters *cnt)
503 {
504 for ( ; cnt; cnt = cnt->next) {
505
506 if (cpu_migrate(cnt->cpu))
507 return -1;
508
509 if (get_msr(cnt->cpu, MSR_TSC, &cnt->tsc))
510 return -1;
511
512 if (has_aperf) {
513 if (get_msr(cnt->cpu, MSR_APERF, &cnt->aperf))
514 return -1;
515 if (get_msr(cnt->cpu, MSR_MPERF, &cnt->mperf))
516 return -1;
517 }
518
519 if (do_nhm_cstates) {
520 if (get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY, &cnt->c3))
521 return -1;
522 if (get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY, &cnt->c6))
523 return -1;
524 }
525
526 if (do_snb_cstates)
527 if (get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY, &cnt->c7))
528 return -1;
529
530 if (do_nhm_cstates) {
531 if (get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY, &cnt->pc3))
532 return -1;
533 if (get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY, &cnt->pc6))
534 return -1;
535 }
536 if (do_snb_cstates) {
537 if (get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY, &cnt->pc2))
538 return -1;
539 if (get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY, &cnt->pc7))
540 return -1;
541 }
542 if (extra_msr_offset)
543 if (get_msr(cnt->cpu, extra_msr_offset, &cnt->extra_msr))
544 return -1;
545 }
546 return 0;
547 }
548
/*
 * print_nehalem_info()
 *
 * Decode MSR_NEHALEM_PLATFORM_INFO (max-efficiency and TSC ratios)
 * and, where present, MSR_NEHALEM_TURBO_RATIO_LIMIT (max turbo ratio
 * per number of active cores), printing each as ratio * bclk MHz.
 * Field layout per the Intel SDM for these MSRs.
 */
void print_nehalem_info(void)
{
	unsigned long long msr;
	unsigned int ratio;

	if (!do_nehalem_platform_info)
		return;

	/* read from cpu 0; this MSR is package-scoped */
	get_msr(0, MSR_NEHALEM_PLATFORM_INFO, &msr);

	/* bits 47:40 - maximum efficiency ratio */
	ratio = (msr >> 40) & 0xFF;
	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
		ratio, bclk, ratio * bclk);

	/* bits 15:8 - non-turbo (TSC) ratio */
	ratio = (msr >> 8) & 0xFF;
	fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
		ratio, bclk, ratio * bclk);

	if (verbose > 1)
		fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr);

	if (!do_nehalem_turbo_ratio_limit)
		return;

	get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT, &msr);

	/* one 8-bit ratio field per active-core count, 1C at bits 7:0 */
	ratio = (msr >> 24) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 16) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
			ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0xFF;
	if (ratio)
		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
			ratio, bclk, ratio * bclk);

}
596
free_counter_list(struct counters * list)597 void free_counter_list(struct counters *list)
598 {
599 struct counters *p;
600
601 for (p = list; p; ) {
602 struct counters *free_me;
603
604 free_me = p;
605 p = p->next;
606 free(free_me);
607 }
608 }
609
free_all_counters(void)610 void free_all_counters(void)
611 {
612 free_counter_list(cnt_even);
613 cnt_even = NULL;
614
615 free_counter_list(cnt_odd);
616 cnt_odd = NULL;
617
618 free_counter_list(cnt_delta);
619 cnt_delta = NULL;
620
621 free_counter_list(cnt_average);
622 cnt_average = NULL;
623 }
624
/*
 * insert_counters(list, new)
 *
 * Insert 'new' into *list, keeping the list sorted by ascending
 * package#, then core#, then cpu#.  As a side effect, discovers the
 * topology: sets show_cpu/show_pkg/show_core when more than one
 * cpu/package/core is seen (unless -s suppresses the columns).
 */
void insert_counters(struct counters **list,
	struct counters *new)
{
	struct counters *prev;

	/*
	 * list was empty
	 */
	if (*list == NULL) {
		new->next = *list;
		*list = new;
		return;
	}

	if (!summary_only)
		show_cpu = 1;	/* there is more than one CPU */

	/*
	 * insert on front of list.
	 * It is sorted by ascending package#, core#, cpu#
	 */
	if (((*list)->pkg > new->pkg) ||
	    (((*list)->pkg == new->pkg) && ((*list)->core > new->core)) ||
	    (((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) {
		new->next = *list;
		*list = new;
		return;
	}

	prev = *list;

	/* walk past smaller packages; passing one proves multiple pkgs */
	while (prev->next && (prev->next->pkg < new->pkg)) {
		prev = prev->next;
		if (!summary_only)
			show_pkg = 1;	/* there is more than 1 package */
	}

	/* within the package, walk past smaller cores */
	while (prev->next && (prev->next->pkg == new->pkg)
		&& (prev->next->core < new->core)) {
		prev = prev->next;
		if (!summary_only)
			show_core = 1;	/* there is more than 1 core */
	}

	/* within the core, walk past smaller cpu numbers */
	while (prev->next && (prev->next->pkg == new->pkg)
		&& (prev->next->core == new->core)
		&& (prev->next->cpu < new->cpu)) {
		prev = prev->next;
	}

	/*
	 * insert after "prev"
	 */
	new->next = prev->next;
	prev->next = new;
}
681
alloc_new_counters(int pkg,int core,int cpu)682 void alloc_new_counters(int pkg, int core, int cpu)
683 {
684 struct counters *new;
685
686 if (verbose > 1)
687 printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
688
689 new = (struct counters *)calloc(1, sizeof(struct counters));
690 if (new == NULL) {
691 perror("calloc");
692 exit(1);
693 }
694 new->pkg = pkg;
695 new->core = core;
696 new->cpu = cpu;
697 insert_counters(&cnt_odd, new);
698
699 new = (struct counters *)calloc(1,
700 sizeof(struct counters));
701 if (new == NULL) {
702 perror("calloc");
703 exit(1);
704 }
705 new->pkg = pkg;
706 new->core = core;
707 new->cpu = cpu;
708 insert_counters(&cnt_even, new);
709
710 new = (struct counters *)calloc(1, sizeof(struct counters));
711 if (new == NULL) {
712 perror("calloc");
713 exit(1);
714 }
715 new->pkg = pkg;
716 new->core = core;
717 new->cpu = cpu;
718 insert_counters(&cnt_delta, new);
719
720 new = (struct counters *)calloc(1, sizeof(struct counters));
721 if (new == NULL) {
722 perror("calloc");
723 exit(1);
724 }
725 new->pkg = pkg;
726 new->core = core;
727 new->cpu = cpu;
728 cnt_average = new;
729 }
730
/*
 * get_physical_package_id(cpu)
 *
 * Return the package id of 'cpu' from sysfs topology.
 * Exits on open or parse failure.
 */
int get_physical_package_id(int cpu)
{
	char path[80];	/* fixed part is 56 chars; 64 could overflow for huge cpu#s */
	FILE *filep;
	int pkg;

	snprintf(path, sizeof(path),
		"/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	if (fscanf(filep, "%d", &pkg) != 1) {
		fprintf(stderr, "%s: failed to parse package id\n", path);
		exit(1);
	}
	fclose(filep);
	return pkg;
}
747
/*
 * get_core_id(cpu)
 *
 * Return the core id of 'cpu' from sysfs topology.
 * Exits on open or parse failure.
 */
int get_core_id(int cpu)
{
	char path[80];
	FILE *filep;
	int core;

	snprintf(path, sizeof(path),
		"/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	if (fscanf(filep, "%d", &core) != 1) {
		fprintf(stderr, "%s: failed to parse core id\n", path);
		exit(1);
	}
	fclose(filep);
	return core;
}
764
765 /*
766 * run func(pkg, core, cpu) on every cpu in /proc/stat
767 */
768
/*
 * for_all_cpus(func)
 *
 * Parse /proc/stat, calling func(pkg, core, cpu) for every "cpuN"
 * line found.  Returns the number of cpus seen.  Exits if /proc/stat
 * can not be opened or its first line is not the aggregate "cpu " row.
 */
int for_all_cpus(void (func)(int, int, int))
{
	FILE *fp;
	int cpu_count;
	int retval;

	fp = fopen(proc_stat, "r");
	if (fp == NULL) {
		perror(proc_stat);
		exit(1);
	}

	/* consume the aggregate "cpu ..." line; all fields suppressed,
	 * so a successful match returns 0 */
	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
	if (retval != 0) {
		perror("/proc/stat format");
		exit(1);
	}

	/* one "cpuN ..." line per on-line cpu follows */
	for (cpu_count = 0; ; cpu_count++) {
		int cpu;

		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu);
		if (retval != 1)
			break;

		func(get_physical_package_id(cpu), get_core_id(cpu), cpu);
	}
	fclose(fp);
	return cpu_count;
}
799
/*
 * re_initialize()
 *
 * Throw away all counter lists and rebuild them from the current
 * /proc/stat cpu set; resize the affinity mask to match.  Called when
 * cpus come on/off-line or a counter read fails mid-run.
 */
void re_initialize(void)
{
	free_all_counters();
	num_cpus = for_all_cpus(alloc_new_counters);
	cpu_mask_uninit();
	cpu_mask_init(num_cpus);
	printf("turbostat: re-initialized with num_cpus %d\n", num_cpus);
}
808
dummy(int pkg,int core,int cpu)809 void dummy(int pkg, int core, int cpu) { return; }
810 /*
811 * check to see if a cpu came on-line
812 */
verify_num_cpus(void)813 int verify_num_cpus(void)
814 {
815 int new_num_cpus;
816
817 new_num_cpus = for_all_cpus(dummy);
818
819 if (new_num_cpus != num_cpus) {
820 if (verbose)
821 printf("num_cpus was %d, is now %d\n",
822 num_cpus, new_num_cpus);
823 return -1;
824 }
825 return 0;
826 }
827
/*
 * turbostat_loop()
 *
 * Main measurement loop: alternate snapshots between the "even" and
 * "odd" counter lists, so each half-iteration uses the previous
 * snapshot as its baseline.  Any topology change or counter-read
 * failure restarts measurement from scratch.  Never returns.
 */
void turbostat_loop()
{
restart:
	/* prime the first baseline */
	get_counters(cnt_even);
	gettimeofday(&tv_even, (struct timezone *)NULL);

	while (1) {
		if (verify_num_cpus()) {
			re_initialize();
			goto restart;
		}
		sleep(interval_sec);
		/* odd snapshot; delta = odd - even */
		if (get_counters(cnt_odd)) {
			re_initialize();
			goto restart;
		}
		gettimeofday(&tv_odd, (struct timezone *)NULL);
		compute_delta(cnt_odd, cnt_even, cnt_delta);
		timersub(&tv_odd, &tv_even, &tv_delta);
		compute_average(cnt_delta, cnt_average);
		print_counters(cnt_delta);
		sleep(interval_sec);
		/* even snapshot; delta = even - odd */
		if (get_counters(cnt_even)) {
			re_initialize();
			goto restart;
		}
		gettimeofday(&tv_even, (struct timezone *)NULL);
		compute_delta(cnt_even, cnt_odd, cnt_delta);
		timersub(&tv_even, &tv_odd, &tv_delta);
		compute_average(cnt_delta, cnt_average);
		print_counters(cnt_delta);
	}
}
861
/* verify the msr driver is loaded; exit with advice if not */
void check_dev_msr()
{
	struct stat sb;

	if (stat("/dev/cpu/0/msr", &sb) == 0)
		return;

	fprintf(stderr, "no /dev/cpu/0/msr\n");
	fprintf(stderr, "Try \"# modprobe msr\"\n");
	exit(-5);
}
872
/* MSR access requires root; exit otherwise */
void check_super_user()
{
	if (getuid() == 0)
		return;

	fprintf(stderr, "must be root\n");
	exit(-6);
}
880
has_nehalem_turbo_ratio_limit(unsigned int family,unsigned int model)881 int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
882 {
883 if (!genuine_intel)
884 return 0;
885
886 if (family != 6)
887 return 0;
888
889 switch (model) {
890 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
891 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
892 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
893 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
894 case 0x2C: /* Westmere EP - Gulftown */
895 case 0x2A: /* SNB */
896 case 0x2D: /* SNB Xeon */
897 case 0x3A: /* IVB */
898 case 0x3D: /* IVB Xeon */
899 return 1;
900 case 0x2E: /* Nehalem-EX Xeon - Beckton */
901 case 0x2F: /* Westmere-EX Xeon - Eagleton */
902 default:
903 return 0;
904 }
905 }
906
is_snb(unsigned int family,unsigned int model)907 int is_snb(unsigned int family, unsigned int model)
908 {
909 if (!genuine_intel)
910 return 0;
911
912 switch (model) {
913 case 0x2A:
914 case 0x2D:
915 return 1;
916 }
917 return 0;
918 }
919
/* base clock: 100 MHz on Sandy Bridge, 133.33 MHz on earlier parts */
double discover_bclk(unsigned int family, unsigned int model)
{
	return is_snb(family, model) ? 100.00 : 133.33;
}
927
/*
 * check_cpuid()
 *
 * Probe CPUID for everything turbostat needs: vendor, family/model,
 * MSR support, invariant TSC and APERF/MPERF.  Exits if a required
 * capability is missing; otherwise sets the feature-flag globals.
 */
void check_cpuid()
{
	unsigned int eax, ebx, ecx, edx, max_level;
	unsigned int fms, family, model, stepping;

	eax = ebx = ecx = edx = 0;

	/* leaf 0: max level + vendor string in EBX/EDX/ECX */
	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));

	/* "Genu" "ineI" "ntel" */
	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
		genuine_intel = 1;

	if (verbose)
		fprintf(stderr, "%.4s%.4s%.4s ",
			(char *)&ebx, (char *)&edx, (char *)&ecx);

	/* leaf 1: family/model/stepping in EAX, feature flags in ECX/EDX */
	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	stepping = fms & 0xf;
	/* extended model bits apply for family 6 and 0xf */
	if (family == 6 || family == 0xf)
		model += ((fms >> 16) & 0xf) << 4;

	if (verbose)
		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
			max_level, family, model, stepping, family, model, stepping);

	/* CPUID.01H:EDX[5] - RDMSR/WRMSR support */
	if (!(edx & (1 << 5))) {
		fprintf(stderr, "CPUID: no MSR\n");
		exit(1);
	}

	/*
	 * check max extended function levels of CPUID.
	 * This is needed to check for invariant TSC.
	 * This check is valid for both Intel and AMD.
	 */
	ebx = ecx = edx = 0;
	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));

	if (max_level < 0x80000007) {
		fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
		exit(1);
	}

	/*
	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
	 * this check is valid for both Intel and AMD
	 */
	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
	has_invariant_tsc = edx & (1 << 8);

	if (!has_invariant_tsc) {
		fprintf(stderr, "No invariant TSC\n");
		exit(1);
	}

	/*
	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
	 * this check is valid for both Intel and AMD
	 */

	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
	has_aperf = ecx & (1 << 0);
	if (!has_aperf) {
		fprintf(stderr, "No APERF MSR\n");
		exit(1);
	}

	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
	do_snb_cstates = is_snb(family, model);
	bclk = discover_bclk(family, model);

	do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
}
1004
1005
usage()1006 void usage()
1007 {
1008 fprintf(stderr, "%s: [-v] [-M MSR#] [-i interval_sec | command ...]\n",
1009 progname);
1010 exit(1);
1011 }
1012
1013
1014 /*
1015 * in /dev/cpu/ return success for names that are numbers
1016 * ie. filter out ".", "..", "microcode".
1017 */
/*
 * in /dev/cpu/ return success for names that are numbers
 * ie. filter out ".", "..", "microcode".
 */
int dir_filter(const struct dirent *dirp)
{
	/* cast to unsigned char: isdigit() on a negative plain char is UB */
	return isdigit((unsigned char)dirp->d_name[0]) ? 1 : 0;
}
1025
/* stub: per-read opens are done in get_msr(); always succeeds */
int open_dev_cpu_msr(int dummy1)
{
	(void)dummy1;
	return 0;
}
1030
/*
 * turbostat_init()
 *
 * One-time setup, in dependency order: probe CPUID features, confirm
 * the msr driver and root privilege, build the per-cpu counter lists
 * and affinity mask, and optionally dump platform ratio info.
 */
void turbostat_init()
{
	check_cpuid();

	check_dev_msr();
	check_super_user();

	num_cpus = for_all_cpus(alloc_new_counters);
	cpu_mask_init(num_cpus);

	if (verbose)
		print_nehalem_info();
}
1044
fork_it(char ** argv)1045 int fork_it(char **argv)
1046 {
1047 int retval;
1048 pid_t child_pid;
1049 get_counters(cnt_even);
1050 gettimeofday(&tv_even, (struct timezone *)NULL);
1051
1052 child_pid = fork();
1053 if (!child_pid) {
1054 /* child */
1055 execvp(argv[0], argv);
1056 } else {
1057 int status;
1058
1059 /* parent */
1060 if (child_pid == -1) {
1061 perror("fork");
1062 exit(1);
1063 }
1064
1065 signal(SIGINT, SIG_IGN);
1066 signal(SIGQUIT, SIG_IGN);
1067 if (waitpid(child_pid, &status, 0) == -1) {
1068 perror("wait");
1069 exit(1);
1070 }
1071 }
1072 get_counters(cnt_odd);
1073 gettimeofday(&tv_odd, (struct timezone *)NULL);
1074 retval = compute_delta(cnt_odd, cnt_even, cnt_delta);
1075
1076 timersub(&tv_odd, &tv_even, &tv_delta);
1077 compute_average(cnt_delta, cnt_average);
1078 if (!retval)
1079 print_counters(cnt_delta);
1080
1081 fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
1082
1083 return 0;
1084 }
1085
cmdline(int argc,char ** argv)1086 void cmdline(int argc, char **argv)
1087 {
1088 int opt;
1089
1090 progname = argv[0];
1091
1092 while ((opt = getopt(argc, argv, "+svi:M:")) != -1) {
1093 switch (opt) {
1094 case 's':
1095 summary_only++;
1096 break;
1097 case 'v':
1098 verbose++;
1099 break;
1100 case 'i':
1101 interval_sec = atoi(optarg);
1102 break;
1103 case 'M':
1104 sscanf(optarg, "%x", &extra_msr_offset);
1105 if (verbose > 1)
1106 fprintf(stderr, "MSR 0x%X\n", extra_msr_offset);
1107 break;
1108 default:
1109 usage();
1110 }
1111 }
1112 }
1113
/*
 * Parse options, initialize, then either measure around a forked
 * command (remaining argv) or enter the endless sampling loop.
 */
int main(int argc, char **argv)
{
	cmdline(argc, argv);

	if (verbose > 1)
		fprintf(stderr, "turbostat Dec 6, 2010"
			" - Len Brown <lenb@kernel.org>\n");
	if (verbose > 1)
		fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n");

	turbostat_init();

	/*
	 * if any params left, it must be a command to fork
	 */
	if (argc - optind)
		return fork_it(argv + optind);
	else
		turbostat_loop();

	return 0;
}
1136