/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

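/*
 * Assemble the event-select MSR value for one counter. The layout
 * follows the architectural PERFEVTSEL format (roughly):
 *
 *   bits  0-7    event select (low byte)
 *   bits  8-15   unit mask
 *   bit   16     USR - count while in user mode
 *   bit   17     OS  - count while in kernel mode
 *   bit   20     INT - raise an interrupt (here: NMI) on overflow
 *   bits 32-35   extended event select bits (AMD models)
 *
 * User-supplied "extra" bits are masked down to INV/EDGE/CMASK, and
 * (event & 0x0F00) << 24 places event bits 8-11 at bits 32-35 for
 * models whose event_mask defines them.
 */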
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
				  ARCH_PERFMON_EVENTSEL_EDGE |
				  ARCH_PERFMON_EVENTSEL_CMASK);
	val |= counter_config->extra;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (u64)(event & 0x0F00) << 24;

	return val;
}

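/*
 * Local NMI handler. Returns NMI_HANDLED when the NMI may have been
 * raised by one of our counters, NMI_DONE otherwise. If profiling is
 * still enabled but the counters were just stopped, an in-flight
 * counter NMI can arrive here; stop the counters on this CPU again so
 * they stay quiet.
 */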
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
	if (ctr_running)
		model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
	else if (!nmi_enabled)
		return NMI_DONE;
	else
		model->stop(this_cpu_ptr(&cpu_msrs));
	return NMI_HANDLED;
}

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}

static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->start(msrs);
}

static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->stop(msrs);
}

static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

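/*
 * Event multiplexing: the model may expose more virtual counters than
 * there are physical counters. The physical counters are then
 * time-shared between sets of virtual counters; switch_index holds the
 * offset of the virtual set currently loaded on this CPU.
 */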
static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}

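/* Map a physical counter slot to/from the currently active virtual set. */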
inline int op_x86_phys_to_virt(int phys)
{
	return __this_cpu_read(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}

static void nmi_shutdown_mux(void)
{
	int i;

	if (!has_mux())
		return;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}

static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	if (!has_mux())
		return;

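	/* Counters count up and NMI on overflow, so seed them with -count. */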
	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

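/*
 * Rotate this CPU to the next set of virtual counters: stop counting,
 * save the live virtual set, advance switch_index (wrapping to 0 past
 * the last or first unused slot), reprogram the control registers,
 * restore the new set and start counting again.
 */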
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif

static void free_msrs(void)
{
	int i;
	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
	nmi_shutdown_mux();
}

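/*
 * The per-CPU counter and control arrays are kzalloc'ed, so slots the
 * model does not claim keep addr == 0 and are skipped by the
 * save/restore loops.
 */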
static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}

static void nmi_cpu_setup(void)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_save_registers(msrs);
	raw_spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	raw_spin_unlock(&oprofilefs_lock);
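	/* Save the current LVTPC and redirect perfctr interrupts as NMIs. */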
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}

static void nmi_cpu_shutdown(void)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* Restoring APIC_LVTPC can trigger an APIC error because the delivery
	 * mode and vector number combination can be illegal. That's by design:
	 * at power-on the APIC LVT entries contain a zero vector number, which
	 * is legal only for NMI delivery mode. So inhibit APIC errors before
	 * restoring the LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}

static int nmi_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	if (nmi_enabled)
		nmi_cpu_setup();
	if (ctr_running)
		nmi_cpu_start(NULL);
	local_irq_enable();
	return 0;
}

static int nmi_cpu_down_prep(unsigned int cpu)
{
	local_irq_disable();
	if (ctr_running)
		nmi_cpu_stop(NULL);
	if (nmi_enabled)
		nmi_cpu_shutdown();
	local_irq_enable();
	return 0;
}

static int nmi_create_files(struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use.  This should protect the userspace app.
		 * NOTE:  assumes a 1:1 mapping here (that counters are organized
		 *        sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf,  sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(root, buf);
		oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
		oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
	}

	return 0;
}

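/* Dynamic hotplug state returned by cpuhp_setup_state(), kept for removal. */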
static enum cpuhp_state cpuhp_nmi_online;

static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!IS_ENABLED(CONFIG_SMP) || !cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
				nmi_cpu_online, nmi_cpu_down_prep);
	if (err < 0)
		goto fail_nmi;
	cpuhp_nmi_online = err;
	return 0;
fail_nmi:
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
fail:
	free_msrs();
	return err;
}

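/*
 * Teardown mirrors nmi_setup() in reverse: remove the hotplug callbacks,
 * clear the flags (and make that visible to the NMI handler), unregister
 * the handler, then free the per-CPU MSR state.
 */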
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	cpuhp_remove_state(cpuhp_nmi_online);
	nmi_enabled = 0;
	ctr_running = 0;

	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}

#ifdef CONFIG_PM

static int nmi_suspend(void)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static void nmi_resume(void)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
}

static struct syscore_ops oprofile_syscore_ops = {
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}

#else

static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */

static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

enum __force_cpu_type {
	reserved = 0,		/* do not force */
	timer,
	arch_perfmon,
};

static int force_cpu_type;

static int set_cpu_type(const char *str, const struct kernel_param *kp)
{
	if (!strcmp(str, "timer")) {
		force_cpu_type = timer;
		printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
	} else if (!strcmp(str, "arch_perfmon")) {
		force_cpu_type = arch_perfmon;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	} else {
		force_cpu_type = 0;
	}

	return 0;
}
module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);

static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *  http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 * "Intel 64 and IA-32 Architectures Software Developer's
	 * Manual Volume 3B: System Programming Guide", "Table B-1
	 * CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}

int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return -ENODEV;

	if (force_cpu_type == timer)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32-bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

void op_nmi_exit(void)
{
	exit_suspend_resume();
}