/*
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002,2006  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/mca.h>
#include <linux/nmi.h>

#include <asm/i8253.h>
#include <asm/hpet.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/timer.h>

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;

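/*
 * Return the instruction pointer that a profiling tick should be
 * credited to. Samples landing inside the locking primitives are
 * attributed to the caller, so lock overhead shows up at its call sites.
 */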
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/*
	 * Assume the lock function has either no stack frame or a copy
	 * of EFLAGS from PUSHF. EFLAGS always has bits 22 and up cleared,
	 * unlike kernel addresses.
	 */
	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
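		/* The return address sits one word above the saved frame pointer. */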
		return *(unsigned long *)(regs->bp + sizeof(long));
#else
		unsigned long *sp = (unsigned long *)regs->sp;
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
#endif
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

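/*
 * Default timer interrupt handler for IRQ0: bump the interrupt count
 * and let the registered clock event device do the tick bookkeeping.
 */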
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq0_irqs);

	global_clock_event->event_handler(global_clock_event);

#ifdef CONFIG_MCA
	if (MCA_bus) {
		u8 irq_v = inb_p(0x61);       /* read the current state */
		outb_p(irq_v | 0x80, 0x61);   /* reset the IRQ */
	}
#endif

	return IRQ_HANDLED;
}

/*
 * calibrate_cpu is used on systems with fixed-rate TSCs to determine
 * the processor frequency.
 */
#define TICK_COUNT 100000000
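/*
 * Method: program a free performance counter to count unhalted core
 * cycles (AMD event 0x76), spin until the TSC has advanced by
 * TICK_COUNT ticks, then scale the cycle count by the known TSC rate.
 */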
unsigned long __init calibrate_cpu(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
		     "cpu_khz value may be incorrect.\n");
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
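	/* bit 22: enable, bits 17:16: count in OS and user mode, bits 7:0: event 0x76 */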
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles();
	} while ((tsc_now - tsc_start) < TICK_COUNT);

	local_irq_restore(flags);
	if (no_ctr_free) {
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}

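	/* cycles counted, divided by TSC ticks elapsed, scaled by tsc_khz */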
	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}

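/* IRQ0 is the legacy timer interrupt (PIT, or HPET in legacy replacement mode). */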
static struct irqaction irq0 = {
	.handler	= timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER,
	.mask		= CPU_MASK_NONE,
	.name		= "timer"
};

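/*
 * Set up the tick source: prefer the HPET as the clock event device,
 * fall back to the PIT if it cannot be enabled, then register the
 * timer irqaction on IRQ0.
 */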
void __init hpet_time_init(void)
{
	if (!hpet_enable())
		setup_pit_timer();

	irq0.mask = cpumask_of_cpu(0);
	setup_irq(0, &irq0);
}

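/*
 * Early time init: only the TSC is calibrated here; the interrupt-driven
 * timer setup is deferred via late_time_init (normally hpet_time_init
 * above).
 */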
void __init time_init(void)
{
	tsc_init();

	late_time_init = choose_time_init();
}
138