1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2007 MIPS Technologies, Inc.
7 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
8 */
9 #include <linux/clockchips.h>
10 #include <linux/interrupt.h>
11 #include <linux/percpu.h>
12
13 #include <asm/smtc_ipi.h>
14 #include <asm/time.h>
15 #include <asm/cevt-r4k.h>
16
17 /*
18 * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
19 * of these routines with SMTC-specific variants.
20 */
21
22 #ifndef CONFIG_MIPS_MT_SMTC
23
mips_next_event(unsigned long delta,struct clock_event_device * evt)24 static int mips_next_event(unsigned long delta,
25 struct clock_event_device *evt)
26 {
27 unsigned int cnt;
28 int res;
29
30 cnt = read_c0_count();
31 cnt += delta;
32 write_c0_compare(cnt);
33 res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
34 return res;
35 }
36
37 #endif /* CONFIG_MIPS_MT_SMTC */
38
/*
 * The CP0 Count/Compare timer is free-running and has no programmable
 * operating modes, so mode change requests from the clockevents core
 * are deliberately ignored.
 */
void mips_set_clock_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
{
	/* Nothing to do ... */
}
44
/* Per-CPU clockevent device backed by the CP0 Count/Compare timer. */
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
/* Set once the shared timer irqaction has been installed (see r4k_clockevent_init). */
int cp0_timer_irq_installed;
47
48 #ifndef CONFIG_MIPS_MT_SMTC
49
/*
 * Interrupt handler for the CP0 Count/Compare timer.  On cores before
 * architecture release 2 the timer may share an interrupt line with the
 * performance counters, which is why perf handling runs first.  Always
 * returns IRQ_HANDLED since a shared line cannot be disambiguated here.
 */
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to performance counter interrupts. But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt. Being the paranoiacs we are we check anyway.
	 *
	 * NOTE(review): Cause bit 30 appears to be the R2 timer-interrupt
	 * (TI) pending bit — confirm against the MIPS32 PRA.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}
80
81 #endif /* Not CONFIG_MIPS_MT_SMTC */
82
/*
 * Shared irqaction for the Count/Compare timer; installed once for all
 * CPUs by r4k_clockevent_init.  IRQF_PERCPU because each CPU has its
 * own timer behind the same vector.
 */
struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
	.flags = IRQF_DISABLED | IRQF_PERCPU,
	.name = "timer",
};
88
89
/*
 * Placeholder event handler used at registration time; the clockevents
 * core replaces it with the real tick handler.
 */
void mips_event_handler(struct clock_event_device *dev)
{
}
93
94 /*
95 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
96 */
/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
	/*
	 * The Cause register's IP bits start at bit 8, so shifting right
	 * by cp0_compare_irq and masking with 0x100 (bit 8) tests exactly
	 * the compare interrupt's pending bit.
	 */
	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
}
101
/*
 * Compare interrupt can be routed and latched outside the core,
 * so a single execution hazard barrier may not be enough to give
 * it time to clear as seen in the Cause register. 4 time the
 * pipeline depth seems reasonably conservative, and empirically
 * works better in configurations with high CPU/bus clock ratios.
 */

/* The four repeated barriers are intentional — see the comment above. */
#define compare_change_hazard() \
	do { \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
	} while (0)
117
/*
 * Probe whether the Count/Compare interrupt actually works on this CPU:
 * try to clear any stale pending interrupt, arm a short timeout, wait
 * for it to expire, then verify the interrupt asserts and can be acked.
 * Returns 1 if the timer is usable, 0 otherwise.
 *
 * NOTE(review): runs with the interrupt masked at the CPU — the pending
 * state is only observed via the Cause register, never taken.
 */
int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending? Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
		compare_change_hazard();
		if (c0_compare_int_pending())
			return 0;
	}

	/*
	 * Arm Compare a small delta ahead of Count; retry with larger
	 * deltas if Count already passed Compare by the time the write
	 * landed (slow Count/Compare propagation).
	 */
	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		compare_change_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
		    break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry  */

	/* The interrupt should now be pending; if not, the timer is broken. */
	compare_change_hazard();
	if (!c0_compare_int_pending())
		return 0;

	/* Acking by rewriting Compare must clear the pending state. */
	write_c0_compare(read_c0_count());
	compare_change_hazard();
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}
160
161 #ifndef CONFIG_MIPS_MT_SMTC
162
r4k_clockevent_init(void)163 int __cpuinit r4k_clockevent_init(void)
164 {
165 uint64_t mips_freq = mips_hpt_frequency;
166 unsigned int cpu = smp_processor_id();
167 struct clock_event_device *cd;
168 unsigned int irq;
169
170 if (!cpu_has_counter || !mips_hpt_frequency)
171 return -ENXIO;
172
173 if (!c0_compare_int_usable())
174 #ifdef CONFIG_MIPS_GOLDFISH
175 /* FIXME: this is not a reliable test with QEMU */
176 ;
177 #else
178 return -ENXIO;
179 #endif
180
181 /*
182 * With vectored interrupts things are getting platform specific.
183 * get_c0_compare_int is a hook to allow a platform to return the
184 * interrupt number of it's liking.
185 */
186 irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
187 if (get_c0_compare_int)
188 irq = get_c0_compare_int();
189
190 cd = &per_cpu(mips_clockevent_device, cpu);
191
192 cd->name = "MIPS";
193 cd->features = CLOCK_EVT_FEAT_ONESHOT;
194
195 /* Calculate the min / max delta */
196 cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
197 cd->shift = 32;
198 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
199 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
200
201 cd->rating = 300;
202 cd->irq = irq;
203 cd->cpumask = cpumask_of(cpu);
204 cd->set_next_event = mips_next_event;
205 cd->set_mode = mips_set_clock_mode;
206 cd->event_handler = mips_event_handler;
207
208 clockevents_register_device(cd);
209
210 if (cp0_timer_irq_installed)
211 return 0;
212
213 cp0_timer_irq_installed = 1;
214
215 setup_irq(irq, &c0_compare_irqaction);
216
217 return 0;
218 }
219
220 #endif /* Not CONFIG_MIPS_MT_SMTC */
221