/*
 * DaVinci timer subsystem
 *
 * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
 *
 * 2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>

#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include <mach/cputype.h>
#include <mach/hardware.h>
#include <mach/time.h>

static struct clock_event_device clockevent_davinci;
static unsigned int davinci_clock_tick_rate;

/*
 * This driver configures the 2 64-bit count-up timers as 4 independent
 * 32-bit count-up timers used as follows:
 */

enum {
	TID_CLOCKEVENT,
	TID_CLOCKSOURCE,
};
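
/*
 * Note: TID_CLOCKEVENT and TID_CLOCKSOURCE index the timers[] array below.
 * The actual 32-bit hardware timer half each one drives (e.g. T0_BOT or
 * T0_TOP) is assigned at runtime in davinci_timer_init() from
 * davinci_soc_info.timer_info.
 */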

/* Timer register offsets */
#define PID12				0x0
#define TIM12				0x10
#define TIM34				0x14
#define PRD12				0x18
#define PRD34				0x1c
#define TCR				0x20
#define TGCR				0x24
#define WDTCR				0x28

/* Offsets of the 8 compare registers */
#define CMP12_0				0x60
#define CMP12_1				0x64
#define CMP12_2				0x68
#define CMP12_3				0x6c
#define CMP12_4				0x70
#define CMP12_5				0x74
#define CMP12_6				0x78
#define CMP12_7				0x7c

/* Timer register bitfields */
#define TCR_ENAMODE_DISABLE		0x0
#define TCR_ENAMODE_ONESHOT		0x1
#define TCR_ENAMODE_PERIODIC		0x2
#define TCR_ENAMODE_MASK		0x3

#define TGCR_TIMMODE_SHIFT		2
#define TGCR_TIMMODE_64BIT_GP		0x0
#define TGCR_TIMMODE_32BIT_UNCHAINED	0x1
#define TGCR_TIMMODE_64BIT_WDOG		0x2
#define TGCR_TIMMODE_32BIT_CHAINED	0x3

#define TGCR_TIM12RS_SHIFT		0
#define TGCR_TIM34RS_SHIFT		1
#define TGCR_RESET			0x0
#define TGCR_UNRESET			0x1
#define TGCR_RESET_MASK			0x3
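
/*
 * For reference, timer_init() below ends up writing TGCR as unchained
 * 32-bit mode with both timer halves released from reset, i.e.
 * (TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT) |
 * (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
 * (TGCR_UNRESET << TGCR_TIM34RS_SHIFT), which works out to 0x7.
 */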

struct timer_s {
	char *name;
	unsigned int id;
	unsigned long period;
	unsigned long opts;
	unsigned long flags;
	void __iomem *base;
	unsigned long tim_off;
	unsigned long prd_off;
	unsigned long enamode_shift;
	struct irqaction irqaction;
};
static struct timer_s timers[];

/* values for 'opts' field of struct timer_s */
#define TIMER_OPTS_DISABLED		0x01
#define TIMER_OPTS_ONESHOT		0x02
#define TIMER_OPTS_PERIODIC		0x04
#define TIMER_OPTS_STATE_MASK		0x07

#define TIMER_OPTS_USE_COMPARE		0x80000000
#define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)

static char *id_to_name[] = {
	[T0_BOT]	= "timer0_0",
	[T0_TOP]	= "timer0_1",
	[T1_BOT]	= "timer1_0",
	[T1_TOP]	= "timer1_1",
};

static int timer32_config(struct timer_s *t)
{
	u32 tcr;
	struct davinci_soc_info *soc_info = &davinci_soc_info;

	if (USING_COMPARE(t)) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(timers[TID_CLOCKEVENT].id);

		/*
		 * Next interrupt should be the current time reg value plus
		 * the new period (using 32-bit unsigned addition/wrapping
		 * to 0 on overflow). This assumes that the clocksource
		 * is set up to count to 2^32-1 before wrapping around to 0.
		 */
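		/*
		 * Worked example of the wrap-around assumption above: if
		 * TIM12 currently reads 0xfffffff0 and the new period is
		 * 0x20, the compare register is programmed to 0x10, so the
		 * match fires shortly after the free-running counter wraps.
		 */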
		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
			t->base + dtip[event_timer].cmp_off);
	} else {
		tcr = __raw_readl(t->base + TCR);

		/* disable timer */
		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
		__raw_writel(tcr, t->base + TCR);

		/* reset counter to zero, set new period */
		__raw_writel(0, t->base + t->tim_off);
		__raw_writel(t->period, t->base + t->prd_off);

		/* Set enable mode */
		if (t->opts & TIMER_OPTS_ONESHOT)
			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
		else if (t->opts & TIMER_OPTS_PERIODIC)
			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;

		__raw_writel(tcr, t->base + TCR);
	}
	return 0;
}

static inline u32 timer32_read(struct timer_s *t)
{
	return __raw_readl(t->base + t->tim_off);
}

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clockevent_davinci;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

/* called when 32-bit counter wraps */
static irqreturn_t freerun_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static struct timer_s timers[] = {
	[TID_CLOCKEVENT] = {
		.name		= "clockevent",
		.opts		= TIMER_OPTS_DISABLED,
		.irqaction	= {
			.flags		= IRQF_TIMER,
			.handler	= timer_interrupt,
		}
	},
	[TID_CLOCKSOURCE] = {
		.name		= "free-run counter",
		.period		= ~0,
		.opts		= TIMER_OPTS_PERIODIC,
		.irqaction	= {
			.flags		= IRQF_TIMER,
			.handler	= freerun_interrupt,
		}
	},
};
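
/*
 * Note: the clocksource timer's period of ~0 (0xffffffff) programs its
 * period register to the maximum value, so the counter free-runs over the
 * full 32-bit range and wraps naturally. The compare-register path in
 * timer32_config() and CLOCKSOURCE_MASK(32) below both rely on this.
 */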

static void __init timer_init(void)
{
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
	void __iomem *base[2];
	int i;

	/* Global init of each 64-bit timer as a whole */
	for (i = 0; i < 2; i++) {
		u32 tgcr;

		base[i] = ioremap(dtip[i].base, SZ_4K);
		if (WARN_ON(!base[i]))
			continue;

		/* Disabled, Internal clock source */
		__raw_writel(0, base[i] + TCR);

		/* reset both timers, no pre-scaler for timer34 */
		tgcr = 0;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Set both timers to unchained 32-bit */
		tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Unreset timers */
		tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
			(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
		__raw_writel(tgcr, base[i] + TGCR);

		/* Init both counters to zero */
		__raw_writel(0, base[i] + TIM12);
		__raw_writel(0, base[i] + TIM34);
	}

	/* Init of each timer as a 32-bit timer */
	for (i = 0; i < ARRAY_SIZE(timers); i++) {
		struct timer_s *t = &timers[i];
		int timer = ID_TO_TIMER(t->id);
		u32 irq;

		t->base = base[timer];
		if (!t->base)
			continue;

		if (IS_TIMER_BOT(t->id)) {
			t->enamode_shift = 6;
			t->tim_off = TIM12;
			t->prd_off = PRD12;
			irq = dtip[timer].bottom_irq;
		} else {
			t->enamode_shift = 22;
			t->tim_off = TIM34;
			t->prd_off = PRD34;
			irq = dtip[timer].top_irq;
		}

		/* Register interrupt */
		t->irqaction.name = t->name;
		t->irqaction.dev_id = (void *)t;

		if (t->irqaction.handler != NULL) {
			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
			setup_irq(irq, &t->irqaction);
		}
	}
}

/*
 * clocksource
 */
static u64 read_cycles(struct clocksource *cs)
{
	struct timer_s *t = &timers[TID_CLOCKSOURCE];

	return (cycles_t)timer32_read(t);
}

static struct clocksource clocksource_davinci = {
	.rating		= 300,
	.read		= read_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Override the weak default sched_clock with something more precise
 */
static u64 notrace davinci_read_sched_clock(void)
{
	return timer32_read(&timers[TID_CLOCKSOURCE]);
}

/*
 * clockevent
 */
static int davinci_set_next_event(unsigned long cycles,
				  struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->period = cycles;
	timer32_config(t);
	return 0;
}

static int davinci_shutdown(struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->opts &= ~TIMER_OPTS_STATE_MASK;
	t->opts |= TIMER_OPTS_DISABLED;
	return 0;
}

static int davinci_set_oneshot(struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->opts &= ~TIMER_OPTS_STATE_MASK;
	t->opts |= TIMER_OPTS_ONESHOT;
	return 0;
}

static int davinci_set_periodic(struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->period = davinci_clock_tick_rate / HZ;
	t->opts &= ~TIMER_OPTS_STATE_MASK;
	t->opts |= TIMER_OPTS_PERIODIC;
	timer32_config(t);
	return 0;
}

static struct clock_event_device clockevent_davinci = {
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event		= davinci_set_next_event,
	.set_state_shutdown	= davinci_shutdown,
	.set_state_periodic	= davinci_set_periodic,
	.set_state_oneshot	= davinci_set_oneshot,
};
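
/*
 * The .name, .cpumask, min/max delta and tick rate for this clockevent are
 * filled in by davinci_timer_init() below, once the timer clock rate and the
 * SoC's timer assignment are known.
 */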

void __init davinci_timer_init(struct clk *timer_clk)
{
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	unsigned int clockevent_id;
	unsigned int clocksource_id;
	int i;

	clockevent_id = soc_info->timer_info->clockevent_id;
	clocksource_id = soc_info->timer_info->clocksource_id;

	timers[TID_CLOCKEVENT].id = clockevent_id;
	timers[TID_CLOCKSOURCE].id = clocksource_id;

	/*
	 * If using the same timer for both clock events & clocksource,
	 * a compare register must be used to generate an event interrupt.
	 * This is equivalent to a oneshot timer only (not periodic).
	 */
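	/*
	 * Rationale: in compare mode, timer32_config() programs a compare
	 * register relative to the running counter instead of resetting
	 * TIM12, so the same counter can keep free-running for the
	 * clocksource; hardware periodic mode is not usable in that case.
	 */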
	if (clockevent_id == clocksource_id) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(clockevent_id);

		/* Only bottom timers can use compare regs */
		if (IS_TIMER_TOP(clockevent_id))
			pr_warn("%s: Invalid use of system timers. Results unpredictable.\n",
				__func__);
		else if ((dtip[event_timer].cmp_off == 0)
				|| (dtip[event_timer].cmp_irq == 0))
			pr_warn("%s: Invalid timer instance setup. Results unpredictable.\n",
				__func__);
		else {
			timers[TID_CLOCKEVENT].opts |= TIMER_OPTS_USE_COMPARE;
			clockevent_davinci.features = CLOCK_EVT_FEAT_ONESHOT;
		}
	}

	BUG_ON(IS_ERR(timer_clk));
	clk_prepare_enable(timer_clk);

	/* init timer hw */
	timer_init();

	davinci_clock_tick_rate = clk_get_rate(timer_clk);

	/* setup clocksource */
	clocksource_davinci.name = id_to_name[clocksource_id];
	if (clocksource_register_hz(&clocksource_davinci,
				    davinci_clock_tick_rate))
		pr_err("%s: can't register clocksource!\n",
		       clocksource_davinci.name);

	sched_clock_register(davinci_read_sched_clock, 32,
			     davinci_clock_tick_rate);

	/* setup clockevent */
	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];

	clockevent_davinci.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clockevent_davinci,
					davinci_clock_tick_rate, 1, 0xfffffffe);

	for (i = 0; i < ARRAY_SIZE(timers); i++)
		timer32_config(&timers[i]);
}