/*
 * linux/arch/arm/plat-omap/dmtimer.c
 *
 * OMAP Dual-Mode Timers
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 * Tarun Kanti DebBarma <tarun.kanti@ti.com>
 * Thara Gopinath <thara@ti.com>
 *
 * dmtimer adaptation to platform_driver.
 *
 * Copyright (C) 2005 Nokia Corporation
 * OMAP2 support by Juha Yrjola
 * API improvements and OMAP2 clock framework support by Timo Teras
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>

#include <plat/dmtimer.h>

#include <mach/hardware.h>

static LIST_HEAD(omap_timer_list);
static DEFINE_SPINLOCK(dm_timer_lock);

/**
 * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
 * @timer:      timer pointer over which the read operation is performed
 * @reg:        lowest byte holds the register offset
 *
 * The posted mode bit is encoded in reg. Note that in posted mode the write
 * pending bit must be checked, otherwise a read of a not yet completed write
 * will produce an error.
 */
static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
{
	WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
	return __omap_dm_timer_read(timer, reg, timer->posted);
}

/**
 * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
 * @timer:      timer pointer over which the write operation is performed
 * @reg:        lowest byte holds the register offset
 * @value:      data to write into the register
 *
 * The posted mode bit is encoded in reg. Note that in posted mode the write
 * pending bit must be checked, otherwise a write to a register which still
 * has a pending write will be lost.
 */
static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
						u32 value)
{
	WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
	__omap_dm_timer_write(timer, reg, value, timer->posted);
}

static void omap_timer_restore_context(struct omap_dm_timer *timer)
{
	__raw_writel(timer->context.tiocp_cfg,
			timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET);
	if (timer->revision == 1)
		__raw_writel(timer->context.tistat, timer->sys_stat);

	__raw_writel(timer->context.tisr, timer->irq_stat);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
				timer->context.twer);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
				timer->context.tcrr);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
				timer->context.tldr);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
				timer->context.tmar);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
				timer->context.tsicr);
	__raw_writel(timer->context.tier, timer->irq_ena);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
				timer->context.tclr);
}

static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
{
	int c;

	if (!timer->sys_stat)
		return;

	c = 0;
	while (!(__raw_readl(timer->sys_stat) & 1)) {
		c++;
		if (c > 100000) {
			printk(KERN_ERR "Timer failed to reset\n");
			return;
		}
	}
}

static void omap_dm_timer_reset(struct omap_dm_timer *timer)
{
	omap_dm_timer_enable(timer);
	if (timer->pdev->id != 1) {
		omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
		omap_dm_timer_wait_for_reset(timer);
	}

	__omap_dm_timer_reset(timer, 0, 0);
	omap_dm_timer_disable(timer);
	timer->posted = 1;
}

int omap_dm_timer_prepare(struct omap_dm_timer *timer)
{
	struct dmtimer_platform_data *pdata = timer->pdev->dev.platform_data;
	int ret;

	timer->fclk = clk_get(&timer->pdev->dev, "fck");
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
		timer->fclk = NULL;
		dev_err(&timer->pdev->dev, ": No fclk handle.\n");
		return -EINVAL;
	}

	if (pdata->needs_manual_reset)
		omap_dm_timer_reset(timer);

	ret = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);

	timer->posted = 1;
	return ret;
}

struct omap_dm_timer *omap_dm_timer_request(void)
{
	struct omap_dm_timer *timer = NULL, *t;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dm_timer_lock, flags);
	list_for_each_entry(t, &omap_timer_list, node) {
		if (t->reserved)
			continue;

		timer = t;
		timer->reserved = 1;
		break;
	}

	if (timer) {
		ret = omap_dm_timer_prepare(timer);
		if (ret) {
			timer->reserved = 0;
			timer = NULL;
		}
	}
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	if (!timer)
		pr_debug("%s: timer request failed!\n", __func__);

	return timer;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_request);

struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
	struct omap_dm_timer *timer = NULL, *t;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dm_timer_lock, flags);
	list_for_each_entry(t, &omap_timer_list, node) {
		if (t->pdev->id == id && !t->reserved) {
			timer = t;
			timer->reserved = 1;
			break;
		}
	}

	if (timer) {
		ret = omap_dm_timer_prepare(timer);
		if (ret) {
			timer->reserved = 0;
			timer = NULL;
		}
	}
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	if (!timer)
		pr_debug("%s: timer%d request failed!\n", __func__, id);

	return timer;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific);
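
/*
 * Illustrative usage sketch (not part of the original driver): a client
 * driver typically grabs a free timer, programs it through the exported
 * helpers below and releases it again when done. The "gpt" handle and the
 * surrounding error handling are assumptions made for the example only.
 *
 *	struct omap_dm_timer *gpt = omap_dm_timer_request();
 *
 *	if (!gpt)
 *		return -EBUSY;
 *	...
 *	omap_dm_timer_stop(gpt);
 *	omap_dm_timer_free(gpt);
 */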

int omap_dm_timer_free(struct omap_dm_timer *timer)
{
	if (unlikely(!timer))
		return -EINVAL;

	clk_put(timer->fclk);

	WARN_ON(!timer->reserved);
	timer->reserved = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_free);

void omap_dm_timer_enable(struct omap_dm_timer *timer)
{
	pm_runtime_get_sync(&timer->pdev->dev);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_enable);

void omap_dm_timer_disable(struct omap_dm_timer *timer)
{
	pm_runtime_put_sync(&timer->pdev->dev);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_disable);

int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
{
	if (timer)
		return timer->irq;
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq);

#if defined(CONFIG_ARCH_OMAP1)

/**
 * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
 * @inputmask: current value of idlect mask
 */
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
	int i = 0;
	struct omap_dm_timer *timer = NULL;
	unsigned long flags;

	/* If ARMXOR cannot be idled this function call is unnecessary */
	if (!(inputmask & (1 << 1)))
		return inputmask;

	/* If any active timer is using ARMXOR return modified mask */
	spin_lock_irqsave(&dm_timer_lock, flags);
	list_for_each_entry(timer, &omap_timer_list, node) {
		u32 l;

		l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
		if (l & OMAP_TIMER_CTRL_ST) {
			if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
				inputmask &= ~(1 << 1);
			else
				inputmask &= ~(1 << 2);
		}
		i++;
	}
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	return inputmask;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);

#else

struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
{
	if (timer)
		return timer->fclk;
	return NULL;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_get_fclk);

__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
	BUG();

	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);

#endif

int omap_dm_timer_trigger(struct omap_dm_timer *timer)
{
	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
		pr_err("%s: timer not available or enabled.\n", __func__);
		return -EINVAL;
	}

	omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_trigger);

int omap_dm_timer_start(struct omap_dm_timer *timer)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);

	if (timer->loses_context) {
		u32 ctx_loss_cnt_after =
			timer->get_context_loss_count(&timer->pdev->dev);
		if (ctx_loss_cnt_after != timer->ctx_loss_count)
			omap_timer_restore_context(timer);
	}

	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	if (!(l & OMAP_TIMER_CTRL_ST)) {
		l |= OMAP_TIMER_CTRL_ST;
		omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
	}

	/* Save the context */
	timer->context.tclr = l;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_start);

int omap_dm_timer_stop(struct omap_dm_timer *timer)
{
	unsigned long rate = 0;
	struct dmtimer_platform_data *pdata;

	if (unlikely(!timer))
		return -EINVAL;

	/* Only dereference the timer after the NULL check above */
	pdata = timer->pdev->dev.platform_data;

	if (!pdata->needs_manual_reset)
		rate = clk_get_rate(timer->fclk);

	__omap_dm_timer_stop(timer, timer->posted, rate);

	if (timer->loses_context && timer->get_context_loss_count)
		timer->ctx_loss_count =
			timer->get_context_loss_count(&timer->pdev->dev);

	/*
	 * Since the register values are computed and written within
	 * __omap_dm_timer_stop, we need to use read to retrieve the
	 * context.
	 */
	timer->context.tclr =
			omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	timer->context.tisr = __raw_readl(timer->irq_stat);
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_stop);

int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
	int ret;
	struct dmtimer_platform_data *pdata;

	if (unlikely(!timer))
		return -EINVAL;

	pdata = timer->pdev->dev.platform_data;

	if (source < 0 || source >= 3)
		return -EINVAL;

	ret = pdata->set_timer_src(timer->pdev, source);

	return ret;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);

int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
			    unsigned int load)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	if (autoreload)
		l |= OMAP_TIMER_CTRL_AR;
	else
		l &= ~OMAP_TIMER_CTRL_AR;
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);

	omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
	/* Save the context */
	timer->context.tclr = l;
	timer->context.tldr = load;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load);
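
/*
 * Illustrative sketch (not part of the original driver): setting up a
 * periodic overflow interrupt with the API above. The counter counts up
 * from the load value to 0xffffffff, so the overflow period is
 * (0xffffffff - load + 1) timer ticks; with a 32 kHz source a load value
 * of 0xffffffe0 gives roughly a 1 ms period. OMAP_TIMER_INT_OVERFLOW is
 * assumed to be the overflow interrupt flag from plat/dmtimer.h and "gpt"
 * a previously requested timer handle.
 *
 *	omap_dm_timer_set_source(gpt, OMAP_TIMER_SRC_32_KHZ);
 *	omap_dm_timer_set_load(gpt, 1, 0xffffffe0);
 *	omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW);
 *	omap_dm_timer_start(gpt);
 */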

/* Optimized set_load which removes costly spin wait in timer_start */
int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
			    unsigned int load)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);

	if (timer->loses_context) {
		u32 ctx_loss_cnt_after =
			timer->get_context_loss_count(&timer->pdev->dev);
		if (ctx_loss_cnt_after != timer->ctx_loss_count)
			omap_timer_restore_context(timer);
	}

	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	if (autoreload) {
		l |= OMAP_TIMER_CTRL_AR;
		omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
	} else {
		l &= ~OMAP_TIMER_CTRL_AR;
	}
	l |= OMAP_TIMER_CTRL_ST;

	__omap_dm_timer_load_start(timer, l, load, timer->posted);

	/* Save the context */
	timer->context.tclr = l;
	timer->context.tldr = load;
	timer->context.tcrr = load;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start);

int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
			     unsigned int match)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	if (enable)
		l |= OMAP_TIMER_CTRL_CE;
	else
		l &= ~OMAP_TIMER_CTRL_CE;
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
	omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);

	/* Save the context */
	timer->context.tclr = l;
	timer->context.tmar = match;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_match);

int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
			   int toggle, int trigger)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
	       OMAP_TIMER_CTRL_PT | (0x03 << 10));
	if (def_on)
		l |= OMAP_TIMER_CTRL_SCPWM;
	if (toggle)
		l |= OMAP_TIMER_CTRL_PT;
	l |= trigger << 10;
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);

	/* Save the context */
	timer->context.tclr = l;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm);
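
/*
 * Illustrative PWM configuration sketch (not part of the original driver):
 * the period is set via the load value, the duty cycle via the match value,
 * and the output is toggled on compare and overflow events. The trigger
 * value 2 (overflow-and-compare) and the "gpt" handle are assumptions for
 * the example; plat/dmtimer.h is expected to provide a named constant such
 * as OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE for the trigger argument.
 *
 *	omap_dm_timer_set_load(gpt, 1, 0xffff0000);	(0x10000-tick period)
 *	omap_dm_timer_set_match(gpt, 1, 0xffff8000);	(about 50% duty cycle)
 *	omap_dm_timer_set_pwm(gpt, 0, 1, 2);
 *	omap_dm_timer_start(gpt);
 */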

int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
{
	u32 l;

	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
	l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
	if (prescaler >= 0x00 && prescaler <= 0x07) {
		l |= OMAP_TIMER_CTRL_PRE;
		l |= prescaler << 2;
	}
	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);

	/* Save the context */
	timer->context.tclr = l;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler);
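
/*
 * Prescaler arithmetic (informative note, not in the original source): when
 * the PRE bit is set the counter is clocked at fclk / 2^(prescaler + 1), so
 * prescaler values 0..7 divide the functional clock by 2..256. Passing a
 * value outside 0..7 (e.g. -1) leaves PRE cleared and the timer runs at the
 * undivided fclk rate. The "gpt" handle below is an assumption.
 *
 *	omap_dm_timer_set_prescaler(gpt, 3);	(divide fclk by 16)
 *	omap_dm_timer_set_prescaler(gpt, -1);	(no prescaling)
 */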

int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
				  unsigned int value)
{
	if (unlikely(!timer))
		return -EINVAL;

	omap_dm_timer_enable(timer);
	__omap_dm_timer_int_enable(timer, value);

	/* Save the context */
	timer->context.tier = value;
	timer->context.twer = value;
	omap_dm_timer_disable(timer);
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable);

unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
{
	unsigned int l;

	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
		pr_err("%s: timer not available or enabled.\n", __func__);
		return 0;
	}

	l = __raw_readl(timer->irq_stat);

	return l;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_status);

int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
{
	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev)))
		return -EINVAL;

	__omap_dm_timer_write_status(timer, value);
	/* Save the context */
	timer->context.tisr = value;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);
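
/*
 * Illustrative interrupt-acknowledge sketch (not part of the original
 * driver): the status register is write-1-to-clear, so a client interrupt
 * handler typically reads the pending flags and writes the same value back
 * to acknowledge them. The handler name and the "gpt" handle are assumptions
 * made for the example.
 *
 *	static irqreturn_t my_timer_isr(int irq, void *dev_id)
 *	{
 *		unsigned int status = omap_dm_timer_read_status(gpt);
 *
 *		omap_dm_timer_write_status(gpt, status);
 *		return IRQ_HANDLED;
 *	}
 */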

unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
{
	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
		pr_err("%s: timer not available or enabled.\n", __func__);
		return 0;
	}

	return __omap_dm_timer_read_counter(timer, timer->posted);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter);

int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
{
	if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
		pr_err("%s: timer not available or enabled.\n", __func__);
		return -EINVAL;
	}

	omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);

	/* Save the context */
	timer->context.tcrr = value;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_counter);

int omap_dm_timers_active(void)
{
	struct omap_dm_timer *timer;

	list_for_each_entry(timer, &omap_timer_list, node) {
		if (!timer->reserved)
			continue;

		if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
		    OMAP_TIMER_CTRL_ST) {
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(omap_dm_timers_active);

/**
 * omap_dm_timer_probe - probe function called for every registered device
 * @pdev:	pointer to current timer platform device
 *
 * Called by driver framework at the end of device registration for all
 * timer devices.
 */
static int __devinit omap_dm_timer_probe(struct platform_device *pdev)
{
	int ret;
	unsigned long flags;
	struct omap_dm_timer *timer;
	struct resource *mem, *irq, *ioarea;
	struct dmtimer_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "%s: no platform data.\n", __func__);
		return -ENODEV;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!irq)) {
		dev_err(&pdev->dev, "%s: no IRQ resource.\n", __func__);
		return -ENODEV;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!mem)) {
		dev_err(&pdev->dev, "%s: no memory resource.\n", __func__);
		return -ENODEV;
	}

	ioarea = request_mem_region(mem->start, resource_size(mem),
			pdev->name);
	if (!ioarea) {
		dev_err(&pdev->dev, "%s: region already claimed.\n", __func__);
		return -EBUSY;
	}

	timer = kzalloc(sizeof(struct omap_dm_timer), GFP_KERNEL);
	if (!timer) {
		dev_err(&pdev->dev, "%s: no memory for omap_dm_timer.\n",
			__func__);
		ret = -ENOMEM;
		goto err_free_ioregion;
	}

	timer->io_base = ioremap(mem->start, resource_size(mem));
	if (!timer->io_base) {
		dev_err(&pdev->dev, "%s: ioremap failed.\n", __func__);
		ret = -ENOMEM;
		goto err_free_mem;
	}

	timer->id = pdev->id;
	timer->irq = irq->start;
	timer->reserved = pdata->reserved;
	timer->pdev = pdev;
	timer->loses_context = pdata->loses_context;
	timer->get_context_loss_count = pdata->get_context_loss_count;

	/* Skip pm_runtime_enable for OMAP1 */
	if (!pdata->needs_manual_reset) {
		pm_runtime_enable(&pdev->dev);
		pm_runtime_irq_safe(&pdev->dev);
	}

	if (!timer->reserved) {
		pm_runtime_get_sync(&pdev->dev);
		__omap_dm_timer_init_regs(timer);
		pm_runtime_put(&pdev->dev);
	}

	/* add the timer element to the list */
	spin_lock_irqsave(&dm_timer_lock, flags);
	list_add_tail(&timer->node, &omap_timer_list);
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	dev_dbg(&pdev->dev, "Device Probed.\n");

	return 0;

err_free_mem:
	kfree(timer);

err_free_ioregion:
	release_mem_region(mem->start, resource_size(mem));

	return ret;
}

/**
 * omap_dm_timer_remove - cleanup a registered timer device
 * @pdev:	pointer to current timer platform device
 *
 * Called by driver framework whenever a timer device is unregistered.
 * In addition to freeing platform resources it also deletes the timer
 * entry from the local list.
 */
static int __devexit omap_dm_timer_remove(struct platform_device *pdev)
{
	struct omap_dm_timer *timer;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dm_timer_lock, flags);
	list_for_each_entry(timer, &omap_timer_list, node)
		if (timer->pdev->id == pdev->id) {
			list_del(&timer->node);
			kfree(timer);
			ret = 0;
			break;
		}
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	return ret;
}

static struct platform_driver omap_dm_timer_driver = {
	.probe  = omap_dm_timer_probe,
	.remove = __devexit_p(omap_dm_timer_remove),
	.driver = {
		.name   = "omap_timer",
	},
};

static int __init omap_dm_timer_driver_init(void)
{
	return platform_driver_register(&omap_dm_timer_driver);
}

static void __exit omap_dm_timer_driver_exit(void)
{
	platform_driver_unregister(&omap_dm_timer_driver);
}

early_platform_init("earlytimer", &omap_dm_timer_driver);
module_init(omap_dm_timer_driver_init);
module_exit(omap_dm_timer_driver_exit);

MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");