/*
 *  linux/arch/arm/plat-mxc/time.c
 *
 *  Copyright (C) 2000-2001 Deep Blue Solutions
 *  Copyright (C) 2002 Shane Nay (shane@minirl.com)
 *  Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
 *  Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/mach/time.h>

#include "common.h"
#include "hardware.h"

/*
 * There are 2 versions of the timer hardware on Freescale MXC hardware.
 * Version 1: MX1/MXL, MX21, MX27.
 * Version 2: MX25, MX31, MX35, MX37, MX51
 */

/* defines common for all i.MX */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0) /* Enable module */
#define MXC_TPRER		0x04

/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* MX21, MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

#define V2_TIMER_RATE_OSC_DIV8	3000000

#define timer_is_v1()	(cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
#define timer_is_v2()	(!timer_is_v1())

static struct clock_event_device clockevent_mxc;
static enum clock_event_mode clockevent_mode = CLOCK_EVT_MODE_UNUSED;

static void __iomem *timer_base;

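/*
 * GPT interrupt control helpers: version 2 timers have a dedicated
 * interrupt register (V2_IR), while version 1 timers use the IRQEN
 * bit in the control register.
 */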
static inline void gpt_irq_disable(void)
{
	unsigned int tmp;

	if (timer_is_v2())
		__raw_writel(0, timer_base + V2_IR);
	else {
		tmp = __raw_readl(timer_base + MXC_TCTL);
		__raw_writel(tmp & ~MX1_2_TCTL_IRQEN, timer_base + MXC_TCTL);
	}
}

static inline void gpt_irq_enable(void)
{
	if (timer_is_v2())
		__raw_writel(1<<0, timer_base + V2_IR);
	else {
		__raw_writel(__raw_readl(timer_base + MXC_TCTL) | MX1_2_TCTL_IRQEN,
			timer_base + MXC_TCTL);
	}
}

static void gpt_irq_acknowledge(void)
{
	if (timer_is_v1()) {
		if (cpu_is_mx1())
			__raw_writel(0, timer_base + MX1_2_TSTAT);
		else
			__raw_writel(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
				timer_base + MX1_2_TSTAT);
	} else if (timer_is_v2())
		__raw_writel(V2_TSTAT_OF1, timer_base + V2_TSTAT);
}

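/*
 * The free-running counter register is reused both as the sched_clock
 * source and as the timer-based delay implementation.
 */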
static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
	return sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
}

static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
	return __raw_readl(sched_clock_reg);
}

static int __init mxc_clocksource_init(struct clk *timer_clk)
{
	unsigned int c = clk_get_rate(timer_clk);
	void __iomem *reg = timer_base + (timer_is_v2() ? V2_TCN : MX1_2_TCN);

	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = c;
	register_current_timer_delay(&imx_delay_timer);

	sched_clock_reg = reg;

	sched_clock_register(mxc_read_sched_clock, 32, c);
	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
			clocksource_mmio_readl_up);
}

/* clock event */

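/*
 * Program the next event by writing "current count + evt" into the
 * compare register.  If the counter has already passed the new compare
 * value by the time it is re-read, the event was set too close and
 * -ETIME is returned so the clockevents core can retry.
 */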
static int mx1_2_set_next_event(unsigned long evt,
			      struct clock_event_device *unused)
{
	unsigned long tcmp;

	tcmp = __raw_readl(timer_base + MX1_2_TCN) + evt;

	__raw_writel(tcmp, timer_base + MX1_2_TCMP);

	return (int)(tcmp - __raw_readl(timer_base + MX1_2_TCN)) < 0 ?
				-ETIME : 0;
}

static int v2_set_next_event(unsigned long evt,
			      struct clock_event_device *unused)
{
	unsigned long tcmp;

	tcmp = __raw_readl(timer_base + V2_TCN) + evt;

	__raw_writel(tcmp, timer_base + V2_TCMP);

	return evt < 0x7fffffff &&
		(int)(tcmp - __raw_readl(timer_base + V2_TCN)) < 0 ?
				-ETIME : 0;
}

#ifdef DEBUG
static const char *clock_event_mode_label[] = {
	[CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC",
	[CLOCK_EVT_MODE_ONESHOT]  = "CLOCK_EVT_MODE_ONESHOT",
	[CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN",
	[CLOCK_EVT_MODE_UNUSED]   = "CLOCK_EVT_MODE_UNUSED",
	[CLOCK_EVT_MODE_RESUME]   = "CLOCK_EVT_MODE_RESUME",
};
#endif /* DEBUG */

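/*
 * Mode changes are done with the GPT interrupt masked: the compare
 * register is parked well ahead of the counter, any pending status is
 * acknowledged, and the interrupt is re-enabled only for oneshot mode.
 * Periodic mode is not supported by this driver.
 */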
static void mxc_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	unsigned long flags;

	/*
	 * The timer interrupt generation is disabled at least
	 * for enough time to call mxc_set_next_event()
	 */
	local_irq_save(flags);

	/* Disable interrupt in GPT module */
	gpt_irq_disable();

	if (mode != clockevent_mode) {
		/* Set event time into far-far future */
		if (timer_is_v2())
			__raw_writel(__raw_readl(timer_base + V2_TCN) - 3,
					timer_base + V2_TCMP);
		else
			__raw_writel(__raw_readl(timer_base + MX1_2_TCN) - 3,
					timer_base + MX1_2_TCMP);

		/* Clear pending interrupt */
		gpt_irq_acknowledge();
	}

#ifdef DEBUG
	printk(KERN_INFO "mxc_set_mode: changing mode from %s to %s\n",
		clock_event_mode_label[clockevent_mode],
		clock_event_mode_label[mode]);
#endif /* DEBUG */

	/* Remember timer mode */
	clockevent_mode = mode;
	local_irq_restore(flags);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		printk(KERN_ERR"mxc_set_mode: Periodic mode is not "
				"supported for i.MX\n");
		break;
	case CLOCK_EVT_MODE_ONESHOT:
	/*
	 * Do not put overhead of interrupt enable/disable into
	 * mxc_set_next_event(), the core has about 4 minutes
	 * to call mxc_set_next_event() or shutdown clock after
	 * mode switching
	 */
		local_irq_save(flags);
		gpt_irq_enable();
		local_irq_restore(flags);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_RESUME:
		/* Leave event sources disabled; no more interrupts appear */
		break;
	}
}

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clockevent_mxc;
	uint32_t tstat;

	if (timer_is_v2())
		tstat = __raw_readl(timer_base + V2_TSTAT);
	else
		tstat = __raw_readl(timer_base + MX1_2_TSTAT);

	gpt_irq_acknowledge();

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mxc_timer_irq = {
	.name		= "i.MX Timer Tick",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= mxc_timer_interrupt,
};

static struct clock_event_device clockevent_mxc = {
	.name		= "mxc_timer1",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= mxc_set_mode,
	.set_next_event	= mx1_2_set_next_event,
	.rating		= 200,
};

static int __init mxc_clockevent_init(struct clk *timer_clk)
{
	if (timer_is_v2())
		clockevent_mxc.set_next_event = v2_set_next_event;

	clockevent_mxc.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clockevent_mxc,
					clk_get_rate(timer_clk),
					0xff, 0xfffffffe);

	return 0;
}

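/*
 * Common init path for both the legacy and DT entry points: enable the
 * timer clocks, reset the control and prescaler registers, select the
 * clock source (the 24 MHz osc divided by 8 where available, otherwise
 * the peripheral clock on v2, PCLK1 on v1), start the counter in
 * free-running mode and register the clocksource, clockevent and IRQ.
 */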
static void __init _mxc_timer_init(int irq,
				   struct clk *clk_per, struct clk *clk_ipg)
{
	uint32_t tctl_val;

	if (IS_ERR(clk_per)) {
		pr_err("i.MX timer: unable to get clk\n");
		return;
	}

	if (!IS_ERR(clk_ipg))
		clk_prepare_enable(clk_ipg);

	clk_prepare_enable(clk_per);

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */

	__raw_writel(0, timer_base + MXC_TCTL);
	__raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */

	if (timer_is_v2()) {
		tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
		if (clk_get_rate(clk_per) == V2_TIMER_RATE_OSC_DIV8) {
			tctl_val |= V2_TCTL_CLK_OSC_DIV8;
			if (cpu_is_imx6dl() || cpu_is_imx6sx()) {
				/* 24 / 8 = 3 MHz */
				__raw_writel(7 << V2_TPRER_PRE24M,
					timer_base + MXC_TPRER);
				tctl_val |= V2_TCTL_24MEN;
			}
		} else {
			tctl_val |= V2_TCTL_CLK_PER;
		}
	} else {
		tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
	}

	__raw_writel(tctl_val, timer_base + MXC_TCTL);

	/* init and register the timer to the framework */
	mxc_clocksource_init(clk_per);
	mxc_clockevent_init(clk_per);

	/* Make irqs happen */
	setup_irq(irq, &mxc_timer_irq);
}

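/* Legacy (non-DT) entry point: look up the GPT clocks by their global names. */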
void __init mxc_timer_init(void __iomem *base, int irq)
{
	struct clk *clk_per = clk_get_sys("imx-gpt.0", "per");
	struct clk *clk_ipg = clk_get_sys("imx-gpt.0", "ipg");

	timer_base = base;

	_mxc_timer_init(irq, clk_per, clk_ipg);
}

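/*
 * DT entry point.  Only the first GPT node found is used; once
 * timer_base is set, any further matching nodes are ignored.
 */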
static void __init mxc_timer_init_dt(struct device_node *np)
{
	struct clk *clk_per, *clk_ipg;
	int irq;

	if (timer_base)
		return;

	timer_base = of_iomap(np, 0);
	WARN_ON(!timer_base);
	irq = irq_of_parse_and_map(np, 0);

	clk_ipg = of_clk_get_by_name(np, "ipg");

	/* Try osc_per first, and fall back to per otherwise */
	clk_per = of_clk_get_by_name(np, "osc_per");
	if (IS_ERR(clk_per))
		clk_per = of_clk_get_by_name(np, "per");

	_mxc_timer_init(irq, clk_per, clk_ipg);
}
CLOCKSOURCE_OF_DECLARE(mx1_timer, "fsl,imx1-gpt", mxc_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(mx25_timer, "fsl,imx25-gpt", mxc_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(mx50_timer, "fsl,imx50-gpt", mxc_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(mx51_timer, "fsl,imx51-gpt", mxc_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(mx53_timer, "fsl,imx53-gpt", mxc_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(mx6q_timer, "fsl,imx6q-gpt", mxc_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(mx6sl_timer, "fsl,imx6sl-gpt", mxc_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(mx6sx_timer, "fsl,imx6sx-gpt", mxc_timer_init_dt);