// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - MTU2
 *
 *  Copyright (C) 2009 Magnus Damm
 */
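
/*
 * The MTU2 (Multi-Function Timer Pulse Unit 2) provides several 16-bit
 * timer channels. This driver programs a channel as a periodic clockevent
 * device: the counter is cleared on TGRA compare match and the TGIA
 * interrupt drives the clockevent tick.
 */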

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sh_mtu2_device;

struct sh_mtu2_channel {
	struct sh_mtu2_device *mtu;
	unsigned int index;

	void __iomem *base;

	struct clock_event_device ced;
};

struct sh_mtu2_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	raw_spinlock_t lock; /* Protect the shared registers */

	struct sh_mtu2_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
};

#define TSTR -1 /* shared register */
#define TCR  0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR  4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR  6 /* channel register */

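/*
 * TCR fields: CCLR selects the counter clear source, CKEG the input clock
 * edge, and TPSC the timer prescaler. Several TPSC encodings are
 * channel-dependent, hence the per-channel TCR_TPSC_CHx_* variants below.
 */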
#define TCR_CCLR_NONE		(0 << 5)
#define TCR_CCLR_TGRA		(1 << 5)
#define TCR_CCLR_TGRB		(2 << 5)
#define TCR_CCLR_SYNC		(3 << 5)
#define TCR_CCLR_TGRC		(5 << 5)
#define TCR_CCLR_TGRD		(6 << 5)
#define TCR_CCLR_MASK		(7 << 5)
#define TCR_CKEG_RISING		(0 << 3)
#define TCR_CKEG_FALLING	(1 << 3)
#define TCR_CKEG_BOTH		(2 << 3)
#define TCR_CKEG_MASK		(3 << 3)
/* Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1		(0 << 0)
#define TCR_TPSC_P4		(1 << 0)
#define TCR_TPSC_P16		(2 << 0)
#define TCR_TPSC_P64		(3 << 0)
#define TCR_TPSC_CH0_TCLKA	(4 << 0)
#define TCR_TPSC_CH0_TCLKB	(5 << 0)
#define TCR_TPSC_CH0_TCLKC	(6 << 0)
#define TCR_TPSC_CH0_TCLKD	(7 << 0)
#define TCR_TPSC_CH1_TCLKA	(4 << 0)
#define TCR_TPSC_CH1_TCLKB	(5 << 0)
#define TCR_TPSC_CH1_P256	(6 << 0)
#define TCR_TPSC_CH1_TCNT2	(7 << 0)
#define TCR_TPSC_CH2_TCLKA	(4 << 0)
#define TCR_TPSC_CH2_TCLKB	(5 << 0)
#define TCR_TPSC_CH2_TCLKC	(6 << 0)
#define TCR_TPSC_CH2_P1024	(7 << 0)
#define TCR_TPSC_CH34_P256	(4 << 0)
#define TCR_TPSC_CH34_P1024	(5 << 0)
#define TCR_TPSC_CH34_TCLKA	(6 << 0)
#define TCR_TPSC_CH34_TCLKB	(7 << 0)
#define TCR_TPSC_MASK		(7 << 0)

#define TMDR_BFE		(1 << 6)
#define TMDR_BFB		(1 << 5)
#define TMDR_BFA		(1 << 4)
#define TMDR_MD_NORMAL		(0 << 0)
#define TMDR_MD_PWM_1		(2 << 0)
#define TMDR_MD_PWM_2		(3 << 0)
#define TMDR_MD_PHASE_1		(4 << 0)
#define TMDR_MD_PHASE_2		(5 << 0)
#define TMDR_MD_PHASE_3		(6 << 0)
#define TMDR_MD_PHASE_4		(7 << 0)
#define TMDR_MD_PWM_SYNC	(8 << 0)
#define TMDR_MD_PWM_COMP_CREST	(13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH	(14 << 0)
#define TMDR_MD_PWM_COMP_BOTH	(15 << 0)
#define TMDR_MD_MASK		(15 << 0)

#define TIOC_IOCH(n)		((n) << 4)
#define TIOC_IOCL(n)		((n) << 0)
#define TIOR_OC_RETAIN		(0 << 0)
#define TIOR_OC_0_CLEAR		(1 << 0)
#define TIOR_OC_0_SET		(2 << 0)
#define TIOR_OC_0_TOGGLE	(3 << 0)
#define TIOR_OC_1_CLEAR		(5 << 0)
#define TIOR_OC_1_SET		(6 << 0)
#define TIOR_OC_1_TOGGLE	(7 << 0)
#define TIOR_IC_RISING		(8 << 0)
#define TIOR_IC_FALLING		(9 << 0)
#define TIOR_IC_BOTH		(10 << 0)
#define TIOR_IC_TCNT		(12 << 0)
#define TIOR_MASK		(15 << 0)

#define TIER_TTGE		(1 << 7)
#define TIER_TTGE2		(1 << 6)
#define TIER_TCIEU		(1 << 5)
#define TIER_TCIEV		(1 << 4)
#define TIER_TGIED		(1 << 3)
#define TIER_TGIEC		(1 << 2)
#define TIER_TGIEB		(1 << 1)
#define TIER_TGIEA		(1 << 0)

#define TSR_TCFD		(1 << 7)
#define TSR_TCFU		(1 << 5)
#define TSR_TCFV		(1 << 4)
#define TSR_TGFD		(1 << 3)
#define TSR_TGFC		(1 << 2)
#define TSR_TGFB		(1 << 1)
#define TSR_TGFA		(1 << 0)

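/*
 * Byte offsets of the channel registers, relative to the per-channel base
 * (ch->base). The shared TSTR register is handled separately in the
 * accessors below.
 */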
static unsigned long mtu2_reg_offs[] = {
	[TCR] = 0,
	[TMDR] = 1,
	[TIOR] = 2,
	[TIER] = 4,
	[TSR] = 5,
	[TCNT] = 6,
	[TGR] = 8,
};

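/*
 * Register accessors. TSTR is shared by all channels and lives at a fixed
 * offset (0x280) from the device base; TCNT and TGR are 16-bit registers,
 * everything else is accessed as 8-bit.
 */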
static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR)
		return ioread8(ch->mtu->mapbase + 0x280);

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		return ioread16(ch->base + offs);
	else
		return ioread8(ch->base + offs);
}

static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR)
		return iowrite8(value, ch->mtu->mapbase + 0x280);

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		iowrite16(value, ch->base + offs);
	else
		iowrite8(value, ch->base + offs);
}

static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->mtu->lock, flags);
	value = sh_mtu2_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_mtu2_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);
}

static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
{
	unsigned long periodic;
	unsigned long rate;
	int ret;

	pm_runtime_get_sync(&ch->mtu->pdev->dev);
	dev_pm_syscore_device(&ch->mtu->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->mtu->clk);
	if (ret) {
		dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_mtu2_start_stop_ch(ch, 0);

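	/*
	 * The counter runs at the module clock divided by 64 (TPSC = P/64
	 * below); the TGRA compare value is the number of those ticks per
	 * jiffy, rounded to the nearest integer.
	 */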
	rate = clk_get_rate(ch->mtu->clk) / 64;
	periodic = (rate + HZ/2) / HZ;

	/*
	 * "Periodic Counter Operation"
	 * Clear on TGRA compare match, divide clock by 64.
	 */
	sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
	sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
		      TIOC_IOCL(TIOR_OC_0_CLEAR));
	sh_mtu2_write(ch, TGR, periodic);
	sh_mtu2_write(ch, TCNT, 0);
	sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
	sh_mtu2_write(ch, TIER, TIER_TGIEA);

	/* enable channel */
	sh_mtu2_start_stop_ch(ch, 1);

	return 0;
}

static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
{
	/* disable channel */
	sh_mtu2_start_stop_ch(ch, 0);

	/* stop clock */
	clk_disable(ch->mtu->clk);

	dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
	pm_runtime_put(&ch->mtu->pdev->dev);
}

static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
	struct sh_mtu2_channel *ch = dev_id;

	/*
	 * Acknowledge the interrupt: the TSR flag is cleared by reading the
	 * register and then writing 0 to the flag bit, so clear TGFA while
	 * leaving the other flags untouched.
	 */
	sh_mtu2_read(ch, TSR);
	sh_mtu2_write(ch, TSR, ~TSR_TGFA);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_mtu2_channel, ced);
}

static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);

	if (clockevent_state_periodic(ced))
		sh_mtu2_disable(ch);

	return 0;
}

static int sh_mtu2_clock_event_set_periodic(struct clock_event_device *ced)
{
	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);

	if (clockevent_state_periodic(ced))
		sh_mtu2_disable(ch);

	dev_info(&ch->mtu->pdev->dev, "ch%u: used for periodic clock events\n",
		 ch->index);
	sh_mtu2_enable(ch);
	return 0;
}

static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}

static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}

static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
					const char *name)
{
	struct clock_event_device *ced = &ch->ced;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_state_shutdown = sh_mtu2_clock_event_shutdown;
	ced->set_state_periodic = sh_mtu2_clock_event_set_periodic;
	ced->suspend = sh_mtu2_clock_event_suspend;
	ced->resume = sh_mtu2_clock_event_resume;

	dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);
}

static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
{
	ch->mtu->has_clockevent = true;
	sh_mtu2_register_clockevent(ch, name);

	return 0;
}

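/*
 * Per-channel register block offsets, relative to the device base. Note
 * that channel 2 sits below channels 0 and 1 in the register map; only
 * the first three channels are supported by this driver.
 */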
static const unsigned int sh_mtu2_channel_offsets[] = {
	0x300, 0x380, 0x000,
};

static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
				 struct sh_mtu2_device *mtu)
{
	char name[6];
	int irq;
	int ret;

	ch->mtu = mtu;

	/* Look up the channel's TGIA (TGRA compare match) interrupt by name. */
	sprintf(name, "tgi%ua", index);
	irq = platform_get_irq_byname(mtu->pdev, name);
	if (irq < 0) {
		/* Skip channels with no declared interrupt. */
		return 0;
	}

	ret = request_irq(irq, sh_mtu2_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->mtu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
			index, irq);
		return ret;
	}

	ch->base = mtu->mapbase + sh_mtu2_channel_offsets[index];
	ch->index = index;

	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
}

static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
{
	struct resource *res;

	res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (mtu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
			 struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	mtu->pdev = pdev;

	raw_spin_lock_init(&mtu->lock);

	/* Get hold of clock. */
	mtu->clk = clk_get(&mtu->pdev->dev, "fck");
	if (IS_ERR(mtu->clk)) {
		dev_err(&mtu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(mtu->clk);
	}

	ret = clk_prepare(mtu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_mtu2_map_memory(mtu);
	if (ret < 0) {
		dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	ret = platform_irq_count(pdev);
	if (ret < 0)
		goto err_unmap;

	mtu->num_channels = min_t(unsigned int, ret,
				  ARRAY_SIZE(sh_mtu2_channel_offsets));

	mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
				GFP_KERNEL);
	if (mtu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	for (i = 0; i < mtu->num_channels; ++i) {
		ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, mtu);

	return 0;

err_unmap:
	kfree(mtu->channels);
	iounmap(mtu->mapbase);
err_clk_unprepare:
	clk_unprepare(mtu->clk);
err_clk_put:
	clk_put(mtu->clk);
	return ret;
}

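/*
 * On SuperH the device may first be probed as an early platform device
 * ("earlytimer"), before the driver model is fully up. When the regular
 * probe runs later for the same device, the drvdata set by the early pass
 * is still present and the device is kept as-is.
 */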
static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (mtu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
	if (mtu == NULL)
		return -ENOMEM;

	ret = sh_mtu2_setup(mtu, pdev);
	if (ret) {
		kfree(mtu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (mtu->has_clockevent)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}

static const struct platform_device_id sh_mtu2_id_table[] = {
	{ "sh-mtu2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);

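/*
 * Illustrative sketch of a device tree node matched by the table below
 * (the unit address and property values here are hypothetical; see the
 * renesas,mtu2 DT binding for the authoritative format). The
 * interrupt-names and clock-names match what this driver looks up
 * ("tgi%ua" and "fck"):
 *
 *	mtu2: timer@fcff0000 {
 *		compatible = "renesas,mtu2";
 *		reg = <0xfcff0000 0x400>;
 *		interrupt-names = "tgi0a", "tgi1a", "tgi2a";
 *		clock-names = "fck";
 *	};
 */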
static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = {
	{ .compatible = "renesas,mtu2" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);

static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= sh_mtu2_remove,
	.driver		= {
		.name	= "sh_mtu2",
		.of_match_table = of_match_ptr(sh_mtu2_of_table),
	},
	.id_table	= sh_mtu2_id_table,
};

static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}

static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}

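/*
 * Register with the SuperH "earlytimer" early platform class so the timer
 * can be probed and used before the regular device model is initialized;
 * subsys_initcall then registers the driver normally.
 */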
early_platform_init("earlytimer", &sh_mtu2_device_driver);
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");