// SPDX-License-Identifier: GPL-2.0
/*
 * Ingenic SoCs TCU IRQ driver
 * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
 * Copyright (C) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/mfd/ingenic-tcu.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched_clock.h>

#include <dt-bindings/clock/ingenic,tcu.h>

static DEFINE_PER_CPU(call_single_data_t, ingenic_cevt_csd);

struct ingenic_soc_info {
	unsigned int num_channels;
};

struct ingenic_tcu_timer {
	unsigned int cpu;
	unsigned int channel;
	struct clock_event_device cevt;
	struct clk *clk;
	char name[8];
};

struct ingenic_tcu {
	struct regmap *map;
	struct device_node *np;
	struct clk *cs_clk;
	unsigned int cs_channel;
	struct clocksource cs;
	unsigned long pwm_channels_mask;
	struct ingenic_tcu_timer timers[];
};

static struct ingenic_tcu *ingenic_tcu;

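/*
 * Read the current count of the clocksource channel. This is used both by
 * the clocksource read callback below and as the raw sched_clock read
 * function registered at init time.
 */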
static u64 notrace ingenic_tcu_timer_read(void)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	unsigned int count;

	regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count);

	return count;
}

static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs)
{
	return ingenic_tcu_timer_read();
}

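/*
 * container_of() works here because each per-CPU timer is stored at
 * tcu->timers[timer->cpu], so that array element is the timer itself.
 */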
static inline struct ingenic_tcu *
to_ingenic_tcu(struct ingenic_tcu_timer *timer)
{
	return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
}

static inline struct ingenic_tcu_timer *
to_ingenic_tcu_timer(struct clock_event_device *evt)
{
	return container_of(evt, struct ingenic_tcu_timer, cevt);
}

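/* Shutdown: stop the channel's counter by writing its bit to TECR (enable clear). */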
static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
{
	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

	return 0;
}

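/*
 * Program the next event: load the channel's full-value register with the
 * delta, reset the counter to zero, then re-enable the channel. Deltas that
 * do not fit in the 16-bit counter are rejected.
 */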
static int ingenic_tcu_cevt_set_next(unsigned long next,
				     struct clock_event_device *evt)
{
	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

	if (next > 0xffff)
		return -EINVAL;

	regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
	regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
	regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));

	return 0;
}

static void ingenic_per_cpu_event_handler(void *info)
{
	struct clock_event_device *cevt = (struct clock_event_device *) info;

	cevt->event_handler(cevt);
}

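/*
 * Timer interrupt handler: stop the channel (one-shot behaviour), then run
 * the clockevent handler on the CPU that owns this timer via an async
 * smp_call_function_single() call, so it executes on the right CPU even if
 * the interrupt was taken elsewhere.
 */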
static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
{
	struct ingenic_tcu_timer *timer = dev_id;
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
	call_single_data_t *csd;

	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

	if (timer->cevt.event_handler) {
		csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
		csd->info = (void *) &timer->cevt;
		csd->func = ingenic_per_cpu_event_handler;
		smp_call_function_single_async(timer->cpu, csd);
	}

	return IRQ_HANDLED;
}

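/*
 * Get the clock for a TCU channel directly from the TCU node's clock
 * provider, using the channel number as the single clock-specifier cell.
 */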
static struct clk *ingenic_tcu_get_clock(struct device_node *np, int id)
{
	struct of_phandle_args args;

	args.np = np;
	args.args_count = 1;
	args.args[0] = id;

	return of_clk_get_from_provider(&args);
}

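/*
 * CPU hotplug "online" callback: runs on the CPU being brought up, enables
 * that CPU's TCU channel clock, requests the channel interrupt and registers
 * a per-CPU one-shot clock event device.
 */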
static int ingenic_tcu_setup_cevt(unsigned int cpu)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
	unsigned int timer_virq;
	struct irq_domain *domain;
	unsigned long rate;
	int err;

	timer->clk = ingenic_tcu_get_clock(tcu->np, timer->channel);
	if (IS_ERR(timer->clk))
		return PTR_ERR(timer->clk);

	err = clk_prepare_enable(timer->clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(timer->clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	domain = irq_find_host(tcu->np);
	if (!domain) {
		err = -ENODEV;
		goto err_clk_disable;
	}

	timer_virq = irq_create_mapping(domain, timer->channel);
	if (!timer_virq) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	snprintf(timer->name, sizeof(timer->name), "TCU%u", timer->channel);

	err = request_irq(timer_virq, ingenic_tcu_cevt_cb, IRQF_TIMER,
			  timer->name, timer);
	if (err)
		goto err_irq_dispose_mapping;

	timer->cpu = smp_processor_id();
	timer->cevt.cpumask = cpumask_of(smp_processor_id());
	timer->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
	timer->cevt.name = timer->name;
	timer->cevt.rating = 200;
	timer->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
	timer->cevt.set_next_event = ingenic_tcu_cevt_set_next;

	clockevents_config_and_register(&timer->cevt, rate, 10, 0xffff);

	return 0;

err_irq_dispose_mapping:
	irq_dispose_mapping(timer_virq);
err_clk_disable:
	clk_disable_unprepare(timer->clk);
err_clk_put:
	clk_put(timer->clk);
	return err;
}

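/*
 * Set up the dedicated clocksource channel: reset the channel configuration,
 * let the counter free-run over its full 16-bit range and register it as a
 * continuous clocksource.
 */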
static int __init ingenic_tcu_clocksource_init(struct device_node *np,
					       struct ingenic_tcu *tcu)
{
	unsigned int channel = tcu->cs_channel;
	struct clocksource *cs = &tcu->cs;
	unsigned long rate;
	int err;

	tcu->cs_clk = ingenic_tcu_get_clock(np, channel);
	if (IS_ERR(tcu->cs_clk))
		return PTR_ERR(tcu->cs_clk);

	err = clk_prepare_enable(tcu->cs_clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(tcu->cs_clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	/* Reset channel */
	regmap_update_bits(tcu->map, TCU_REG_TCSRc(channel),
			   0xffff & ~TCU_TCSR_RESERVED_BITS, 0);

	/* Reset counter */
	regmap_write(tcu->map, TCU_REG_TDFRc(channel), 0xffff);
	regmap_write(tcu->map, TCU_REG_TCNTc(channel), 0);

	/* Enable channel */
	regmap_write(tcu->map, TCU_REG_TESR, BIT(channel));

	cs->name = "ingenic-timer";
	cs->rating = 200;
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
	cs->mask = CLOCKSOURCE_MASK(16);
	cs->read = ingenic_tcu_timer_cs_read;

	err = clocksource_register_hz(cs, rate);
	if (err)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	clk_disable_unprepare(tcu->cs_clk);
err_clk_put:
	clk_put(tcu->cs_clk);
	return err;
}

static const struct ingenic_soc_info jz4740_soc_info = {
	.num_channels = 8,
};

static const struct ingenic_soc_info jz4725b_soc_info = {
	.num_channels = 6,
};

static const struct of_device_id ingenic_tcu_of_match[] = {
	{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
	{ .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
	{ /* sentinel */ }
};

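/*
 * Early timer init (TIMER_OF_DECLARE): reserve one free TCU channel per
 * possible CPU for clock events plus one for the clocksource, leaving the
 * channels in pwm_channels_mask to the PWM driver, then register the
 * clocksource, the per-CPU clock events and finally the sched_clock.
 */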
static int __init ingenic_tcu_init(struct device_node *np)
{
	const struct of_device_id *id = of_match_node(ingenic_tcu_of_match, np);
	const struct ingenic_soc_info *soc_info = id->data;
	struct ingenic_tcu_timer *timer;
	struct ingenic_tcu *tcu;
	struct regmap *map;
	unsigned int cpu;
	int ret, last_bit = -1;
	long rate;

	of_node_clear_flag(np, OF_POPULATED);

	map = device_node_to_regmap(np);
	if (IS_ERR(map))
		return PTR_ERR(map);

	tcu = kzalloc(struct_size(tcu, timers, num_possible_cpus()),
		      GFP_KERNEL);
	if (!tcu)
		return -ENOMEM;

	/*
	 * Enable all TCU channels for PWM use by default except channels 0/1,
	 * and channel 2 if target CPU is JZ4780/X2000 and SMP is selected.
	 */
	tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1,
					 num_possible_cpus() + 1);
	of_property_read_u32(np, "ingenic,pwm-channels-mask",
			     (u32 *)&tcu->pwm_channels_mask);

	/* Verify that we have at least num_possible_cpus() + 1 free channels */
	if (hweight8(tcu->pwm_channels_mask) >
			soc_info->num_channels - num_possible_cpus() + 1) {
		pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
			tcu->pwm_channels_mask);
		ret = -EINVAL;
		goto err_free_ingenic_tcu;
	}

	tcu->map = map;
	tcu->np = np;
	ingenic_tcu = tcu;

	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
		timer = &tcu->timers[cpu];

		timer->cpu = cpu;
		timer->channel = find_next_zero_bit(&tcu->pwm_channels_mask,
						  soc_info->num_channels,
						  last_bit + 1);
		last_bit = timer->channel;
	}

	tcu->cs_channel = find_next_zero_bit(&tcu->pwm_channels_mask,
					     soc_info->num_channels,
					     last_bit + 1);

	ret = ingenic_tcu_clocksource_init(np, tcu);
	if (ret) {
		pr_crit("%s: Unable to init clocksource: %d\n", __func__, ret);
		goto err_free_ingenic_tcu;
	}

	/* Setup clock events on each CPU core */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "Ingenic XBurst: online",
				ingenic_tcu_setup_cevt, NULL);
	if (ret < 0) {
		pr_crit("%s: Unable to start CPU timers: %d\n", __func__, ret);
		goto err_tcu_clocksource_cleanup;
	}

	/* Register the sched_clock at the end as there's no way to undo it */
	rate = clk_get_rate(tcu->cs_clk);
	sched_clock_register(ingenic_tcu_timer_read, 16, rate);

	return 0;

err_tcu_clocksource_cleanup:
	clocksource_unregister(&tcu->cs);
	clk_disable_unprepare(tcu->cs_clk);
	clk_put(tcu->cs_clk);
err_free_ingenic_tcu:
	kfree(tcu);
	return ret;
}

TIMER_OF_DECLARE(jz4740_tcu_intc,  "ingenic,jz4740-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4760_tcu_intc,  "ingenic,jz4760-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc,  "ingenic,jz4770-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(x1000_tcu_intc,  "ingenic,x1000-tcu",  ingenic_tcu_init);

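/*
 * The platform driver probe only attaches the already-initialized TCU state
 * as drvdata, which the suspend/resume callbacks below retrieve.
 */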
static int __init ingenic_tcu_probe(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, ingenic_tcu);

	return 0;
}

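/*
 * System sleep only gates/ungates the timer clocks with clk_disable() and
 * clk_enable(); the clocks remain prepared across suspend.
 */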
static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;

	clk_disable(tcu->cs_clk);

	for (cpu = 0; cpu < num_online_cpus(); cpu++)
		clk_disable(tcu->timers[cpu].clk);

	return 0;
}

static int __maybe_unused ingenic_tcu_resume(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;
	int ret;

	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
		ret = clk_enable(tcu->timers[cpu].clk);
		if (ret)
			goto err_timer_clk_disable;
	}

	ret = clk_enable(tcu->cs_clk);
	if (ret)
		goto err_timer_clk_disable;

	return 0;

err_timer_clk_disable:
	for (; cpu > 0; cpu--)
		clk_disable(tcu->timers[cpu - 1].clk);
	return ret;
}

static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {
	/* _noirq: We want the TCU clocks to be gated last / ungated first */
	.suspend_noirq = ingenic_tcu_suspend,
	.resume_noirq  = ingenic_tcu_resume,
};

static struct platform_driver ingenic_tcu_driver = {
	.driver = {
		.name	= "ingenic-tcu-timer",
#ifdef CONFIG_PM_SLEEP
		.pm	= &ingenic_tcu_pm_ops,
#endif
		.of_match_table = ingenic_tcu_of_match,
	},
};
builtin_platform_driver_probe(ingenic_tcu_driver, ingenic_tcu_probe);