1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * OMAP3/4 - specific DPLL control functions
4  *
5  * Copyright (C) 2009-2010 Texas Instruments, Inc.
6  * Copyright (C) 2009-2010 Nokia Corporation
7  *
8  * Written by Paul Walmsley
9  * Testing and integration fixes by Jouni Högander
10  *
11  * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
12  * Menon
13  *
14  * Parts of this code are based on code written by
15  * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
16  */
17 
18 #include <linux/kernel.h>
19 #include <linux/device.h>
20 #include <linux/list.h>
21 #include <linux/errno.h>
22 #include <linux/delay.h>
23 #include <linux/clk.h>
24 #include <linux/io.h>
25 #include <linux/bitops.h>
26 #include <linux/clkdev.h>
27 #include <linux/clk/ti.h>
28 
29 #include "clock.h"
30 
31 /* CM_AUTOIDLE_PLL*.AUTO_* bit values */
32 #define DPLL_AUTOIDLE_DISABLE			0x0
33 #define DPLL_AUTOIDLE_LOW_POWER_STOP		0x1
34 
35 #define MAX_DPLL_WAIT_TRIES		1000000
36 
37 #define OMAP3XXX_EN_DPLL_LOCKED		0x7
38 
39 /* Forward declarations */
40 static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
41 static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
42 static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
43 
44 /* Private functions */
45 
46 /* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
47 static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
48 {
49 	const struct dpll_data *dd;
50 	u32 v;
51 
52 	dd = clk->dpll_data;
53 
54 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
55 	v &= ~dd->enable_mask;
56 	v |= clken_bits << __ffs(dd->enable_mask);
57 	ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
58 }
59 
60 /* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
61 static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
62 {
63 	const struct dpll_data *dd;
64 	int i = 0;
65 	int ret = -EINVAL;
66 	const char *clk_name;
67 
68 	dd = clk->dpll_data;
69 	clk_name = clk_hw_get_name(&clk->hw);
70 
71 	state <<= __ffs(dd->idlest_mask);
72 
73 	while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask)
74 		!= state) && i < MAX_DPLL_WAIT_TRIES) {
75 		i++;
76 		udelay(1);
77 	}
78 
79 	if (i == MAX_DPLL_WAIT_TRIES) {
80 		pr_err("clock: %s failed transition to '%s'\n",
81 		       clk_name, (state) ? "locked" : "bypassed");
82 	} else {
83 		pr_debug("clock: %s transition to '%s' in %d loops\n",
84 			 clk_name, (state) ? "locked" : "bypassed", i);
85 
86 		ret = 0;
87 	}
88 
89 	return ret;
90 }
91 
92 /* From 3430 TRM ES2 4.7.6.2 */
93 static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
94 {
95 	unsigned long fint;
96 	u16 f = 0;
97 
98 	fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;
99 
100 	pr_debug("clock: fint is %lu\n", fint);
101 
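	/*
	 * Map the DPLL internal reference frequency (fint) to a FREQSEL
	 * value per the 3430 TRM table; fint values between 2.1 MHz and
	 * 7.5 MHz have no valid setting, so 0 is returned for them.
	 */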
102 	if (fint >= 750000 && fint <= 1000000)
103 		f = 0x3;
104 	else if (fint > 1000000 && fint <= 1250000)
105 		f = 0x4;
106 	else if (fint > 1250000 && fint <= 1500000)
107 		f = 0x5;
108 	else if (fint > 1500000 && fint <= 1750000)
109 		f = 0x6;
110 	else if (fint > 1750000 && fint <= 2100000)
111 		f = 0x7;
112 	else if (fint > 7500000 && fint <= 10000000)
113 		f = 0xB;
114 	else if (fint > 10000000 && fint <= 12500000)
115 		f = 0xC;
116 	else if (fint > 12500000 && fint <= 15000000)
117 		f = 0xD;
118 	else if (fint > 15000000 && fint <= 17500000)
119 		f = 0xE;
120 	else if (fint > 17500000 && fint <= 21000000)
121 		f = 0xF;
122 	else
123 		pr_debug("clock: unknown freqsel setting for %d\n", n);
124 
125 	return f;
126 }
127 
128 /**
129  * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
130  * @clk: pointer to a DPLL struct clk
131  *
132  * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
133  * readiness before returning.  Will save and restore the DPLL's
134  * autoidle state across the enable, per the CDP code.  If the DPLL
135  * locked successfully, return 0; if the DPLL did not lock in the time
136  * allotted, or DPLL3 was passed in, return -EINVAL.
137  */
138 static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
139 {
140 	const struct dpll_data *dd;
141 	u8 ai;
142 	u8 state = 1;
143 	int r = 0;
144 
145 	pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));
146 
147 	dd = clk->dpll_data;
148 	state <<= __ffs(dd->idlest_mask);
149 
150 	/* Check if already locked */
151 	if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) ==
152 	    state)
153 		goto done;
154 
155 	ai = omap3_dpll_autoidle_read(clk);
156 
157 	if (ai)
158 		omap3_dpll_deny_idle(clk);
159 
160 	_omap3_dpll_write_clken(clk, DPLL_LOCKED);
161 
162 	r = _omap3_wait_dpll_status(clk, 1);
163 
164 	if (ai)
165 		omap3_dpll_allow_idle(clk);
166 
167 done:
168 	return r;
169 }
170 
171 /**
172  * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
173  * @clk: pointer to a DPLL struct clk
174  *
175  * Instructs a non-CORE DPLL to enter low-power bypass mode.  In
176  * bypass mode, the DPLL's rate is set equal to its parent clock's
177  * rate.  Waits for the DPLL to report readiness before returning.
178  * Will save and restore the DPLL's autoidle state across the enable,
179  * per the CDP code.  If the DPLL entered bypass mode successfully,
180  * return 0; if the DPLL did not enter bypass in the time allotted, or
181  * DPLL3 was passed in, or the DPLL does not support low-power bypass,
182  * return -EINVAL.
183  */
184 static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
185 {
186 	int r;
187 	u8 ai;
188 
189 	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
190 		return -EINVAL;
191 
192 	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
193 		 clk_hw_get_name(&clk->hw));
194 
195 	ai = omap3_dpll_autoidle_read(clk);
196 
197 	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);
198 
199 	r = _omap3_wait_dpll_status(clk, 0);
200 
201 	if (ai)
202 		omap3_dpll_allow_idle(clk);
203 
204 	return r;
205 }
206 
207 /**
208  * _omap3_noncore_dpll_stop - instruct a DPLL to stop
209  * @clk: pointer to a DPLL struct clk
210  *
211  * Instructs a non-CORE DPLL to enter low-power stop. Will save and
212  * restore the DPLL's autoidle state across the stop, per the CDP
213  * code.  If DPLL3 was passed in, or the DPLL does not support
214  * low-power stop, return -EINVAL; otherwise, return 0.
215  */
216 static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
217 {
218 	u8 ai;
219 
220 	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
221 		return -EINVAL;
222 
223 	pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));
224 
225 	ai = omap3_dpll_autoidle_read(clk);
226 
227 	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);
228 
229 	if (ai)
230 		omap3_dpll_allow_idle(clk);
231 
232 	return 0;
233 }
234 
235 /**
236  * _lookup_dco - Lookup DCO used by j-type DPLL
237  * @clk: pointer to a DPLL struct clk
238  * @dco: digital control oscillator selector
239  * @m: DPLL multiplier to set
240  * @n: DPLL divider to set
241  *
242  * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
243  *
244  * XXX This code is not needed for 3430/AM35xx; can it be optimized
245  * out in non-multi-OMAP builds for those chips?
246  */
247 static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
248 {
249 	unsigned long fint, clkinp; /* watch out for overflow */
250 
251 	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
252 	fint = (clkinp / n) * m;
253 
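	/*
	 * fint computed above is the DCO output rate, clkinp / n * m;
	 * select the DCO field value from it.
	 */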
254 	if (fint < 1000000000)
255 		*dco = 2;
256 	else
257 		*dco = 4;
258 }
259 
260 /**
261  * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
262  * @clk: pointer to a DPLL struct clk
263  * @sd_div: target sigma-delta divider
264  * @m: DPLL multiplier to set
265  * @n: DPLL divider to set
266  *
267  * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
268  *
269  * XXX This code is not needed for 3430/AM35xx; can it be optimized
270  * out in non-multi-OMAP builds for those chips?
271  */
272 static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
273 {
274 	unsigned long clkinp, sd; /* watch out for overflow */
275 	int mod1, mod2;
276 
277 	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
278 
279 	/*
280 	 * target sigma-delta to near 250MHz
281 	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
282 	 */
283 	clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */
284 	mod1 = (clkinp * m) % (250 * n);
285 	sd = (clkinp * m) / (250 * n);
286 	mod2 = sd % 10;
287 	sd /= 10;
288 
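	/*
	 * mod1 and mod2 hold the remainders of the two divisions above, so
	 * any non-zero remainder rounds the result up (the ceil in the
	 * formula above); sd /= 10 compensates for clkinp being in 100 kHz
	 * units rather than MHz.
	 */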
289 	if (mod1 || mod2)
290 		sd++;
291 	*sd_div = sd;
292 }
293 
294 /**
295  * omap3_noncore_dpll_ssc_program - set spread-spectrum clocking registers
296  * @clk:	struct clk * of DPLL to set
297  *
298  * Enable the DPLL spread spectrum clocking if frequency modulation and
299  * frequency spreading have been set, otherwise disable it.
300  */
301 static void omap3_noncore_dpll_ssc_program(struct clk_hw_omap *clk)
302 {
303 	struct dpll_data *dd = clk->dpll_data;
304 	unsigned long ref_rate;
305 	u32 v, ctrl, mod_freq_divider, exponent, mantissa;
306 	u32 deltam_step, deltam_ceil;
307 
308 	ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
309 
310 	if (dd->ssc_modfreq && dd->ssc_deltam) {
311 		ctrl |= dd->ssc_enable_mask;
312 
313 		if (dd->ssc_downspread)
314 			ctrl |= dd->ssc_downspread_mask;
315 		else
316 			ctrl &= ~dd->ssc_downspread_mask;
317 
318 		ref_rate = clk_hw_get_rate(dd->clk_ref);
319 		mod_freq_divider =
320 		    (ref_rate / dd->last_rounded_n) / (4 * dd->ssc_modfreq);
321 		if (dd->ssc_modfreq > (ref_rate / 70))
322 			pr_warn("clock: SSC modulation frequency of DPLL %s greater than %ld\n",
323 				__clk_get_name(clk->hw.clk), ref_rate / 70);
324 
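		/*
		 * Encode mod_freq_divider as mantissa * 2^exponent so it
		 * fits the modfreq mantissa/exponent register fields; the
		 * mantissa is clamped to its 7-bit maximum.
		 */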
325 		exponent = 0;
326 		mantissa = mod_freq_divider;
327 		while ((mantissa > 127) && (exponent < 7)) {
328 			exponent++;
329 			mantissa /= 2;
330 		}
331 		if (mantissa > 127)
332 			mantissa = 127;
333 
334 		v = ti_clk_ll_ops->clk_readl(&dd->ssc_modfreq_reg);
335 		v &= ~(dd->ssc_modfreq_mant_mask | dd->ssc_modfreq_exp_mask);
336 		v |= mantissa << __ffs(dd->ssc_modfreq_mant_mask);
337 		v |= exponent << __ffs(dd->ssc_modfreq_exp_mask);
338 		ti_clk_ll_ops->clk_writel(v, &dd->ssc_modfreq_reg);
339 
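		/*
		 * Scale the requested spread (ssc_deltam) by M; the /10 and
		 * /100 together divide by 1000, with the shift into the
		 * deltam step field done in between to keep fractional
		 * precision, then normalise by mod_freq_divider.
		 */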
340 		deltam_step = dd->last_rounded_m * dd->ssc_deltam;
341 		deltam_step /= 10;
342 		if (dd->ssc_downspread)
343 			deltam_step /= 2;
344 
345 		deltam_step <<= __ffs(dd->ssc_deltam_int_mask);
346 		deltam_step /= 100;
347 		deltam_step /= mod_freq_divider;
348 		if (deltam_step > 0xFFFFF)
349 			deltam_step = 0xFFFFF;
350 
351 		deltam_ceil = (deltam_step & dd->ssc_deltam_int_mask) >>
352 		    __ffs(dd->ssc_deltam_int_mask);
353 		if (deltam_step & dd->ssc_deltam_frac_mask)
354 			deltam_ceil++;
355 
356 		if ((dd->ssc_downspread &&
357 		     ((dd->last_rounded_m - (2 * deltam_ceil)) < 20 ||
358 		      dd->last_rounded_m > 2045)) ||
359 		    ((dd->last_rounded_m - deltam_ceil) < 20 ||
360 		     (dd->last_rounded_m + deltam_ceil) > 2045))
361 			pr_warn("clock: SSC multiplier of DPLL %s is out of range\n",
362 				__clk_get_name(clk->hw.clk));
363 
364 		v = ti_clk_ll_ops->clk_readl(&dd->ssc_deltam_reg);
365 		v &= ~(dd->ssc_deltam_int_mask | dd->ssc_deltam_frac_mask);
366 		v |= deltam_step << __ffs(dd->ssc_deltam_int_mask |
367 					  dd->ssc_deltam_frac_mask);
368 		ti_clk_ll_ops->clk_writel(v, &dd->ssc_deltam_reg);
369 	} else {
370 		ctrl &= ~dd->ssc_enable_mask;
371 	}
372 
373 	ti_clk_ll_ops->clk_writel(ctrl, &dd->control_reg);
374 }
375 
376 /**
377  * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
378  * @clk:	struct clk * of DPLL to set
379  * @freqsel:	FREQSEL value to set
380  *
381  * Program the DPLL with the last M, N values calculated, and wait for
382  * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
383  */
384 static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
385 {
386 	struct dpll_data *dd = clk->dpll_data;
387 	u8 dco, sd_div, ai = 0;
388 	u32 v;
389 	bool errata_i810;
390 
391 	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
392 	_omap3_noncore_dpll_bypass(clk);
393 
394 	/*
395 	 * Set jitter correction. Jitter correction applicable for OMAP343X
396 	 * only since freqsel field is no longer present on other devices.
397 	 */
398 	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
399 		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
400 		v &= ~dd->freqsel_mask;
401 		v |= freqsel << __ffs(dd->freqsel_mask);
402 		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
403 	}
404 
405 	/* Set DPLL multiplier, divider */
406 	v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
407 
408 	/* Handle Duty Cycle Correction */
409 	if (dd->dcc_mask) {
410 		if (dd->last_rounded_rate >= dd->dcc_rate)
411 			v |= dd->dcc_mask; /* Enable DCC */
412 		else
413 			v &= ~dd->dcc_mask; /* Disable DCC */
414 	}
415 
416 	v &= ~(dd->mult_mask | dd->div1_mask);
417 	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
418 	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
419 
420 	/* Configure dco and sd_div for dplls that have these fields */
421 	if (dd->dco_mask) {
422 		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
423 		v &= ~(dd->dco_mask);
424 		v |= dco << __ffs(dd->dco_mask);
425 	}
426 	if (dd->sddiv_mask) {
427 		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
428 			      dd->last_rounded_n);
429 		v &= ~(dd->sddiv_mask);
430 		v |= sd_div << __ffs(dd->sddiv_mask);
431 	}
432 
433 	/*
434 	 * Errata i810 - DPLL controller can get stuck while transitioning
435 	 * to a power saving state. Software must ensure the DPLL can not
436 	 * transition to a low power state while changing M/N values.
437 	 * Easiest way to accomplish this is to prevent DPLL autoidle
438 	 * before doing the M/N re-program.
439 	 */
440 	errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;
441 
442 	if (errata_i810) {
443 		ai = omap3_dpll_autoidle_read(clk);
444 		if (ai) {
445 			omap3_dpll_deny_idle(clk);
446 
447 			/* OCP barrier */
448 			omap3_dpll_autoidle_read(clk);
449 		}
450 	}
451 
452 	ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);
453 
454 	/* Set 4X multiplier and low-power mode */
455 	if (dd->m4xen_mask || dd->lpmode_mask) {
456 		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
457 
458 		if (dd->m4xen_mask) {
459 			if (dd->last_rounded_m4xen)
460 				v |= dd->m4xen_mask;
461 			else
462 				v &= ~dd->m4xen_mask;
463 		}
464 
465 		if (dd->lpmode_mask) {
466 			if (dd->last_rounded_lpmode)
467 				v |= dd->lpmode_mask;
468 			else
469 				v &= ~dd->lpmode_mask;
470 		}
471 
472 		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
473 	}
474 
475 	if (dd->ssc_enable_mask)
476 		omap3_noncore_dpll_ssc_program(clk);
477 
478 	/* We let the clock framework set the other output dividers later */
479 
480 	/* REVISIT: Set ramp-up delay? */
481 
482 	_omap3_noncore_dpll_lock(clk);
483 
484 	if (errata_i810 && ai)
485 		omap3_dpll_allow_idle(clk);
486 
487 	return 0;
488 }
489 
490 /* Public functions */
491 
492 /**
493  * omap3_dpll_recalc - recalculate DPLL rate
494  * @hw: struct clk_hw containing the DPLL struct clk
495  * @parent_rate: clock rate of the DPLL parent
496  *
497  * Recalculate and propagate the DPLL rate.
498  */
499 unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
500 {
501 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
502 
503 	return omap2_get_dpll_rate(clk);
504 }
505 
506 /* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */
507 
508 /**
509  * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
510  * @hw: struct clk_hw containing the pointer to a DPLL struct clk
511  *
512  * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
513  * The choice of modes depends on the DPLL's programmed rate: if it is
514  * the same as the DPLL's parent clock, it will enter bypass;
515  * otherwise, it will enter lock.  This code will wait for the DPLL to
516  * indicate readiness before returning, unless the DPLL takes too long
517  * to enter the target state.  Intended to be used as the struct clk's
518  * enable function.  If DPLL3 was passed in, or the DPLL does not
519  * support low-power stop, or if the DPLL took too long to enter
520  * bypass or lock, return -EINVAL; otherwise, return 0.
521  */
522 int omap3_noncore_dpll_enable(struct clk_hw *hw)
523 {
524 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
525 	int r;
526 	struct dpll_data *dd;
527 	struct clk_hw *parent;
528 
529 	dd = clk->dpll_data;
530 	if (!dd)
531 		return -EINVAL;
532 
533 	if (clk->clkdm) {
534 		r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
535 		if (r) {
536 			WARN(1,
537 			     "%s: could not enable %s's clockdomain %s: %d\n",
538 			     __func__, clk_hw_get_name(hw),
539 			     clk->clkdm_name, r);
540 			return r;
541 		}
542 	}
543 
544 	parent = clk_hw_get_parent(hw);
545 
546 	if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
547 		WARN_ON(parent != dd->clk_bypass);
548 		r = _omap3_noncore_dpll_bypass(clk);
549 	} else {
550 		WARN_ON(parent != dd->clk_ref);
551 		r = _omap3_noncore_dpll_lock(clk);
552 	}
553 
554 	return r;
555 }
556 
557 /**
558  * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
559  * @hw: struct clk_hw containing the pointer to a DPLL struct clk
560  *
561  * Instructs a non-CORE DPLL to enter low-power stop.  This function is
562  * intended for use in struct clkops.  No return value.
563  */
564 void omap3_noncore_dpll_disable(struct clk_hw *hw)
565 {
566 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
567 
568 	_omap3_noncore_dpll_stop(clk);
569 	if (clk->clkdm)
570 		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
571 }
572 
573 /* Non-CORE DPLL rate set code */
574 
575 /**
576  * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
577  * @hw: pointer to the clock to determine rate for
578  * @req: target rate request
579  *
580  * Determines which DPLL mode to use for reaching a desired target rate.
581  * Checks whether the DPLL shall be in bypass or locked mode, and if
582  * locked, calculates the M,N values for the DPLL via round-rate.
583  * Returns 0 on success, negative error value on failure.
584  */
585 int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
586 				      struct clk_rate_request *req)
587 {
588 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
589 	struct dpll_data *dd;
590 
591 	if (!req->rate)
592 		return -EINVAL;
593 
594 	dd = clk->dpll_data;
595 	if (!dd)
596 		return -EINVAL;
597 
598 	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
599 	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
600 		req->best_parent_hw = dd->clk_bypass;
601 	} else {
602 		req->rate = omap2_dpll_round_rate(hw, req->rate,
603 					  &req->best_parent_rate);
604 		req->best_parent_hw = dd->clk_ref;
605 	}
606 
607 	req->best_parent_rate = req->rate;
608 
609 	return 0;
610 }
611 
612 /**
613  * omap3_noncore_dpll_set_parent - set parent for a DPLL clock
614  * @hw: pointer to the clock to set parent for
615  * @index: parent index to select
616  *
617  * Sets parent for a DPLL clock. This sets the DPLL into bypass or
618  * locked mode. Returns 0 with success, negative error value otherwise.
619  */
620 int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
621 {
622 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
623 	int ret;
624 
625 	if (!hw)
626 		return -EINVAL;
627 
628 	if (index)
629 		ret = _omap3_noncore_dpll_bypass(clk);
630 	else
631 		ret = _omap3_noncore_dpll_lock(clk);
632 
633 	return ret;
634 }
635 
636 /**
637  * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
638  * @hw: pointer to the clock to set rate for
639  * @rate: target rate for the clock
640  * @parent_rate: rate of the parent clock
641  *
642  * Sets rate for a DPLL clock. First checks if the clock parent is
643  * reference clock (in bypass mode, the rate of the clock can't be
644  * changed) and proceeds with the rate change operation. Returns 0
645  * with success, negative error value otherwise.
646  */
647 int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
648 				unsigned long parent_rate)
649 {
650 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
651 	struct dpll_data *dd;
652 	u16 freqsel = 0;
653 	int ret;
654 
655 	if (!hw || !rate)
656 		return -EINVAL;
657 
658 	dd = clk->dpll_data;
659 	if (!dd)
660 		return -EINVAL;
661 
662 	if (clk_hw_get_parent(hw) != dd->clk_ref)
663 		return -EINVAL;
664 
665 	if (dd->last_rounded_rate == 0)
666 		return -EINVAL;
667 
668 	/* Freqsel is available only on OMAP343X devices */
669 	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
670 		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
671 		WARN_ON(!freqsel);
672 	}
673 
674 	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
675 		 clk_hw_get_name(hw), rate);
676 
677 	ret = omap3_noncore_dpll_program(clk, freqsel);
678 
679 	return ret;
680 }
681 
682 /**
683  * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock
684  * @hw: pointer to the clock to set rate and parent for
685  * @rate: target rate for the DPLL
686  * @parent_rate: clock rate of the DPLL parent
687  * @index: new parent index for the DPLL, 0 - reference, 1 - bypass
688  *
689  * Sets rate and parent for a DPLL clock. If new parent is the bypass
690  * clock, only selects the parent. Otherwise proceeds with a rate
691  * change, as this will effectively also change the parent as the
692  * DPLL is put into locked mode. Returns 0 with success, negative error
693  * value otherwise.
694  */
695 int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
696 					   unsigned long rate,
697 					   unsigned long parent_rate,
698 					   u8 index)
699 {
700 	int ret;
701 
702 	if (!hw || !rate)
703 		return -EINVAL;
704 
705 	/*
706 	 * clk-ref is at index 0, in which case we only need to set the rate;
707 	 * the parent will be changed automatically by the lock sequence.
708 	 * In the clk-bypass case we only need to change the parent.
709 	 */
710 	if (index)
711 		ret = omap3_noncore_dpll_set_parent(hw, index);
712 	else
713 		ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
714 
715 	return ret;
716 }
717 
718 /* DPLL autoidle read/set code */
719 
720 /**
721  * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
722  * @clk: struct clk * of the DPLL to read
723  *
724  * Return the DPLL's autoidle bits, shifted down to bit 0.  Returns
725  * -EINVAL if passed a null pointer or if the struct clk does not
726  * appear to refer to a DPLL.
727  */
728 static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
729 {
730 	const struct dpll_data *dd;
731 	u32 v;
732 
733 	if (!clk || !clk->dpll_data)
734 		return -EINVAL;
735 
736 	dd = clk->dpll_data;
737 
738 	if (!dd->autoidle_mask)
739 		return -EINVAL;
740 
741 	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
742 	v &= dd->autoidle_mask;
743 	v >>= __ffs(dd->autoidle_mask);
744 
745 	return v;
746 }
747 
748 /**
749  * omap3_dpll_allow_idle - enable DPLL autoidle bits
750  * @clk: struct clk * of the DPLL to operate on
751  *
752  * Enable DPLL automatic idle control.  This automatic idle mode
753  * switching takes effect only when the DPLL is locked, at least on
754  * OMAP3430.  The DPLL will enter low-power stop when its downstream
755  * clocks are gated.  No return value.
756  */
757 static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
758 {
759 	const struct dpll_data *dd;
760 	u32 v;
761 
762 	if (!clk || !clk->dpll_data)
763 		return;
764 
765 	dd = clk->dpll_data;
766 
767 	if (!dd->autoidle_mask)
768 		return;
769 
770 	/*
771 	 * REVISIT: CORE DPLL can optionally enter low-power bypass
772 	 * by writing 0x5 instead of 0x1.  Add some mechanism to
773 	 * optionally enter this mode.
774 	 */
775 	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
776 	v &= ~dd->autoidle_mask;
777 	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
778 	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
779 }
780 
781 /**
782  * omap3_dpll_deny_idle - prevent DPLL from automatically idling
783  * @clk: struct clk * of the DPLL to operate on
784  *
785  * Disable DPLL automatic idle control.  No return value.
786  */
787 static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
788 {
789 	const struct dpll_data *dd;
790 	u32 v;
791 
792 	if (!clk || !clk->dpll_data)
793 		return;
794 
795 	dd = clk->dpll_data;
796 
797 	if (!dd->autoidle_mask)
798 		return;
799 
800 	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
801 	v &= ~dd->autoidle_mask;
802 	v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
803 	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
804 }
805 
806 /* Clock control for DPLL outputs */
807 
808 /* Find the parent DPLL for the given clkoutx2 clock */
809 static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
810 {
811 	struct clk_hw_omap *pclk = NULL;
812 
813 	/* Walk up the parents of clk, looking for a DPLL */
814 	do {
815 		do {
816 			hw = clk_hw_get_parent(hw);
817 		} while (hw && (!omap2_clk_is_hw_omap(hw)));
818 		if (!hw)
819 			break;
820 		pclk = to_clk_hw_omap(hw);
821 	} while (pclk && !pclk->dpll_data);
822 
823 	/* clk does not have a DPLL as a parent?  error in the clock data */
824 	if (!pclk) {
825 		WARN_ON(1);
826 		return NULL;
827 	}
828 
829 	return pclk;
830 }
831 
832 /**
833  * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
834  * @hw: pointer to struct clk_hw
835  * @parent_rate: clock rate of the DPLL parent
836  *
837  * Using parent clock DPLL data, look up DPLL state.  If locked, set our
838  * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
839  */
840 unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
841 				    unsigned long parent_rate)
842 {
843 	const struct dpll_data *dd;
844 	unsigned long rate;
845 	u32 v;
846 	struct clk_hw_omap *pclk = NULL;
847 
848 	if (!parent_rate)
849 		return 0;
850 
851 	pclk = omap3_find_clkoutx2_dpll(hw);
852 
853 	if (!pclk)
854 		return 0;
855 
856 	dd = pclk->dpll_data;
857 
858 	WARN_ON(!dd->enable_mask);
859 
860 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask;
861 	v >>= __ffs(dd->enable_mask);
862 	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
863 		rate = parent_rate;
864 	else
865 		rate = parent_rate * 2;
866 	return rate;
867 }
868 
869 /**
870  * omap3_core_dpll_save_context - Save the m and n values of the divider
871  * @hw: pointer to struct clk_hw
872  *
873  * Before the DPLL registers are lost, save the last rounded rate m and n
874  * and the enable mask.
875  */
876 int omap3_core_dpll_save_context(struct clk_hw *hw)
877 {
878 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
879 	struct dpll_data *dd;
880 	u32 v;
881 
882 	dd = clk->dpll_data;
883 
884 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
885 	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
886 
887 	if (clk->context == DPLL_LOCKED) {
888 		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
889 		dd->last_rounded_m = (v & dd->mult_mask) >>
890 						__ffs(dd->mult_mask);
891 		dd->last_rounded_n = ((v & dd->div1_mask) >>
892 						__ffs(dd->div1_mask)) + 1;
893 	}
894 
895 	return 0;
896 }
897 
898 /**
899  * omap3_core_dpll_restore_context - restore the m and n values of the divider
900  * @hw: pointer to struct clk_hw
901  *
902  * Restore the last rounded rate m and n
903  * and the enable mask.
904  */
905 void omap3_core_dpll_restore_context(struct clk_hw *hw)
906 {
907 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
908 	const struct dpll_data *dd;
909 	u32 v;
910 
911 	dd = clk->dpll_data;
912 
913 	if (clk->context == DPLL_LOCKED) {
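		/*
		 * Drop the DPLL out of lock (EN field 0x4) before rewriting
		 * M and N, then relock with the saved values.
		 */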
914 		_omap3_dpll_write_clken(clk, 0x4);
915 		_omap3_wait_dpll_status(clk, 0);
916 
917 		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
918 		v &= ~(dd->mult_mask | dd->div1_mask);
919 		v |= dd->last_rounded_m << __ffs(dd->mult_mask);
920 		v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
921 		ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);
922 
923 		_omap3_dpll_write_clken(clk, DPLL_LOCKED);
924 		_omap3_wait_dpll_status(clk, 1);
925 	} else {
926 		_omap3_dpll_write_clken(clk, clk->context);
927 	}
928 }
929 
930 /**
931  * omap3_noncore_dpll_save_context - Save the m and n values of the divider
932  * @hw: pointer to struct clk_hw
933  *
934  * Before the DPLL registers are lost, save the last rounded rate m and n
935  * and the enable mask.
936  */
937 int omap3_noncore_dpll_save_context(struct clk_hw *hw)
938 {
939 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
940 	struct dpll_data *dd;
941 	u32 v;
942 
943 	dd = clk->dpll_data;
944 
945 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
946 	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
947 
948 	if (clk->context == DPLL_LOCKED) {
949 		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
950 		dd->last_rounded_m = (v & dd->mult_mask) >>
951 						__ffs(dd->mult_mask);
952 		dd->last_rounded_n = ((v & dd->div1_mask) >>
953 						__ffs(dd->div1_mask)) + 1;
954 	}
955 
956 	return 0;
957 }
958 
959 /**
960  * omap3_noncore_dpll_restore_context - restore the m and n values of the divider
961  * @hw: pointer to struct clk_hw
962  *
963  * Restore the last rounded rate m and n
964  * and the enable mask.
965  */
966 void omap3_noncore_dpll_restore_context(struct clk_hw *hw)
967 {
968 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
969 	const struct dpll_data *dd;
970 	u32 ctrl, mult_div1;
971 
972 	dd = clk->dpll_data;
973 
974 	ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
975 	mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
976 
977 	if (clk->context == ((ctrl & dd->enable_mask) >>
978 			     __ffs(dd->enable_mask)) &&
979 	    dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >>
980 				   __ffs(dd->mult_mask)) &&
981 	    dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >>
982 				   __ffs(dd->div1_mask)) + 1) {
983 		/* nothing to be done */
984 		return;
985 	}
986 
987 	if (clk->context == DPLL_LOCKED)
988 		omap3_noncore_dpll_program(clk, 0);
989 	else
990 		_omap3_dpll_write_clken(clk, clk->context);
991 }
992 
993 /* OMAP3/4 non-CORE DPLL clkops */
994 const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
995 	.allow_idle	= omap3_dpll_allow_idle,
996 	.deny_idle	= omap3_dpll_deny_idle,
997 };
998 
999 /**
1000  * omap3_dpll4_set_rate - set rate for omap3 per-dpll
1001  * @hw: clock to change
1002  * @rate: target rate for clock
1003  * @parent_rate: clock rate of the DPLL parent
1004  *
1005  * Check if the current SoC supports the per-dpll reprogram operation
1006  * or not, and then do the rate change if supported. Returns -EINVAL
1007  * if not supported, 0 for success, and potential error codes from the
1008  * clock rate change.
1009  */
1010 int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
1011 			 unsigned long parent_rate)
1012 {
1013 	/*
1014 	 * According to the 12-5 CDP code from TI, "Limitation 2.5"
1015 	 * on 3430ES1 prevents us from changing DPLL multipliers or dividers
1016 	 * on DPLL4.
1017 	 */
1018 	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
1019 		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
1020 		return -EINVAL;
1021 	}
1022 
1023 	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
1024 }
1025 
1026 /**
1027  * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll
1028  * @hw: clock to change
1029  * @rate: target rate for clock
1030  * @parent_rate: rate of the parent clock
1031  * @index: parent index, 0 - reference clock, 1 - bypass clock
1032  *
1033  * Check if the current SoC supports the per-dpll reprogram operation
1034  * or not, and then do the rate + parent change if supported. Returns
1035  * -EINVAL if not supported, 0 for success, and potential error codes
1036  * from the clock rate change.
1037  */
1038 int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
1039 				    unsigned long parent_rate, u8 index)
1040 {
1041 	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
1042 		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
1043 		return -EINVAL;
1044 	}
1045 
1046 	return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
1047 						      index);
1048 }
1049 
1050 /* Apply DM3730 errata sprz319 advisory 2.1. */
1051 static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
1052 				     unsigned long parent_rate)
1053 {
1054 	struct omap3_dpll5_settings {
1055 		unsigned int rate, m, n;
1056 	};
1057 
1058 	static const struct omap3_dpll5_settings precomputed[] = {
1059 		/*
1060 		 * From DM3730 errata advisory 2.1, table 35 and 36.
1061 		 * The N value is increased by 1 compared to the tables as the
1062 		 * errata lists register values while last_rounded_field is the
1063 		 * real divider value.
1064 		 */
1065 		{ 12000000,  80,  0 + 1 },
1066 		{ 13000000, 443,  5 + 1 },
1067 		{ 19200000,  50,  0 + 1 },
1068 		{ 26000000, 443, 11 + 1 },
1069 		{ 38400000,  25,  0 + 1 }
1070 	};
1071 
1072 	const struct omap3_dpll5_settings *d;
1073 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
1074 	struct dpll_data *dd;
1075 	unsigned int i;
1076 
1077 	for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
1078 		if (parent_rate == precomputed[i].rate)
1079 			break;
1080 	}
1081 
1082 	if (i == ARRAY_SIZE(precomputed))
1083 		return false;
1084 
1085 	d = &precomputed[i];
1086 
1087 	/* Update the M, N and rounded rate values and program the DPLL. */
1088 	dd = clk->dpll_data;
1089 	dd->last_rounded_m = d->m;
1090 	dd->last_rounded_n = d->n;
1091 	dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
1092 	omap3_noncore_dpll_program(clk, 0);
1093 
1094 	return true;
1095 }
1096 
1097 /**
1098  * omap3_dpll5_set_rate - set rate for omap3 dpll5
1099  * @hw: clock to change
1100  * @rate: target rate for clock
1101  * @parent_rate: rate of the parent clock
1102  *
1103  * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if
1104  * the DPLL is used for USB host (detected through the requested rate).
1105  */
1106 int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
1107 			 unsigned long parent_rate)
1108 {
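	/*
	 * The USB host use of DPLL5 is detected purely by the requested
	 * rate, OMAP3_DPLL5_FREQ_FOR_USBHOST * 8; only then is the sprz319
	 * erratum workaround attempted.
	 */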
1109 	if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
1110 		if (omap3_dpll5_apply_errata(hw, parent_rate))
1111 			return 0;
1112 	}
1113 
1114 	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
1115 }
1116