/*
 * OMAP DPLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include "clock.h"

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static const struct clk_ops dpll_m4xen_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap4_dpll_regm4xen_recalc,
	.round_rate = &omap4_dpll_regm4xen_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap4_dpll_regm4xen_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
};
#else
static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif

#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
	defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
	defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static const struct clk_ops dpll_core_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
};

static const struct clk_ops dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap3_dpll_recalc,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
};

static const struct clk_ops dpll_no_gate_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
};
#else
static const struct clk_ops dpll_core_ck_ops = {};
static const struct clk_ops dpll_ck_ops = {};
static const struct clk_ops dpll_no_gate_ck_ops = {};
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
#endif

#ifdef CONFIG_ARCH_OMAP2
static const struct clk_ops omap2_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap2_dpllcore_recalc,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap2_reprogram_dpllcore,
};
#else
static const struct clk_ops omap2_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.round_rate = &omap2_dpll_round_rate,
};
#else
static const struct clk_ops omap3_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll5_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll5_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll_per_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll4_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};
#endif

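/* ops for the DPLL x2 output clocks: only rate recalculation is needed */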
static const struct clk_ops dpll_x2_ck_ops = {
	.recalc_rate = &omap3_clkoutx2_recalc,
};

/**
 * _register_dpll - low level registration of a DPLL clock
 * @hw: hardware clock definition for the clock
 * @node: device node for the clock
 *
 * Finalizes the DPLL registration process. If either clk-ref or
 * clk-bypass is still missing, the clock is added to the retry list and
 * its initialization is attempted again at a later stage.
 */
static void __init _register_dpll(struct clk_hw *hw,
				  struct device_node *node)
{
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *dd = clk_hw->dpll_data;
	struct clk *clk;

	dd->clk_ref = of_clk_get(node, 0);
	dd->clk_bypass = of_clk_get(node, 1);

	if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
		pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	/* register the clock */
	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk)) {
		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(clk_hw->hw.init->parent_names);
		kfree(clk_hw->hw.init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(clk_hw->hw.init->parent_names);
	kfree(clk_hw->hw.init);
	kfree(clk_hw);
}

#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
static void __iomem *_get_reg(u8 module, u16 offset)
{
	u32 reg;
	struct clk_omap_reg *reg_setup;

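	/*
	 * Pack the CM module index and register offset into a value that is
	 * carried around as a (void __iomem *) cookie; the low-level OMAP
	 * clock code decodes it back into a struct clk_omap_reg before any
	 * register access (the caller sets MEMMAP_ADDRESSING for this).
	 */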
	reg_setup = (struct clk_omap_reg *)&reg;
199
200 reg_setup->index = module;
201 reg_setup->offset = offset;
202
203 return (void __iomem *)reg;
204 }
205
struct clk *ti_clk_register_dpll(struct ti_clk *setup)
{
	struct clk_hw_omap *clk_hw;
	struct clk_init_data init = { NULL };
	struct dpll_data *dd;
	struct clk *clk;
	struct ti_clk_dpll *dpll;
	const struct clk_ops *ops = &omap3_dpll_ck_ops;
	struct clk *clk_ref;
	struct clk *clk_bypass;

	dpll = setup->data;

	if (dpll->num_parents < 2)
		return ERR_PTR(-EINVAL);

	clk_ref = clk_get_sys(NULL, dpll->parents[0]);
	clk_bypass = clk_get_sys(NULL, dpll->parents[1]);

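	/*
	 * The parent clocks may not be registered yet; -EAGAIN lets the
	 * legacy clock init code retry this DPLL at a later point.
	 */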
	if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
		return ERR_PTR(-EAGAIN);

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!dd || !clk_hw) {
		clk = ERR_PTR(-ENOMEM);
		goto cleanup;
	}

	clk_hw->dpll_data = dd;
	clk_hw->ops = &clkhwops_omap3_dpll;
	clk_hw->hw.init = &init;
	clk_hw->flags = MEMMAP_ADDRESSING;

	init.name = setup->name;
	init.ops = ops;

	init.num_parents = dpll->num_parents;
	init.parent_names = dpll->parents;

	dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
	dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
	dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
	dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);

	dd->modes = dpll->modes;
	dd->div1_mask = dpll->div1_mask;
	dd->idlest_mask = dpll->idlest_mask;
	dd->mult_mask = dpll->mult_mask;
	dd->autoidle_mask = dpll->autoidle_mask;
	dd->enable_mask = dpll->enable_mask;
	dd->sddiv_mask = dpll->sddiv_mask;
	dd->dco_mask = dpll->dco_mask;
	dd->max_divider = dpll->max_divider;
	dd->min_divider = dpll->min_divider;
	dd->max_multiplier = dpll->max_multiplier;
	dd->auto_recal_bit = dpll->auto_recal_bit;
	dd->recal_en_bit = dpll->recal_en_bit;
	dd->recal_st_bit = dpll->recal_st_bit;

	dd->clk_ref = clk_ref;
	dd->clk_bypass = clk_bypass;

	/* select specialized ops through init.ops so the choice takes effect */
	if (dpll->flags & CLKF_CORE)
		init.ops = &omap3_dpll_core_ck_ops;

	if (dpll->flags & CLKF_PER)
		init.ops = &omap3_dpll_per_ck_ops;

	if (dpll->flags & CLKF_J_TYPE)
		dd->flags |= DPLL_J_TYPE;

	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk))
		return clk;

cleanup:
	kfree(dd);
	kfree(clk_hw);
	return clk;
}
#endif

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
	defined(CONFIG_SOC_AM43XX)
/**
 * _register_dpll_x2 - Registers a DPLLx2 clock
 * @node: device node for this clock
 * @ops: clk_ops for this clock
 * @hw_ops: clk_hw_ops for this clock
 *
 * Initializes a DPLL x2 clock from device tree data.
 */
static void _register_dpll_x2(struct device_node *node,
			      const struct clk_ops *ops,
			      const struct clk_hw_omap_ops *hw_ops)
{
	struct clk *clk;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *clk_hw;
	const char *name = node->name;
	const char *parent_name;

	parent_name = of_clk_get_parent_name(node, 0);
	if (!parent_name) {
		pr_err("%s must have parent\n", node->name);
		return;
	}

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->ops = hw_ops;
	clk_hw->hw.init = &init;

	init.name = name;
	init.ops = ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* register the clock */
	clk = clk_register(NULL, &clk_hw->hw);

	if (IS_ERR(clk)) {
		kfree(clk_hw);
	} else {
		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
	}
}
#endif

/**
 * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
 * @node: device node containing the DPLL info
 * @ops: ops for the DPLL
 * @ddt: DPLL data template to use
 *
 * Initializes a DPLL clock from device tree data.
 */
static void __init of_ti_dpll_setup(struct device_node *node,
				    const struct clk_ops *ops,
				    const struct dpll_data *ddt)
{
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	struct dpll_data *dd = NULL;
	u8 dpll_mode = 0;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!dd || !clk_hw || !init)
		goto cleanup;

	memcpy(dd, ddt, sizeof(*dd));

	clk_hw->dpll_data = dd;
	clk_hw->ops = &clkhwops_omap3_dpll;
	clk_hw->hw.init = init;
	clk_hw->flags = MEMMAP_ADDRESSING;

	init->name = node->name;
	init->ops = ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents < 1) {
		pr_err("%s must have parent(s)\n", node->name);
		goto cleanup;
	}

	parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	of_clk_parent_fill(node, parent_names, init->num_parents);

	init->parent_names = parent_names;

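	/*
	 * Register indices in the DT "reg" property: 0 = control,
	 * 1 = idlest, 2 = mult-div1, 3 = autoidle.  OMAP2 DPLLs have no
	 * idlest register, which is handled as a special case below.
	 */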
	dd->control_reg = ti_clk_get_reg_addr(node, 0);

	/*
	 * Special case for OMAP2 DPLLs: the register order differs because
	 * there is no idlest register, and different clkhwops are needed.
	 * This is detected from the missing idlest_mask.
	 */
	if (!dd->idlest_mask) {
		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1);
#ifdef CONFIG_ARCH_OMAP2
		clk_hw->ops = &clkhwops_omap2xxx_dpll;
		omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
#endif
	} else {
		dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
		if (IS_ERR(dd->idlest_reg))
			goto cleanup;

		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
	}

	if (IS_ERR(dd->control_reg) || IS_ERR(dd->mult_div1_reg))
		goto cleanup;

	if (dd->autoidle_mask) {
		dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
		if (IS_ERR(dd->autoidle_reg))
			goto cleanup;
	}

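	/*
	 * Optional DT properties override the template's default set of
	 * allowed DPLL modes (low-power stop, low-power bypass, lock).
	 */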
	if (of_property_read_bool(node, "ti,low-power-stop"))
		dpll_mode |= 1 << DPLL_LOW_POWER_STOP;

	if (of_property_read_bool(node, "ti,low-power-bypass"))
		dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;

	if (of_property_read_bool(node, "ti,lock"))
		dpll_mode |= 1 << DPLL_LOCKED;

	if (dpll_mode)
		dd->modes = dpll_mode;

	_register_dpll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(dd);
	kfree(parent_names);
	kfree(init);
	kfree(clk_hw);
}

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
}
CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
	       of_ti_omap4_dpll_x2_setup);
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
}
CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
	       of_ti_am3_dpll_x2_setup);
#endif

#ifdef CONFIG_ARCH_OMAP3
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	if ((of_machine_is_compatible("ti,omap3630") ||
	     of_machine_is_compatible("ti,omap36xx")) &&
	    !strcmp(node->name, "dpll5_ck"))
		of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
	else
		of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
	       of_ti_omap3_dpll_setup);

static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 16,
		.div1_mask = 0x7f << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
	};

	of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
	       of_ti_omap3_core_dpll_setup);

static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf00000,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
	       of_ti_omap3_per_dpll_setup);

static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 128,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.dco_mask = 0xe << 20,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
	       of_ti_omap3_per_jtype_dpll_setup);
#endif

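/*
 * Illustrative DT node matched by the "ti,omap4-dpll-clock" compatible
 * below (offsets as used for dpll_mpu_ck on OMAP4; not part of this file).
 * The reg entries are, in order, the control, idlest, mult-div1 and
 * autoidle register offsets:
 *
 *	dpll_mpu_ck: dpll_mpu_ck@160 {
 *		#clock-cells = <0>;
 *		compatible = "ti,omap4-dpll-clock";
 *		clocks = <&sys_clkin_ck>, <&div_mpu_hs_clk>;
 *		reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
 *	};
 */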
static void __init of_ti_omap4_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
	       of_ti_omap4_dpll_setup);

static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.dcc_mask = BIT(22),
		.dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
	       of_ti_omap5_mpu_dpll_setup);

static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
	       of_ti_omap4_core_dpll_setup);

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.m4xen_mask = 0x800,
		.lpmode_mask = 1 << 10,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
	       of_ti_omap4_m4xen_dpll_setup);

static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0xff,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
	       of_ti_omap4_jtype_dpll_setup);
#endif

static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
	       of_ti_am3_no_gate_dpll_setup);

static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 2,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
	       of_ti_am3_jtype_dpll_setup);

static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
	       "ti,am3-dpll-no-gate-j-type-clock",
	       of_ti_am3_no_gate_jtype_dpll_setup);

static void __init of_ti_am3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);

static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
	       of_ti_am3_core_dpll_setup);

static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.enable_mask = 0x3,
		.mult_mask = 0x3ff << 12,
		.div1_mask = 0xf << 8,
		.max_divider = 16,
		.min_divider = 1,
	};

	of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
	       of_ti_omap2_core_dpll_setup);