/*
 * Synopsys HSDK SDP Generic PLL clock driver
 *
 * Copyright (C) 2017 Synopsys
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CGU_PLL_CTRL	0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS	0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS	0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON	0x00C /* ARC PLL monitor register */

#define CGU_PLL_CTRL_ODIV_SHIFT		2
#define CGU_PLL_CTRL_IDIV_SHIFT		4
#define CGU_PLL_CTRL_FBDIV_SHIFT	9
#define CGU_PLL_CTRL_BAND_SHIFT		20

#define CGU_PLL_CTRL_ODIV_MASK		GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK		GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK		GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)

#define CGU_PLL_CTRL_PD			BIT(0)
#define CGU_PLL_CTRL_BYPASS		BIT(1)

#define CGU_PLL_STATUS_LOCK		BIT(0)
#define CGU_PLL_STATUS_ERR		BIT(1)

#define HSDK_PLL_MAX_LOCK_TIME		100 /* 100 us */

#define CGU_PLL_SOURCE_MAX		1

#define CORE_IF_CLK_THRESHOLD_HZ	500000000
#define CREG_CORE_IF_CLK_DIV_1		0x0
#define CREG_CORE_IF_CLK_DIV_2		0x1
struct hsdk_pll_cfg {
	u32 rate;
	u32 idiv;
	u32 fbdiv;
	u32 odiv;
	u32 band;
};
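
/*
 * Each table entry maps a requested rate to raw divider fields. Using the
 * decoding in hsdk_pll_recalc_rate() below, the output rate is:
 *
 *	Fout = Fin * 2 * (fbdiv + 1) / ((idiv + 1) * 2^odiv)
 *
 * Worked example for { 1000000000, 1, 29, 0, 0 }, assuming a 33.33 MHz
 * reference clock (the actual reference depends on the PLL instance):
 *
 *	33.33 MHz * 2 * 30 / (2 * 1) = 33.33 MHz * 30 ~= 1000 MHz
 */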

static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
	{ 100000000, 0, 11, 3, 0 },
	{ 133000000, 0, 15, 3, 0 },
	{ 200000000, 1, 47, 3, 0 },
	{ 233000000, 1, 27, 2, 0 },
	{ 300000000, 1, 35, 2, 0 },
	{ 333000000, 1, 39, 2, 0 },
	{ 400000000, 1, 47, 2, 0 },
	{ 500000000, 0, 14, 1, 0 },
	{ 600000000, 0, 17, 1, 0 },
	{ 700000000, 0, 20, 1, 0 },
	{ 800000000, 0, 23, 1, 0 },
	{ 900000000, 1, 26, 0, 0 },
	{ 1000000000, 1, 29, 0, 0 },
	{ 1100000000, 1, 32, 0, 0 },
	{ 1200000000, 1, 35, 0, 0 },
	{ 1300000000, 1, 38, 0, 0 },
	{ 1400000000, 1, 41, 0, 0 },
	{ 1500000000, 1, 44, 0, 0 },
	{ 1600000000, 1, 47, 0, 0 },
	{}
};

static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
	{ 297000000, 0, 21, 2, 0 },
	{ 540000000, 0, 19, 1, 0 },
	{ 594000000, 0, 21, 1, 0 },
	{}
};

struct hsdk_pll_clk {
	struct clk_hw hw;
	void __iomem *regs;
	void __iomem *spec_regs;
	const struct hsdk_pll_devdata *pll_devdata;
	struct device *dev;
};

struct hsdk_pll_devdata {
	const struct hsdk_pll_cfg *pll_cfg;
	int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
			   const struct hsdk_pll_cfg *cfg);
};

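/*
 * The two update_rate implementations below differ only in that the core
 * PLL variant also reprograms the core interface clock divider (through
 * spec_regs) around the PLL reconfiguration; see
 * hsdk_pll_core_update_rate().
 */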
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);

static const struct hsdk_pll_devdata core_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_core_update_rate,
};

static const struct hsdk_pll_devdata sdt_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static const struct hsdk_pll_devdata hdmi_pll_devdata = {
	.pll_cfg = hdmi_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
	iowrite32(val, clk->regs + reg);
}

static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
	return ioread32(clk->regs + reg);
}

static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
				    const struct hsdk_pll_cfg *cfg)
{
	u32 val = 0;

	/* Powerdown and Bypass bits should be cleared */
	val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
	val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
	val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
	val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;

	dev_dbg(clk->dev, "write configuration: %#x\n", val);

	hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}

static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}

static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}

static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
	return container_of(hw, struct hsdk_pll_clk, hw);
}

static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	u32 val;
	u64 rate;
	u32 idiv, fbdiv, odiv;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);

	val = hsdk_pll_read(clk, CGU_PLL_CTRL);

	dev_dbg(clk->dev, "current configuration: %#x\n", val);

	/* Check if PLL is disabled */
	if (val & CGU_PLL_CTRL_PD)
		return 0;

	/* Check if PLL is bypassed */
	if (val & CGU_PLL_CTRL_BYPASS)
		return parent_rate;

	/* input divider = reg.idiv + 1 */
	idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
	/* fb divider = 2*(reg.fbdiv + 1) */
	fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
	/* output divider = 2^(reg.odiv) */
	odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);

	rate = (u64)parent_rate * fbdiv;
	do_div(rate, idiv * odiv);

	return rate;
}

static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	int i;
	unsigned long best_rate;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	if (pll_cfg[0].rate == 0)
		return -EINVAL;

	best_rate = pll_cfg[0].rate;

	for (i = 1; pll_cfg[i].rate != 0; i++) {
		if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
			best_rate = pll_cfg[i].rate;
	}

	dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);

	return best_rate;
}

static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait for the CGU to relock, then check the error status.
	 * If the PLL is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	return 0;
}

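/*
 * Core PLL rate update: the interface clock divider in spec_regs is
 * switched to div-by-2 before programming a rate above the 500 MHz
 * threshold, and switched back to div-by-1 only after the PLL has locked
 * at a rate at or below the threshold, so the interface clock stays
 * within its limit while the PLL retunes.
 */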
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	/*
	 * When the core clock exceeds 500MHz, the divider for the interface
	 * clock must be programmed to div-by-2.
	 */
	if (rate > CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);

	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait for the CGU to relock, then check the error status.
	 * If the PLL is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	/*
	 * Program the divider back to div-by-1 if we successfully set the
	 * core clock at or below the 500MHz threshold.
	 */
	if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);

	return 0;
}

static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	int i;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	for (i = 0; pll_cfg[i].rate != 0; i++) {
		if (pll_cfg[i].rate == rate) {
			return clk->pll_devdata->update_rate(clk, rate,
							     &pll_cfg[i]);
		}
	}

	dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
		parent_rate);

	return -EINVAL;
}

static const struct clk_ops hsdk_pll_ops = {
	.recalc_rate = hsdk_pll_recalc_rate,
	.round_rate = hsdk_pll_round_rate,
	.set_rate = hsdk_pll_set_rate,
};

static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *mem;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };
	struct device *dev = &pdev->dev;

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pll_clk->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(pll_clk->regs))
		return PTR_ERR(pll_clk->regs);

	init.name = dev->of_node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(dev->of_node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(dev->of_node);
	if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
		dev_err(dev, "wrong number of clock parents: %u\n", num_parents);
		return -EINVAL;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->dev = dev;
	pll_clk->pll_devdata = of_device_get_match_data(dev);

	if (!pll_clk->pll_devdata) {
		dev_err(dev, "No OF match data provided\n");
		return -EINVAL;
	}

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret) {
		dev_err(dev, "failed to register %s clock\n", init.name);
		return ret;
	}

	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
				      &pll_clk->hw);
}

static int hsdk_pll_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
	int ret;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return;

	pll_clk->regs = of_iomap(node, 0);
	if (!pll_clk->regs) {
		pr_err("failed to map pll registers\n");
		goto err_free_pll_clk;
	}

	pll_clk->spec_regs = of_iomap(node, 1);
	if (!pll_clk->spec_regs) {
		pr_err("failed to map pll special registers\n");
		goto err_unmap_comm_regs;
	}

	init.name = node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(node);
	if (num_parents > CGU_PLL_SOURCE_MAX) {
		pr_err("too many clock parents: %u\n", num_parents);
		goto err_unmap_spec_regs;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->pll_devdata = &core_pll_devdata;

	ret = clk_hw_register(NULL, &pll_clk->hw);
	if (ret) {
		pr_err("failed to register %pOFn clock\n", node);
		goto err_unmap_spec_regs;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
	if (ret) {
		pr_err("failed to add hw provider for %pOFn clock\n", node);
		goto err_unmap_spec_regs;
	}

	return;

err_unmap_spec_regs:
	iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
	iounmap(pll_clk->regs);
err_free_pll_clk:
	kfree(pll_clk);
}

/* The core PLL is needed early for the ARC CPU timers */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
	       of_hsdk_pll_clk_setup);
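
/*
 * Illustrative device tree node for the early-registered core PLL (the
 * addresses and labels below are placeholders, not taken from a real
 * binding document); the second "reg" region is the core interface clock
 * divider register accessed through spec_regs:
 *
 *	core_clk: core-clk@0 {
 *		compatible = "snps,hsdk-core-pll-clock";
 *		reg = <0x00 0x10>, <0x14b8 0x4>;
 *		#clock-cells = <0>;
 *		clocks = <&input_clk>;
 *	};
 */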

static const struct of_device_id hsdk_pll_clk_id[] = {
	{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata},
	{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata},
	{ }
};

static struct platform_driver hsdk_pll_clk_driver = {
	.driver = {
		.name = "hsdk-gp-pll-clock",
		.of_match_table = hsdk_pll_clk_id,
	},
	.probe = hsdk_pll_clk_probe,
	.remove = hsdk_pll_clk_remove,
};
builtin_platform_driver(hsdk_pll_clk_driver);