// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dm/read.h>
#include <dt-structs.h>
#include <errno.h>
#include <linux/clk-provider.h>

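/*
 * Fetch the clk_ops of the driver bound to a clock provider device.
 * Every uclass entry point below dispatches to the provider through
 * this helper.
 */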
static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}

#if CONFIG_IS_ENABLED(OF_CONTROL)
# if CONFIG_IS_ENABLED(OF_PLATDATA)
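/*
 * OF_PLATDATA variant: clock references come from dtoc-generated
 * phandle_1_arg cells rather than a runtime-parsed device tree, so only
 * index 0 of a single clock provider is supported here.
 */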
int clk_get_by_index_platdata(struct udevice *dev, int index,
			      struct phandle_1_arg *cells, struct clk *clk)
{
	int ret;

	if (index != 0)
		return -ENOSYS;
	ret = uclass_get_device(UCLASS_CLK, 0, &clk->dev);
	if (ret)
		return ret;
	clk->id = cells[0].arg[0];

	return 0;
}
# else
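/*
 * Default translation of a clock specifier into a struct clk, used when
 * the provider driver does not supply its own .of_xlate op: at most one
 * cell is accepted and, if present, it becomes the clock ID.
 */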
static int clk_of_xlate_default(struct clk *clk,
				struct ofnode_phandle_args *args)
{
	debug("%s(clk=%p)\n", __func__, clk);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		clk->id = args->args[0];
	else
		clk->id = 0;

	clk->data = 0;

	return 0;
}

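/*
 * Common tail of the clk_get_by_index*() lookups: resolve the provider
 * device from the parsed phandle arguments, translate the specifier via
 * of_xlate (or the default above) and finally request the clock.
 */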
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
75 debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
76 __func__, ret);
		return ret;
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);
	return ret;
}

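/*
 * Look up one entry of a clock-specifier list property (e.g. "clocks" or
 * "assigned-clocks") on @dev and resolve it into @clk.
 */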
static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
		debug("%s: dev_read_phandle_with_args() failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
					 index, &args);

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
					     index, &args);

	return clk_get_by_index_tail(ret, node, &args, "clocks",
				     index, clk);
}

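/*
 * Request every clock listed in the "clocks" property of @dev. On any
 * failure, all clocks obtained so far are released again.
 */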
int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells");
	if (count < 1)
		return count;

	bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
	if (!bulk->clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = clk_get_by_index(dev, i, &bulk->clks[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = clk_release_all(bulk->clks, bulk->count);
	if (err)
		debug("%s: could not release all clocks for %p\n",
		      __func__, dev);

	return ret;
}

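/*
 * Apply the "assigned-clock-parents" property of @dev: each listed clock
 * from "assigned-clocks" is reparented to the corresponding entry. The
 * stage argument defers entries that the provider owns itself until
 * after it has been probed (see clk_uclass_post_probe()).
 */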
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells");
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * A clock provider trying to reparent one of its own clocks
		 * cannot do so before it has been probed; defer this entry
		 * to the post-probe stage.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		ret = clk_set_parent(&clk, &parent_clk);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}

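/*
 * Apply the "assigned-clock-rates" property of @dev: each non-zero rate
 * is set on the corresponding "assigned-clocks" entry, again split into
 * pre- and post-probe stages for clocks owned by the provider itself.
 */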
static int clk_set_default_rates(struct udevice *dev, int stage)
{
	struct clk clk;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			continue;
		}

		/*
		 * A clock provider trying to program one of its own clocks
		 * cannot do so before it has been probed; defer this entry
		 * to the post-probe stage.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set the rates twice */
			continue;

		ret = clk_set_rate(&clk, rates[index]);

		if (ret < 0) {
			debug("%s: failed to set rate on clock index %d (%ld) for %s\n",
			      __func__, index, clk.id, dev_read_name(dev));
			break;
		}
	}

fail:
	free(rates);
	return ret;
}

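/*
 * Process the assigned-clocks/assigned-clock-parents/assigned-clock-rates
 * properties of @dev. Called with stage 0 before the device is probed and
 * with stage 1 afterwards for clock providers.
 */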
int clk_set_defaults(struct udevice *dev, int stage)
{
	int ret;

	if (!dev_of_valid(dev))
		return 0;

	/* If we are not in SPL and not yet relocated, don't take any action. */
	if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
		return 0;

	debug("%s(%s)\n", __func__, dev_read_name(dev));

	ret = clk_set_default_parents(dev, stage);
	if (ret)
		return ret;

	ret = clk_set_default_rates(dev, stage);
	if (ret < 0)
		return ret;

	return 0;
}
# endif /* OF_PLATDATA */

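/*
 * Look up a clock by its name in the "clock-names" property of @dev and
 * request it by the matching index in "clocks".
 */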
int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
	int index;

	debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
	clk->dev = NULL;

	index = dev_read_stringlist_search(dev, "clock-names", name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index(dev, index, clk);
}

int clk_release_all(struct clk *clk, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

		/* check if clock has been previously requested */
		if (!clk[i].dev)
			continue;

		ret = clk_disable(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;

		ret = clk_free(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

#endif /* OF_CONTROL */

int clk_request(struct udevice *dev, struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(dev);

	clk->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(clk);
}

int clk_free(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->free)
		return 0;

	return ops->free(clk);
}

ulong clk_get_rate(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->get_rate)
		return -ENOSYS;

	return ops->get_rate(clk);
}

struct clk *clk_get_parent(struct clk *clk)
{
	struct udevice *pdev;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk)
		return NULL;

	pdev = dev_get_parent(clk->dev);
	pclk = dev_get_clk_ptr(pdev);
	if (!pclk)
		return ERR_PTR(-ENODEV);

	return pclk;
}

long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk)
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the 'rate' if not already set or if CLK_GET_RATE_NOCACHE is set */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk)
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_rate)
		return -ENOSYS;

	return ops->set_rate(clk, rate);
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
	if (!clk)
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_parent)
		return -ENOSYS;

	return ops->set_parent(clk, parent);
}

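/*
 * Enable a clock. With CLK_CCF enabled this keeps a per-clock enable
 * count and enables the parent clock first; otherwise the call is passed
 * straight to the provider's .enable op.
 */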
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Treat id 0 as an invalid clk, e.g. the dummy clock */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_enable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

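/*
 * Disable a clock. With CLK_CCF enabled the enable count is decremented
 * and the clock (and then its parent) is only gated once the count drops
 * to zero; otherwise the call goes straight to the provider's .disable op.
 */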
int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_disable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

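/*
 * Scan all devices in UCLASS_CLK for a clock whose ID matches @id; used
 * e.g. by the CCF enable/disable paths above.
 */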
int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: one of the clk pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}

static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}

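/*
 * devres-managed variant of clk_get_by_name(): the returned clock is
 * released automatically when @dev is removed.
 */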
struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
	int rc;
	struct clk *clk;

	clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
	if (unlikely(!clk))
		return ERR_PTR(-ENOMEM);

	rc = clk_get_by_name(dev, id, clk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, clk);
	return clk;
}

struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (IS_ERR(clk))
		return NULL;

	return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
	int rc;

	if (!clk)
		return;

	rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
	WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * When a clock provider is probed, call clk_set_defaults() again
	 * after the device is probed. This takes care of the case where
	 * the DT uses assigned-clocks to set up default parents and rates
	 * on the provider's own clocks.
	 */
	clk_set_defaults(dev, 1);

	return 0;
}

UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};