// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"

#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))

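/*
 * Policy for mapping a requested rate onto the frequency table: CEIL picks
 * the lowest table entry at or above the request, FLOOR the highest entry
 * at or below it.
 */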
enum freq_policy {
	FLOOR,
	CEIL,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}

static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 cfg;
	int i, ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		goto err;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

err:
	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}

static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}

static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
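	/*
	 * The divider field uses a half-integer encoding: a register value
	 * of hid_div divides the parent rate by (hid_div + 1) / 2, hence
	 * the factor of two here.
	 */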
	if (hid_div)
		rate = mult_frac(rate, 2, hid_div + 1);

	if (mode)
		rate = mult_frac(rate, m, n);

	return rate;
}

static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
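		/* The N register stores ~(N - M); undo the inversion. */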
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}

static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

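	/*
	 * With CLK_SET_RATE_PARENT, work out what the parent must run at:
	 * scale the table frequency back up by the half-integer pre-divider
	 * and the inverse of the m/n ratio.
	 */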
	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}

static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/*
		 * Calculate the 2d value: 2d = n gives a ~50% duty cycle;
		 * clamp it between m and 2 * (n - m), the range the M/N
		 * counter supports.
		 */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				  mask, cfg);
}

static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	int ret;

	ret = __clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}

static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}

static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	/* Duty-cycle cannot be modified if MND divider is in bypass mode. */
	if (!(cfg & CFG_MODE_MASK))
		return -EINVAL;

	n = (~(notn_m) + m) & mask;

	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/*
	 * Check bit widths of 2d. If D is too big reduce duty cycle.
	 * Also make sure it is never zero.
	 */
	d = clamp_val(d, 1, mask);

	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}

const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);

const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);

struct frac_entry {
	int num;
	int den;
};

static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};

static struct frac_entry frac_table_810m[] = {	/* Link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};

static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

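	/* Pick the fraction table that matches the parent PLL rate. */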
	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}

static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}

const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);

static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

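	/* Convert to the half-integer divider encoding: reg = 2 * div - 1. */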
	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);

static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

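	/* Keep whichever parent the RCG is currently running from. */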
	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);

static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }
};

static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

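	/* Accept a parent rate within +/- 100 kHz of the ideal request. */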
	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}

static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);

static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function ping-pongs the RCG between PLLs: if we don't have
	 * at least one fixed PLL and two variable ones, it's not going to
	 * work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

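	/*
	 * Ping-pong between the two variable PLLs so the new rate is always
	 * programmed on a PLL that is not currently driving the RCG.
	 */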
	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}

static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}

const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);

static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}

static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}

static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * If the clock is disabled, update the CFG, M, N and D registers
	 * but don't hit the update bit of the CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit, because the required configuration has
	 * already been written in clk_rcg2_shared_set_rate().
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store the current configuration, as switching to the safe source
	 * will clear the SRC and DIV fields of the CFG register.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}

const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);

/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}

static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}

static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}

static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
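	/* The current perf level sits in bits [4:1] of the DFS command register. */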
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate really is the rate of the current parent,
	 * because we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table in
	 * determine_rate because we can't guarantee the parents will be
	 * registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};

static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * The rate changes when the consumer writes a register in its own
	 * I/O region, so the cached rate can't be trusted.
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}

int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);

static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

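	/* Approximate rate / parent_rate as m/n within the MND field width. */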
	rational_best_approximation(parent_rate, rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}

static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}

const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);