• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bitops.h>
8 #include <linux/err.h>
9 #include <linux/bug.h>
10 #include <linux/export.h>
11 #include <linux/clk-provider.h>
12 #include <linux/delay.h>
13 #include <linux/rational.h>
14 #include <linux/regmap.h>
15 #include <linux/math64.h>
16 #include <linux/slab.h>
17 
18 #include <asm/div64.h>
19 
20 #include "clk-rcg.h"
21 #include "common.h"
22 
23 #define CMD_REG			0x0
24 #define CMD_UPDATE		BIT(0)
25 #define CMD_ROOT_EN		BIT(1)
26 #define CMD_DIRTY_CFG		BIT(4)
27 #define CMD_DIRTY_N		BIT(5)
28 #define CMD_DIRTY_M		BIT(6)
29 #define CMD_DIRTY_D		BIT(7)
30 #define CMD_ROOT_OFF		BIT(31)
31 
32 #define CFG_REG			0x4
33 #define CFG_SRC_DIV_SHIFT	0
34 #define CFG_SRC_SEL_SHIFT	8
35 #define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
36 #define CFG_MODE_SHIFT		12
37 #define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
38 #define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
39 #define CFG_HW_CLK_CTRL_MASK	BIT(20)
40 
41 #define M_REG			0x8
42 #define N_REG			0xc
43 #define D_REG			0x10
44 
45 #define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
46 #define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
47 #define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
48 #define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
49 
50 /* Dynamic Frequency Scaling */
51 #define MAX_PERF_LEVEL		8
52 #define SE_CMD_DFSR_OFFSET	0x14
53 #define SE_CMD_DFS_EN		BIT(0)
54 #define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
55 #define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
56 #define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))
57 
58 enum freq_policy {
59 	FLOOR,
60 	CEIL,
61 };
62 
clk_rcg2_is_enabled(struct clk_hw * hw)63 static int clk_rcg2_is_enabled(struct clk_hw *hw)
64 {
65 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
66 	u32 cmd;
67 	int ret;
68 
69 	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
70 	if (ret)
71 		return ret;
72 
73 	return (cmd & CMD_ROOT_OFF) == 0;
74 }
75 
/*
 * Read the SRC_SEL field of the CFG register and translate it to a clk
 * framework parent index via the rcg's parent_map. Falls back to index 0
 * (with a debug message) if the read fails or the selected source is not
 * in the map.
 */
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 cfg;
	int i, ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		goto err;

	/* Isolate the source-select field */
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

err:
	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}
99 
/*
 * Latch the previously written CFG/M/N/D values into the running RCG by
 * setting CMD_UPDATE, then poll until the hardware clears the bit to
 * acknowledge that the new configuration took effect.
 *
 * Returns 0 on success, a regmap error code, or -EBUSY if the bit is
 * still set after ~500us.
 */
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}
125 
/* Program the SRC_SEL field for the requested parent and latch it. */
static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 sel = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	int ret;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, sel);
	if (ret)
		return ret;

	return update_config(rcg);
}
139 
/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 *
 * hid_div is the raw register encoding (2 * divider - 1), hence the
 * rate * 2 / (hid_div + 1) step below; a non-zero mode selects the
 * fractional m/n (MND) path.
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	/* Undo the half-integer pre-divider encoding */
	if (hid_div)
		rate = mult_frac(rate, 2, hid_div + 1);

	/* Apply the fractional multiplier when MND mode is active */
	if (mode)
		rate = mult_frac(rate, m, n);

	return rate;
}
158 
/*
 * Recompute this clock's rate from hardware: read CFG (and M/N when this
 * RCG has MND counters) and feed the decoded divider/mode into calc_rate().
 */
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		/* The N register holds ~(n - m); undo that encoding */
		n =  ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
185 
/*
 * Shared determine_rate helper: pick a freq_tbl entry for req->rate using
 * the given rounding policy, resolve its parent, and fill in the request.
 * With CLK_SET_RATE_PARENT, works backwards from f->freq through the
 * pre-divider and m/n factors to compute the rate to ask of the parent.
 */
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			/* Invert the 2*div-1 pre-divider encoding */
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			/* Invert the m/n scaling: parent = rate * n / m */
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate =  clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
242 
clk_rcg2_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)243 static int clk_rcg2_determine_rate(struct clk_hw *hw,
244 				   struct clk_rate_request *req)
245 {
246 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
247 
248 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
249 }
250 
clk_rcg2_determine_floor_rate(struct clk_hw * hw,struct clk_rate_request * req)251 static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
252 					 struct clk_rate_request *req)
253 {
254 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
255 
256 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
257 }
258 
/*
 * Write the M, N, D and CFG registers for freq_tbl entry @f without
 * toggling the update bit; callers must follow up with update_config()
 * for the new values to take effect on a running RCG.
 */
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		/* The N register is programmed as ~(n - m) */
		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		/* Clamp D into the range the hardware accepts */
		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* Dual-edge (MND) mode is only needed for fractional m/n ratios */
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
					mask, cfg);
}
304 
/* Write the freq_tbl entry to hardware, then latch it with an update. */
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	int ret = __clk_rcg2_configure(rcg, f);

	if (ret)
		return ret;

	return update_config(rcg);
}
315 
__clk_rcg2_set_rate(struct clk_hw * hw,unsigned long rate,enum freq_policy policy)316 static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
317 			       enum freq_policy policy)
318 {
319 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
320 	const struct freq_tbl *f;
321 
322 	switch (policy) {
323 	case FLOOR:
324 		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
325 		break;
326 	case CEIL:
327 		f = qcom_find_freq(rcg->freq_tbl, rate);
328 		break;
329 	default:
330 		return -EINVAL;
331 	}
332 
333 	if (!f)
334 		return -EINVAL;
335 
336 	return clk_rcg2_configure(rcg, f);
337 }
338 
clk_rcg2_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)339 static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
340 			    unsigned long parent_rate)
341 {
342 	return __clk_rcg2_set_rate(hw, rate, CEIL);
343 }
344 
clk_rcg2_set_floor_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)345 static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
346 				   unsigned long parent_rate)
347 {
348 	return __clk_rcg2_set_rate(hw, rate, FLOOR);
349 }
350 
/* Parent comes from the freq table entry, so this is just a CEIL set_rate. */
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
356 
/* Parent comes from the freq table entry, so this is just a FLOOR set_rate. */
static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
362 
/* Standard RCG2 ops: rate requests round up (CEIL) to the freq table. */
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
373 
/* RCG2 ops variant where rate requests round down (FLOOR) instead. */
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
384 
/* An m/n ratio expressed as numerator (num) over denominator (den). */
struct frac_entry {
	int num;
	int den;
};

/* Zero-terminated; used when the parent runs at 675 MHz */
static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};
400 
401 static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
402 	{ 31, 211 },	/* 119 M */
403 	{ 32, 199 },	/* 130.25 M */
404 	{ 63, 307 },	/* 138.50 M */
405 	{ 11, 60 },	/* 148.50 M */
406 	{ 50, 263 },	/* 154 M */
407 	{ 31, 120 },	/* 205.25 M */
408 	{ 119, 359 },	/* 268.50 M */
409 	{ },
410 };
411 
/*
 * Set an eDP pixel rate: scan the fractional table matching the current
 * link (parent) rate for an m/n pair whose implied source rate is within
 * +/-100kHz of parent_rate, then program it while preserving the
 * pre-divider currently in CFG.
 */
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;	/* acceptable mismatch in Hz */
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Source rate this m/n pair would need for @rate */
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		/* Reuse the pre-divider currently programmed in CFG */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
450 
static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/*
	 * The frequency table pins the parent, so @index is ignored and
	 * this reduces to a plain set_rate.
	 */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
457 
clk_edp_pixel_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)458 static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
459 					struct clk_rate_request *req)
460 {
461 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
462 	const struct freq_tbl *f = rcg->freq_tbl;
463 	const struct frac_entry *frac;
464 	int delta = 100000;
465 	s64 request;
466 	u32 mask = BIT(rcg->hid_width) - 1;
467 	u32 hid_div;
468 	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
469 
470 	/* Force the correct parent */
471 	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
472 	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
473 
474 	if (req->best_parent_rate == 810000000)
475 		frac = frac_table_810m;
476 	else
477 		frac = frac_table_675m;
478 
479 	for (; frac->num; frac++) {
480 		request = req->rate;
481 		request *= frac->den;
482 		request = div_s64(request, frac->num);
483 		if ((req->best_parent_rate < (request - delta)) ||
484 		    (req->best_parent_rate > (request + delta)))
485 			continue;
486 
487 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
488 				&hid_div);
489 		hid_div >>= CFG_SRC_DIV_SHIFT;
490 		hid_div &= mask;
491 
492 		req->rate = calc_rate(req->best_parent_rate,
493 				      frac->num, frac->den,
494 				      !!frac->den, hid_div);
495 		return 0;
496 	}
497 
498 	return -EINVAL;
499 }
500 
/* Ops for eDP pixel clocks driven off the fractional m/n tables above. */
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
511 
clk_byte_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)512 static int clk_byte_determine_rate(struct clk_hw *hw,
513 				   struct clk_rate_request *req)
514 {
515 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
516 	const struct freq_tbl *f = rcg->freq_tbl;
517 	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
518 	unsigned long parent_rate, div;
519 	u32 mask = BIT(rcg->hid_width) - 1;
520 	struct clk_hw *p;
521 
522 	if (req->rate == 0)
523 		return -EINVAL;
524 
525 	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
526 	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
527 
528 	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
529 	div = min_t(u32, div, mask);
530 
531 	req->rate = calc_rate(parent_rate, 0, 0, 0, div);
532 
533 	return 0;
534 }
535 
clk_byte_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)536 static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
537 			 unsigned long parent_rate)
538 {
539 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
540 	struct freq_tbl f = *rcg->freq_tbl;
541 	unsigned long div;
542 	u32 mask = BIT(rcg->hid_width) - 1;
543 
544 	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
545 	div = min_t(u32, div, mask);
546 
547 	f.pre_div = div;
548 
549 	return clk_rcg2_configure(rcg, &f);
550 }
551 
static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* The frequency table fixes the parent, so @index is ignored */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
558 
/* Ops for DSI byte clocks with a single, table-fixed parent. */
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
569 
/*
 * determine_rate for byte2 clocks: keep the parent already proposed in the
 * request, round it as close as possible to the target, and derive the
 * best half-integer divider.
 */
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	/* Divider is encoded as 2*div - 1, capped to the register field */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
592 
/*
 * Program a byte2 rate: compute the half-integer divider and reuse the
 * source currently selected in CFG (translated back through parent_map).
 */
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	/* Divider is encoded as 2*div - 1, capped to the register field */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	/* Recover the currently selected source from hardware */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	/* The hardware source isn't in parent_map */
	return -EINVAL;
}
621 
static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* set_rate re-reads the parent selection from the CFG register */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}
628 
/* Ops for byte2 clocks: parent selection is read back from hardware. */
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
639 
/* Candidate m/n ratios for pixel clocks; zero-terminated */
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }
};
648 
/*
 * determine_rate for pixel clocks: try each m/n candidate and accept the
 * first whose required source rate the parent can match within +/-100kHz.
 */
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;	/* acceptable mismatch in Hz */
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		/* Source rate this m/n pair would need for req->rate */
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
			(src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}
671 
/*
 * Set a pixel rate: recover the current source from CFG, then find an m/n
 * candidate whose implied source rate matches parent_rate within +/-100kHz
 * and program it while preserving the existing pre-divider.
 */
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;	/* acceptable mismatch in Hz */
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	/* Recover the currently selected source from hardware */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		/* Source rate this m/n pair would need for @rate */
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
			(parent_rate > (request + delta)))
			continue;

		/* Reuse the pre-divider currently programmed in CFG */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}
713 
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	/* Parent selection is re-read from CFG inside clk_pixel_set_rate() */
	return clk_pixel_set_rate(hw, rate, parent_rate);
}
719 
/* Ops for pixel clocks driven off frac_table_pixel. */
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
730 
/*
 * determine_rate for the gfx3d mux: ping-pong between the two spare PLL
 * sources (p2/p8) on every rate change so the PLL being vacated can be
 * reprogrammed glitch-free, unless XO or the fixed-rate PLL9 satisfies
 * the request directly. The parent indices used here (0, 2, 3, 4) are
 * fixed by the hardware mux wiring.
 */
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { };
	struct clk_hw *p2, *p8, *p9, *xo;
	unsigned long p9_rate;
	int ret;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	p9 = clk_hw_get_parent_by_index(hw, 2);
	p2 = clk_hw_get_parent_by_index(hw, 3);
	p8 = clk_hw_get_parent_by_index(hw, 4);

	/* PLL9 is a fixed rate PLL */
	p9_rate = clk_hw_get_rate(p9);

	/* Cap the request to PLL9's rate */
	parent_req.rate = req->rate = min(req->rate, p9_rate);
	if (req->rate == p9_rate) {
		req->rate = req->best_parent_rate = p9_rate;
		req->best_parent_hw = p9;
		return 0;
	}

	if (req->best_parent_hw == p9) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p8) == req->rate)
			req->best_parent_hw = p8;
		else
			req->best_parent_hw = p2;
	} else if (req->best_parent_hw == p8) {
		req->best_parent_hw = p2;
	} else {
		req->best_parent_hw = p8;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;

	return 0;
}
779 
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/* Just mux it, we don't use the division or m/n hardware */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			   rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT);
	if (ret)
		return ret;

	return update_config(rcg);
}
795 
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * Nothing to do: clk_gfx3d_determine_rate() always selects a
	 * different parent than the current one, so the framework calls
	 * clk_gfx3d_set_rate_and_parent() instead and this path is
	 * effectively unreachable.
	 */
	return 0;
}
806 
/* Ops for the gfx3d PLL ping-pong mux; rate changes are pure re-parenting. */
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
817 
/*
 * Force the RCG root on via CMD_ROOT_EN and poll (up to ~500us) until the
 * root reports enabled. Returns -ETIMEDOUT if it never turns on.
 */
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}
840 
clk_rcg2_clear_force_enable(struct clk_hw * hw)841 static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
842 {
843 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
844 
845 	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
846 					CMD_ROOT_EN, 0);
847 }
848 
/*
 * Apply freq_tbl entry @f while the root is force-enabled, then drop the
 * force-enable vote again so the downstream gate regains control.
 */
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (!ret)
		ret = clk_rcg2_configure(rcg, f);
	if (!ret)
		ret = clk_rcg2_clear_force_enable(hw);

	return ret;
}
865 
/*
 * Set the rate of a shared RCG. If the clock is disabled, only stage the
 * CFG/M/N/D registers (no update toggle — that happens on enable);
 * otherwise force-enable around the full configure for a glitch-free
 * switch.
 */
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the CFG, M, N and D registers
	 * and don't hit the update bit of CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}
885 
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* The freq table entry carries the source, so @index is unused */
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}
891 
/*
 * Enable a shared RCG: force the root on, latch the configuration that
 * clk_rcg2_shared_set_rate() staged earlier, then drop the software vote.
 */
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
911 
/*
 * "Disable" a shared RCG by parking it on the always-on safe source, then
 * cache the previous CFG so the next enable restores the current rate.
 */
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}
943 
/* Ops for RCGs shared with other masters; parked on a safe source when off. */
const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
955 
956 /* Common APIs to be used for DFS based RCGR */
/* Common APIs to be used for DFS based RCGR */

/*
 * Decode DFS perf level @l into freq_tbl entry @f by reading the per-level
 * DFSR register (plus M/N when MND mode is enabled) and translating the
 * selected source through parent_map to find the parent rate.
 */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		/* The N register holds ~(n - m); recover n */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}
1004 
/*
 * Build this RCG's freq_tbl by decoding all MAX_PERF_LEVEL DFS perf levels
 * from hardware. kcalloc keeps the extra terminating entry zeroed.
 */
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}
1021 
clk_rcg2_dfs_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)1022 static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
1023 				   struct clk_rate_request *req)
1024 {
1025 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1026 	int ret;
1027 
1028 	if (!rcg->freq_tbl) {
1029 		ret = clk_rcg2_dfs_populate_freq_table(rcg);
1030 		if (ret) {
1031 			pr_err("Failed to update DFS tables for %s\n",
1032 					clk_hw_get_name(hw));
1033 			return ret;
1034 		}
1035 	}
1036 
1037 	return clk_rcg2_determine_rate(hw, req);
1038 }
1039 
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	/* Current perf level lives in bits [4:1] of the DFS control reg */
	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	/* Fast path: table already decoded by determine_rate */
	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	/* Zero divider field means divide-by-1 */
	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		/* MND mode: N is stored in hardware as ~(N - M) */
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
1086 
/*
 * Ops for RCGs under hardware DFS control: software only observes the
 * rate (recalc/determine), so no set_rate/set_parent callbacks exist.
 */
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
1093 
clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data * data,struct regmap * regmap)1094 static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
1095 			       struct regmap *regmap)
1096 {
1097 	struct clk_rcg2 *rcg = data->rcg;
1098 	struct clk_init_data *init = data->init;
1099 	u32 val;
1100 	int ret;
1101 
1102 	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
1103 	if (ret)
1104 		return -EINVAL;
1105 
1106 	if (!(val & SE_CMD_DFS_EN))
1107 		return 0;
1108 
1109 	/*
1110 	 * Rate changes with consumer writing a register in
1111 	 * their own I/O region
1112 	 */
1113 	init->flags |= CLK_GET_RATE_NOCACHE;
1114 	init->ops = &clk_rcg2_dfs_ops;
1115 
1116 	rcg->freq_tbl = NULL;
1117 
1118 	return 0;
1119 }
1120 
/*
 * Enable DFS handling for an array of @len RCGs. Stops at the first
 * failure and returns its error code; returns 0 when all succeed.
 */
int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	size_t i;	/* size_t avoids signed/unsigned compare with len */
	int ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
1135 
/*
 * Set the rate of a DisplayPort pixel RCG. The source mux and pre-divider
 * are kept as currently programmed; only the M/N fractional values are
 * recomputed so that rate ≈ parent_rate * (m/n).
 */
static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	/*
	 * Approximate parent_rate/rate as den/num; the output pointers are
	 * deliberately swapped so that num/den ends up being rate/parent,
	 * which is the m/n ratio programmed below.
	 */
	rational_best_approximation(parent_rate, rate,
			GENMASK(rcg->mnd_width - 1, 0),
			GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	/* Read back the current source selection and divider */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	/* Translate the hardware mux value back to the parent-map source */
	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	/* Preserve the currently programmed pre-divider */
	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	/* num == den means a 1:1 ratio, i.e. bypass M/N entirely */
	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}
1179 
/*
 * The DP RCG keeps its currently programmed source, so a combined
 * rate-and-parent change reduces to a plain set_rate; @index is unused.
 */
static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}
1185 
clk_rcg2_dp_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)1186 static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
1187 				struct clk_rate_request *req)
1188 {
1189 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1190 	unsigned long num, den;
1191 	u64 tmp;
1192 
1193 	/* Parent rate is a fixed phy link rate */
1194 	rational_best_approximation(req->best_parent_rate, req->rate,
1195 			GENMASK(rcg->mnd_width - 1, 0),
1196 			GENMASK(rcg->mnd_width - 1, 0), &den, &num);
1197 
1198 	if (!num || !den)
1199 		return -EINVAL;
1200 
1201 	tmp = req->best_parent_rate * num;
1202 	do_div(tmp, den);
1203 	req->rate = tmp;
1204 
1205 	return 0;
1206 }
1207 
/*
 * Ops for DisplayPort pixel-clock RCGs: rates are derived from a fixed
 * PHY link rate via an M/N ratio (see clk_rcg2_dp_determine_rate()).
 */
const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);
1218