/*
 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>

#include "dsi_pll.h"
#include "dsi.xml.h"

/*
 * DSI PLL 28nm (8960/A family) - clock diagram (e.g.: DSI1):
 *
 *
 *                        +------+
 *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
 *  F * byte_clk    |     +------+
 *                  | bit clock divider (F / 8)
 *                  |
 *                  |     +------+
 *                  o-----| DIV2 |---dsi1pllbyte---o---> To byte RCG
 *                  |     +------+                 | (sets parent rate)
 *                  | byte clock divider (F)       |
 *                  |                              |
 *                  |                              o---> To esc RCG
 *                  |                                (doesn't set parent rate)
 *                  |
 *                  |     +------+
 *                  o-----| DIV3 |----dsi1pll------o---> To dsi RCG
 *                        +------+                 | (sets parent rate)
 *                  dsi clock divider (F * magic)  |
 *                                                 |
 *                                                 o---> To pixel RCG
 *                                                  (doesn't set parent rate)
 */
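
/*
 * Worked example (numbers assumed for illustration only): a 52.5 MHz byte
 * clock request gives a bit clock of 8 * 52.5 = 420 MHz, for which
 * get_vco_mul_factor() below picks F = 16, so the VCO is programmed to
 * 52.5 MHz * 16 = 840 MHz (inside the 600-1200 MHz VCO range). DIV1
 * (divide by F / 8 = 2) then yields the 420 MHz bit clock, and DIV2
 * (divide by F = 16) yields the 52.5 MHz byte clock again.
 */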

#define POLL_MAX_READS		8000
#define POLL_TIMEOUT_US		1

#define NUM_PROVIDED_CLKS	2

#define VCO_REF_CLK_RATE	27000000
#define VCO_MIN_RATE		600000000
#define VCO_MAX_RATE		1200000000

#define DSI_BYTE_PLL_CLK	0
#define DSI_PIXEL_PLL_CLK	1

#define VCO_PREF_DIV_RATIO	27

struct pll_28nm_cached_state {
	unsigned long vco_rate;
	u8 postdiv3;
	u8 postdiv2;
	u8 postdiv1;
};

struct clk_bytediv {
	struct clk_hw hw;
	void __iomem *reg;
};

struct dsi_pll_28nm {
	struct msm_dsi_pll base;

	int id;
	struct platform_device *pdev;
	void __iomem *mmio;

	/* custom byte clock divider */
	struct clk_bytediv *bytediv;

	/* private clocks: */
	struct clk *clks[NUM_DSI_CLOCKS_MAX];
	u32 num_clks;

	/* clock-provider: */
	struct clk *provided_clks[NUM_PROVIDED_CLKS];
	struct clk_onecell_data clk_data;

	struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)

static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
				    int nb_tries, int timeout_us)
{
	bool pll_locked = false;
	u32 val;

	while (nb_tries--) {
		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);
		pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);

		if (pll_locked)
			break;

		udelay(timeout_us);
	}
	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

	return pll_locked;
}

/*
 * Clock Callbacks
 */
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	void __iomem *base = pll_28nm->mmio;
	u32 val, temp, fb_divider;

	DBG("rate=%lu, parent's=%lu", rate, parent_rate);

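	/*
	 * Derivation of the value programmed below (for clarity): the VCO
	 * runs at 2 * (ref / P) * (fb_divider + 1), with ref the reference
	 * clock VCO_REF_CLK_RATE and P = VCO_PREF_DIV_RATIO, so the value
	 * to program is fb_divider = rate * P / (2 * ref) - 1.  With the
	 * 27 MHz reference and P = 27 this reduces to rate / 2000000 - 1,
	 * e.g. 419 for an 840 MHz VCO rate.
	 */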
	temp = rate / 10;
	val = VCO_REF_CLK_RATE / 10;
	fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
	fb_divider = fb_divider / 2 - 1;
	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
			fb_divider & 0xff);

	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);

	val |= (fb_divider >> 8) & 0x07;

	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
			val);

	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);

	val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;

	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
			val);

	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
			0xf);

	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
	val |= 0x7 << 4;
	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
			val);

	return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
					POLL_TIMEOUT_US);
}

static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	void __iomem *base = pll_28nm->mmio;
	unsigned long vco_rate;
	u32 status, fb_divider, temp, ref_divider;

	VERB("parent_rate=%lu", parent_rate);

	status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);

	if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
		fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
		fb_divider &= 0xff;
		temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
		fb_divider = (temp << 8) | fb_divider;
		fb_divider += 1;

		ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
		ref_divider &= 0x3f;
		ref_divider += 1;

		/* multiply by 2 */
		vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
	} else {
		vco_rate = 0;
	}

	DBG("returning vco rate = %lu", vco_rate);

	return vco_rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
	.round_rate = msm_dsi_pll_helper_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = msm_dsi_pll_helper_clk_prepare,
	.unprepare = msm_dsi_pll_helper_clk_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * Custom byte clock divider clk_ops
 *
 * This clock is the entry point to configuring the PLL. The user (dsi host)
 * will set this clock's rate to the desired byte clock rate. The VCO lock
 * frequency is a multiple of the byte clock rate. The multiplication factor
 * (shown as F in the diagram above) is a function of the byte clock rate.
 *
 * This custom divider clock ensures that its parent (VCO) is set to the
 * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
 * accordingly.
 */
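
/*
 * Illustrative rate-propagation flow (a sketch of common clock framework
 * behaviour, not an additional driver mechanism): when the host calls
 * clk_set_rate() on the dsi%dpllbyte clock, clk_bytediv_round_rate()
 * chooses F = get_vco_mul_factor(byte_rate) and, because the divider is
 * registered with CLK_SET_RATE_PARENT, the framework first sets the VCO
 * to byte_rate * F via dsi_pll_28nm_clk_set_rate(); afterwards
 * clk_bytediv_set_rate() programs POSTDIV2 with F - 1.
 */
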
#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)

static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
	unsigned int div;

	div = pll_read(bytediv->reg) & 0xff;

	return parent_rate / (div + 1);
}

/* find multiplication factor (wrt byte clock) at which the VCO should be set */
static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
{
	unsigned long bit_mhz;

	/* convert to bit clock in MHz */
	bit_mhz = (byte_clk_rate * 8) / 1000000;

	if (bit_mhz < 125)
		return 64;
	else if (bit_mhz < 250)
		return 32;
	else if (bit_mhz < 600)
		return 16;
	else
		return 8;
}

static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	unsigned long best_parent;
	unsigned int factor;

	factor = get_vco_mul_factor(rate);

	best_parent = rate * factor;
	*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);

	return *prate / factor;
}

static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
	u32 val;
	unsigned int factor;

	factor = get_vco_mul_factor(rate);

	val = pll_read(bytediv->reg);
	val |= (factor - 1) & 0xff;
	pll_write(bytediv->reg, val);

	return 0;
}

/* Our special byte clock divider ops */
static const struct clk_ops clk_bytediv_ops = {
	.round_rate = clk_bytediv_round_rate,
	.set_rate = clk_bytediv_set_rate,
	.recalc_rate = clk_bytediv_recalc_rate,
};

/*
 * PLL Callbacks
 */
static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct device *dev = &pll_28nm->pdev->dev;
	void __iomem *base = pll_28nm->mmio;
	bool locked;
	unsigned int bit_div, byte_div;
	int max_reads = 1000, timeout_us = 100;
	u32 val;

	DBG("id=%d", pll_28nm->id);

	/*
	 * before enabling the PLL, configure the bit clock divider since we
	 * don't expose it as a clock to the outside world
	 * 1: read back the byte clock divider that should already be set
	 * 2: divide by 8 to get bit clock divider
	 * 3: write it to POSTDIV1
	 */
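	/*
	 * For example (values assumed for illustration only): a POSTDIV2
	 * value of 15 means byte_div = 16, so bit_div = 2 and POSTDIV1 is
	 * written with bit_div - 1 = 1.
	 */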
	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
	byte_div = val + 1;
	bit_div = byte_div / 8;

	val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
	val &= ~0xf;
	val |= (bit_div - 1);
	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);

	/* enable the PLL */
	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
			DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked))
		dev_err(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL lock success");

	return locked ? 0 : -EINVAL;
}

static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	DBG("id=%d", pll_28nm->id);
	pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
}

static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->mmio;

	cached_state->postdiv3 =
			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
	cached_state->postdiv2 =
			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
	cached_state->postdiv1 =
			pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);

	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
}

static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->mmio;
	int ret;

	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
					cached_state->vco_rate, 0);
	if (ret) {
		dev_err(&pll_28nm->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
			cached_state->postdiv3);
	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
			cached_state->postdiv2);
	pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
			cached_state->postdiv1);

	return 0;
}

static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
				struct clk **byte_clk_provider,
				struct clk **pixel_clk_provider)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	if (byte_clk_provider)
		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
	if (pixel_clk_provider)
		*pixel_clk_provider =
				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];

	return 0;
}

static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
					pll_28nm->clks, pll_28nm->num_clks);
}

static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
{
	char *clk_name, *parent_name, *vco_name;
	struct clk_init_data vco_init = {
		.parent_names = (const char *[]){ "pxo" },
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_28nm_vco,
	};
	struct device *dev = &pll_28nm->pdev->dev;
	struct clk **clks = pll_28nm->clks;
	struct clk **provided_clks = pll_28nm->provided_clks;
	struct clk_bytediv *bytediv;
	struct clk_init_data bytediv_init = { };
	int ret, num = 0;

	DBG("%d", pll_28nm->id);

	bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
	if (!bytediv)
		return -ENOMEM;

	vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!vco_name)
		return -ENOMEM;

	parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!parent_name)
		return -ENOMEM;

	clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!clk_name)
		return -ENOMEM;

	pll_28nm->bytediv = bytediv;

	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
	vco_init.name = vco_name;

	pll_28nm->base.clk_hw.init = &vco_init;

	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);

	/* prepare and register bytediv */
	bytediv->hw.init = &bytediv_init;
	bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;

	snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);
	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);

	bytediv_init.name = clk_name;
	bytediv_init.ops = &clk_bytediv_ops;
	bytediv_init.flags = CLK_SET_RATE_PARENT;
	bytediv_init.parent_names = (const char * const *) &parent_name;
	bytediv_init.num_parents = 1;

	/* DIV2 */
	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
			clk_register(dev, &bytediv->hw);

	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
	/* DIV3 */
	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
			clk_register_divider(dev, clk_name,
				parent_name, 0, pll_28nm->mmio +
				REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
				0, 8, 0, NULL);

	pll_28nm->num_clks = num;

	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
	pll_28nm->clk_data.clks = provided_clks;

	ret = of_clk_add_provider(dev->of_node,
			of_clk_src_onecell_get, &pll_28nm->clk_data);
	if (ret) {
		dev_err(dev, "failed to register clk provider: %d\n", ret);
		return ret;
	}

	return 0;
}
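
/*
 * With the onecell provider above, device tree consumers reference the two
 * provided clocks by index: 0 (DSI_BYTE_PLL_CLK) is the byte clock and
 * 1 (DSI_PIXEL_PLL_CLK) is the pixel-path clock.
 */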

struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
					       int id)
{
	struct dsi_pll_28nm *pll_28nm;
	struct msm_dsi_pll *pll;
	int ret;

	if (!pdev)
		return ERR_PTR(-ENODEV);

	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
	if (!pll_28nm)
		return ERR_PTR(-ENOMEM);

	pll_28nm->pdev = pdev;
	pll_28nm->id = id + 1;

	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
		dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pll = &pll_28nm->base;
	pll->min_rate = VCO_MIN_RATE;
	pll->max_rate = VCO_MAX_RATE;
	pll->get_provider = dsi_pll_28nm_get_provider;
	pll->destroy = dsi_pll_28nm_destroy;
	pll->disable_seq = dsi_pll_28nm_disable_seq;
	pll->save_state = dsi_pll_28nm_save_state;
	pll->restore_state = dsi_pll_28nm_restore_state;

	pll->en_seq_cnt = 1;
	pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;

	ret = pll_28nm_register(pll_28nm);
	if (ret) {
		dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ERR_PTR(ret);
	}

	return pll;
}