• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * SPDX-License-Identifier: GPL-2.0
3  * Copyright (c) 2018, The Linux Foundation
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8 #include <linux/iopoll.h>
9 
10 #include "dsi_phy.h"
11 #include "dsi.xml.h"
12 #include "dsi_phy_10nm.xml.h"
13 
14 /*
15  * DSI PLL 10nm - clock diagram (eg: DSI0):
16  *
17  *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
18  *                              |                |
19  *                              |                |
20  *                 +---------+  |  +----------+  |  +----+
21  *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
22  *                 +---------+  |  +----------+  |  +----+
23  *                              |                |
24  *                              |                |         dsi0_pll_by_2_bit_clk
25  *                              |                |          |
26  *                              |                |  +----+  |  |\  dsi0_pclk_mux
27  *                              |                |--| /2 |--o--| \   |
28  *                              |                |  +----+     |  \  |  +---------+
29  *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
30  *                              |------------------------------|  /     +---------+
31  *                              |          +-----+             | /
32  *                              -----------| /4? |--o----------|/
33  *                                         +-----+  |           |
34  *                                                  |           |dsiclk_sel
35  *                                                  |
36  *                                                  dsi0_pll_post_out_div_clk
37  */
38 
39 #define VCO_REF_CLK_RATE		19200000
40 #define FRAC_BITS 18
41 
42 /* v3.0.0 10nm implementation that requires the old timings settings */
43 #define DSI_PHY_10NM_QUIRK_OLD_TIMINGS	BIT(0)
44 
45 struct dsi_pll_config {
46 	bool enable_ssc;
47 	bool ssc_center;
48 	u32 ssc_freq;
49 	u32 ssc_offset;
50 	u32 ssc_adj_per;
51 
52 	/* out */
53 	u32 pll_prop_gain_rate;
54 	u32 decimal_div_start;
55 	u32 frac_div_start;
56 	u32 pll_clock_inverters;
57 	u32 ssc_stepsize;
58 	u32 ssc_div_per;
59 };
60 
61 struct pll_10nm_cached_state {
62 	unsigned long vco_rate;
63 	u8 bit_clk_div;
64 	u8 pix_clk_div;
65 	u8 pll_out_div;
66 	u8 pll_mux;
67 };
68 
69 struct dsi_pll_10nm {
70 	struct clk_hw clk_hw;
71 
72 	struct msm_dsi_phy *phy;
73 
74 	u64 vco_current_rate;
75 
76 	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
77 	spinlock_t postdiv_lock;
78 
79 	struct pll_10nm_cached_state cached_state;
80 
81 	struct dsi_pll_10nm *slave;
82 };
83 
84 #define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, clk_hw)
85 
/*
 * Global list of private DSI PLL struct pointers. We need this for bonded DSI
 * mode, where the master PLL's clk_ops needs to access the slave's private
 * data.
 */
static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
91 
dsi_pll_setup_config(struct dsi_pll_config * config)92 static void dsi_pll_setup_config(struct dsi_pll_config *config)
93 {
94 	config->ssc_freq = 31500;
95 	config->ssc_offset = 5000;
96 	config->ssc_adj_per = 2;
97 
98 	config->enable_ssc = false;
99 	config->ssc_center = false;
100 }
101 
dsi_pll_calc_dec_frac(struct dsi_pll_10nm * pll,struct dsi_pll_config * config)102 static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
103 {
104 	u64 fref = VCO_REF_CLK_RATE;
105 	u64 pll_freq;
106 	u64 divider;
107 	u64 dec, dec_multiple;
108 	u32 frac;
109 	u64 multiplier;
110 
111 	pll_freq = pll->vco_current_rate;
112 
113 	divider = fref * 2;
114 
115 	multiplier = 1 << FRAC_BITS;
116 	dec_multiple = div_u64(pll_freq * multiplier, divider);
117 	dec = div_u64_rem(dec_multiple, multiplier, &frac);
118 
119 	if (pll_freq <= 1900000000UL)
120 		config->pll_prop_gain_rate = 8;
121 	else if (pll_freq <= 3000000000UL)
122 		config->pll_prop_gain_rate = 10;
123 	else
124 		config->pll_prop_gain_rate = 12;
125 	if (pll_freq < 1100000000UL)
126 		config->pll_clock_inverters = 8;
127 	else
128 		config->pll_clock_inverters = 0;
129 
130 	config->decimal_div_start = dec;
131 	config->frac_div_start = frac;
132 }
133 
134 #define SSC_CENTER		BIT(0)
135 #define SSC_EN			BIT(1)
136 
dsi_pll_calc_ssc(struct dsi_pll_10nm * pll,struct dsi_pll_config * config)137 static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
138 {
139 	u32 ssc_per;
140 	u32 ssc_mod;
141 	u64 ssc_step_size;
142 	u64 frac;
143 
144 	if (!config->enable_ssc) {
145 		DBG("SSC not enabled\n");
146 		return;
147 	}
148 
149 	ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
150 	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
151 	ssc_per -= ssc_mod;
152 
153 	frac = config->frac_div_start;
154 	ssc_step_size = config->decimal_div_start;
155 	ssc_step_size *= (1 << FRAC_BITS);
156 	ssc_step_size += frac;
157 	ssc_step_size *= config->ssc_offset;
158 	ssc_step_size *= (config->ssc_adj_per + 1);
159 	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
160 	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
161 
162 	config->ssc_div_per = ssc_per;
163 	config->ssc_stepsize = ssc_step_size;
164 
165 	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
166 		 config->decimal_div_start, frac, FRAC_BITS);
167 	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
168 		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
169 }
170 
dsi_pll_ssc_commit(struct dsi_pll_10nm * pll,struct dsi_pll_config * config)171 static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
172 {
173 	void __iomem *base = pll->phy->pll_base;
174 
175 	if (config->enable_ssc) {
176 		pr_debug("SSC is enabled\n");
177 
178 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
179 			  config->ssc_stepsize & 0xff);
180 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
181 			  config->ssc_stepsize >> 8);
182 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
183 			  config->ssc_div_per & 0xff);
184 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
185 			  config->ssc_div_per >> 8);
186 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
187 			  config->ssc_adj_per & 0xff);
188 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
189 			  config->ssc_adj_per >> 8);
190 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
191 			  SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
192 	}
193 }
194 
/*
 * Program the VCO-rate-independent PLL registers (analog controls,
 * loop filter, calibration and lock settings). The rate-dependent
 * divider values are written separately by dsi_pll_commit().
 */
static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
{
	void __iomem *base = pll->phy->pll_base;

	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
		  0xba);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
	/*
	 * NOTE(review): a fixed 0x08 is written here even though
	 * dsi_pll_calc_dec_frac() computes config->pll_prop_gain_rate;
	 * confirm whether the computed value is meant to be used.
	 */
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
		  0x4c);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
}
220 
/*
 * Write the rate-dependent divider values computed by
 * dsi_pll_calc_dec_frac() into the PLL registers.
 */
static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	void __iomem *base = pll->phy->pll_base;

	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
	/* integer part of the feedback divider */
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
		  config->decimal_div_start);
	/* 18-bit fractional part, split low/mid/high (8 + 8 + 2 bits) */
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
		  config->frac_div_start & 0xff);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
		  (config->frac_div_start & 0xff00) >> 8);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
		  (config->frac_div_start & 0x30000) >> 16);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
		  config->pll_clock_inverters);
}
240 
/*
 * clk_ops.set_rate for the VCO clock: cache the requested rate and
 * program the complete PLL configuration (dividers, fixed settings,
 * and optional SSC) for it.
 *
 * Return: always 0.
 */
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	struct dsi_pll_config config;

	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
	    parent_rate);

	pll_10nm->vco_current_rate = rate;

	/* default parameters (SSC disabled) */
	dsi_pll_setup_config(&config);

	/* derive divider values for the new rate */
	dsi_pll_calc_dec_frac(pll_10nm, &config);

	dsi_pll_calc_ssc(pll_10nm, &config);

	dsi_pll_commit(pll_10nm, &config);

	dsi_pll_config_hzindep_reg(pll_10nm);

	dsi_pll_ssc_commit(pll_10nm, &config);

	/* flush, ensure all register writes are done*/
	wmb();

	return 0;
}
269 
/*
 * Poll COMMON_STATUS_ONE bit 0 until the PLL reports lock, sampling
 * every 100us for up to 5ms.
 *
 * Return: 0 on lock, negative errno (from readl_poll_timeout_atomic)
 * on timeout.
 */
static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
{
	struct device *dev = &pll->phy->pdev->dev;
	int rc;
	u32 status = 0;
	u32 const delay_us = 100;
	u32 const timeout_us = 5000;

	rc = readl_poll_timeout_atomic(pll->phy->pll_base +
				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
				       status,
				       ((status & BIT(0)) > 0),
				       delay_us,
				       timeout_us);
	if (rc)
		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
			      pll->phy->id, status);

	return rc;
}
290 
/*
 * Power down the PLL bias: clear SYSTEM_MUXES and drop CMN_CTRL_0
 * bit 5 (the bit set by dsi_pll_enable_pll_bias()).
 */
static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
		  data & ~BIT(5));
	/* 250ns settle delay, mirroring dsi_pll_enable_pll_bias() */
	ndelay(250);
}
300 
/*
 * Power up the PLL bias: set CMN_CTRL_0 bit 5 and program
 * SYSTEM_MUXES to 0xc0, then wait 250ns for the bias to settle.
 */
static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
		  data | BIT(5));
	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
	ndelay(250);
}
310 
dsi_pll_disable_global_clk(struct dsi_pll_10nm * pll)311 static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
312 {
313 	u32 data;
314 
315 	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
316 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
317 		  data & ~BIT(5));
318 }
319 
dsi_pll_enable_global_clk(struct dsi_pll_10nm * pll)320 static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
321 {
322 	u32 data;
323 
324 	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
325 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
326 		  data | BIT(5));
327 }
328 
/*
 * clk_ops.prepare for the VCO: power up the PLL bias (on the slave PHY
 * too, in bonded mode), reprogram the cached rate, start the PLL, wait
 * for lock, then ungate the global clock and the resync FIFO.
 *
 * Return: 0 on success, negative errno on set_rate or lock failure.
 */
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	struct device *dev = &pll_10nm->phy->pdev->dev;
	int rc;

	dsi_pll_enable_pll_bias(pll_10nm);
	if (pll_10nm->slave)
		dsi_pll_enable_pll_bias(pll_10nm->slave);

	/* reapply the last requested VCO rate after the bias power-up */
	rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
	if (rc) {
		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
		return rc;
	}

	/* Start PLL */
	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
		  0x01);

	/*
	 * ensure all PLL configurations are written prior to checking
	 * for PLL lock.
	 */
	wmb();

	/* Check for PLL lock */
	rc = dsi_pll_10nm_lock_status(pll_10nm);
	if (rc) {
		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
		/*
		 * NOTE(review): the error paths return with the PLL bias
		 * still enabled — confirm whether that is intentional.
		 */
		goto error;
	}

	pll_10nm->phy->pll_on = true;

	dsi_pll_enable_global_clk(pll_10nm);
	if (pll_10nm->slave)
		dsi_pll_enable_global_clk(pll_10nm->slave);

	/* enable the resync FIFO buffer on both PHYs */
	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
		  0x01);
	if (pll_10nm->slave)
		dsi_phy_write(pll_10nm->slave->phy->base +
			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);

error:
	return rc;
}
377 
/* Gate the resync FIFO buffer and power down the PLL bias. */
static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
{
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
	dsi_pll_disable_pll_bias(pll);
}
383 
/*
 * clk_ops.unprepare for the VCO: gate the output clock, stop the PLL
 * and power it down, handling the slave PHY too in bonded mode.
 */
static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

	/*
	 * To avoid any stray glitches while abruptly powering down the PLL
	 * make sure to gate the clock using the clock enable bit before
	 * powering down the PLL
	 */
	dsi_pll_disable_global_clk(pll_10nm);
	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
	dsi_pll_disable_sub(pll_10nm);
	if (pll_10nm->slave) {
		dsi_pll_disable_global_clk(pll_10nm->slave);
		dsi_pll_disable_sub(pll_10nm->slave);
	}
	/* flush, ensure all register writes are done */
	wmb();
	pll_10nm->phy->pll_on = false;
}
404 
dsi_pll_10nm_vco_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)405 static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
406 						  unsigned long parent_rate)
407 {
408 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
409 	void __iomem *base = pll_10nm->phy->pll_base;
410 	u64 ref_clk = VCO_REF_CLK_RATE;
411 	u64 vco_rate = 0x0;
412 	u64 multiplier;
413 	u32 frac;
414 	u32 dec;
415 	u64 pll_freq, tmp64;
416 
417 	dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
418 	dec &= 0xff;
419 
420 	frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
421 	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
422 		  0xff) << 8);
423 	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
424 		  0x3) << 16);
425 
426 	/*
427 	 * TODO:
428 	 *	1. Assumes prescaler is disabled
429 	 */
430 	multiplier = 1 << FRAC_BITS;
431 	pll_freq = dec * (ref_clk * 2);
432 	tmp64 = (ref_clk * 2 * frac);
433 	pll_freq += div_u64(tmp64, multiplier);
434 
435 	vco_rate = pll_freq;
436 	pll_10nm->vco_current_rate = vco_rate;
437 
438 	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
439 	    pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
440 
441 	return (unsigned long)vco_rate;
442 }
443 
dsi_pll_10nm_clk_round_rate(struct clk_hw * hw,unsigned long rate,unsigned long * parent_rate)444 static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
445 		unsigned long rate, unsigned long *parent_rate)
446 {
447 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
448 
449 	if      (rate < pll_10nm->phy->cfg->min_pll_rate)
450 		return  pll_10nm->phy->cfg->min_pll_rate;
451 	else if (rate > pll_10nm->phy->cfg->max_pll_rate)
452 		return  pll_10nm->phy->cfg->max_pll_rate;
453 	else
454 		return rate;
455 }
456 
/* clk_ops for the dsi%dvco_clk clock registered by pll_10nm_register() */
static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
	.round_rate = dsi_pll_10nm_clk_round_rate,
	.set_rate = dsi_pll_10nm_vco_set_rate,
	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
	.prepare = dsi_pll_10nm_vco_prepare,
	.unprepare = dsi_pll_10nm_vco_unprepare,
};
464 
465 /*
466  * PLL Callbacks
467  */
468 
/*
 * Snapshot the PLL out-divider, bit/pixel clock dividers and pclk mux
 * selection so dsi_10nm_pll_restore_state() can reprogram them after
 * the PHY loses state (e.g. across a power collapse).
 */
static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 cmn_clk_cfg0, cmn_clk_cfg1;

	/* PLL out-divider: low 2 bits of PLL_OUTDIV_RATE */
	cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	cached->pll_out_div &= 0x3;

	/* CLK_CFG0: bit clock divider in [3:0], pixel clock divider in [7:4] */
	cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

	/* CLK_CFG1: pclk mux selection in [1:0] */
	cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	cached->pll_mux = cmn_clk_cfg1 & 0x3;

	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
	    pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
	    cached->pix_clk_div, cached->pll_mux);
}
491 
/*
 * Reprogram the divider/mux values captured by dsi_10nm_pll_save_state()
 * and re-apply the cached VCO rate.
 *
 * Return: 0 on success, negative errno if setting the VCO rate fails.
 */
static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 val;
	int ret;

	/* restore PLL out-divider (low 2 bits only) */
	val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	val &= ~0x3;
	val |= cached->pll_out_div;
	dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);

	/* restore bit [3:0] and pixel [7:4] clock dividers */
	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
		  cached->bit_clk_div | (cached->pix_clk_div << 4));

	/* restore pclk mux selection (low 2 bits only) */
	val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	val &= ~0x3;
	val |= cached->pll_mux;
	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);

	ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
			pll_10nm->vco_current_rate,
			VCO_REF_CLK_RATE);
	if (ret) {
		DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	DBG("DSI PLL%d", pll_10nm->phy->id);

	return 0;
}
526 
/*
 * Configure the PHY's PLL source for its usecase: standalone and master
 * use the internal PLL; a slave PHY in bonded DSI mode selects the
 * external PLL, and a master records a pointer to its slave's private
 * data for the clk_ops.
 *
 * Return: 0 on success, -EINVAL for an unknown usecase.
 */
static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	void __iomem *base = phy->base;
	u32 data = 0x0;	/* internal PLL */

	DBG("DSI PLL%d", pll_10nm->phy->id);

	switch (phy->usecase) {
	case MSM_DSI_PHY_STANDALONE:
		break;
	case MSM_DSI_PHY_MASTER:
		/* the other PHY in the pair is the slave */
		pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
		break;
	case MSM_DSI_PHY_SLAVE:
		data = 0x1; /* external PLL */
		break;
	default:
		return -EINVAL;
	}

	/* set PLL src */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));

	return 0;
}
553 
554 /*
555  * The post dividers and mux clocks are created using the standard divider and
556  * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
557  * state to follow the master PLL's divider/mux state. Therefore, we don't
558  * require special clock ops that also configure the slave PLL registers
559  */
pll_10nm_register(struct dsi_pll_10nm * pll_10nm,struct clk_hw ** provided_clocks)560 static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
561 {
562 	char clk_name[32], parent[32], vco_name[32];
563 	char parent2[32], parent3[32], parent4[32];
564 	struct clk_init_data vco_init = {
565 		.parent_data = &(const struct clk_parent_data) {
566 			.fw_name = "ref",
567 		},
568 		.num_parents = 1,
569 		.name = vco_name,
570 		.flags = CLK_IGNORE_UNUSED,
571 		.ops = &clk_ops_dsi_pll_10nm_vco,
572 	};
573 	struct device *dev = &pll_10nm->phy->pdev->dev;
574 	struct clk_hw *hw;
575 	int ret;
576 
577 	DBG("DSI%d", pll_10nm->phy->id);
578 
579 	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id);
580 	pll_10nm->clk_hw.init = &vco_init;
581 
582 	ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
583 	if (ret)
584 		return ret;
585 
586 	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
587 	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id);
588 
589 	hw = devm_clk_hw_register_divider(dev, clk_name,
590 				     parent, CLK_SET_RATE_PARENT,
591 				     pll_10nm->phy->pll_base +
592 				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
593 				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
594 	if (IS_ERR(hw)) {
595 		ret = PTR_ERR(hw);
596 		goto fail;
597 	}
598 
599 	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
600 	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
601 
602 	/* BIT CLK: DIV_CTRL_3_0 */
603 	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
604 				     CLK_SET_RATE_PARENT,
605 				     pll_10nm->phy->base +
606 				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,
607 				     0, 4, CLK_DIVIDER_ONE_BASED,
608 				     &pll_10nm->postdiv_lock);
609 	if (IS_ERR(hw)) {
610 		ret = PTR_ERR(hw);
611 		goto fail;
612 	}
613 
614 	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
615 	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
616 
617 	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
618 	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
619 					  CLK_SET_RATE_PARENT, 1, 8);
620 	if (IS_ERR(hw)) {
621 		ret = PTR_ERR(hw);
622 		goto fail;
623 	}
624 
625 	provided_clocks[DSI_BYTE_PLL_CLK] = hw;
626 
627 	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
628 	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
629 
630 	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
631 					  0, 1, 2);
632 	if (IS_ERR(hw)) {
633 		ret = PTR_ERR(hw);
634 		goto fail;
635 	}
636 
637 	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
638 	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
639 
640 	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
641 					  0, 1, 4);
642 	if (IS_ERR(hw)) {
643 		ret = PTR_ERR(hw);
644 		goto fail;
645 	}
646 
647 	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
648 	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
649 	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
650 	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
651 	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
652 
653 	hw = devm_clk_hw_register_mux(dev, clk_name,
654 				 ((const char *[]){
655 				 parent, parent2, parent3, parent4
656 				 }), 4, 0, pll_10nm->phy->base +
657 				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
658 				 0, 2, 0, NULL);
659 	if (IS_ERR(hw)) {
660 		ret = PTR_ERR(hw);
661 		goto fail;
662 	}
663 
664 	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
665 	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
666 
667 	/* PIX CLK DIV : DIV_CTRL_7_4*/
668 	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
669 				     0, pll_10nm->phy->base +
670 					REG_DSI_10nm_PHY_CMN_CLK_CFG0,
671 				     4, 4, CLK_DIVIDER_ONE_BASED,
672 				     &pll_10nm->postdiv_lock);
673 	if (IS_ERR(hw)) {
674 		ret = PTR_ERR(hw);
675 		goto fail;
676 	}
677 
678 	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
679 
680 	return 0;
681 
682 fail:
683 
684 	return ret;
685 }
686 
/*
 * Allocate the per-PHY PLL context, register the clock tree and expose
 * the VCO clk_hw through the PHY.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * from pll_10nm_register().
 */
static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_10nm *pll_10nm;
	int ret;

	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
	if (!pll_10nm)
		return -ENOMEM;

	DBG("DSI PLL%d", phy->id);

	/* record in the global list so a bonded master can find its slave */
	pll_10nm_list[phy->id] = pll_10nm;

	spin_lock_init(&pll_10nm->postdiv_lock);

	pll_10nm->phy = phy;

	ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_10nm->clk_hw;

	/* TODO: Remove this when we have proper display handover support */
	msm_dsi_phy_pll_save_state(phy);

	return 0;
}
718 
dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy * phy)719 static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
720 {
721 	void __iomem *base = phy->base;
722 	u32 data = 0;
723 
724 	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
725 	mb(); /* make sure read happened */
726 
727 	return (data & BIT(0));
728 }
729 
/*
 * Enable or disable LPRX and CDRX on the physical lane that carries
 * logical data lane 0; they must be off on all other lanes.
 */
static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */

	dsi_phy_write(phy->lane_base +
		      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0),
		      enable ? 0x3 : 0);
}
746 
/*
 * Program the per-lane registers (strength, RX controls, lane config
 * and TX data control) for all 4 data lanes plus the clock lane.
 */
static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
{
	int i;
	u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
	void __iomem *lane_base = phy->lane_base;

	/* older (v3.0.0) PHYs use a different TX_DCTRL value on lane 3 */
	if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
		tx_dctrl[3] = 0x02;

	/* Strength ctrl settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
			      0x55);
		/*
		 * Disable LPRX and CDRX for all lanes. And later on, it will
		 * be only enabled for the physical data lane corresponding
		 * to the logical data lane 0
		 */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
			      0x88);
	}

	/* re-enable LPRX/CDRX on the lane carrying logical lane 0 */
	dsi_phy_hw_v3_0_config_lpcdrx(phy, true);

	/* other settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
			      i == 4 ? 0x80 : 0x0);
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
			      tx_dctrl[i]);
	}

	if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
		/* Toggle BIT 0 to release freeze I/O */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
	}
}
794 
/*
 * Bring up the PHY: compute D-PHY timings for the requested clock,
 * wait for the reference generator, power up the common blocks,
 * program the timing registers, power up the lanes and apply the
 * per-lane settings. The register order below follows the hardware
 * bring-up sequence and should not be reordered.
 *
 * Return: 0 on success, -EINVAL on timing-calculation or REFGEN
 * timeout, or the error from dsi_10nm_set_usecase().
 */
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY */
	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);

	/* Select MS1 byte-clk */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

	/* Enable LDO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);

	/* DSI PHY timings */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
		      timing->hs_halfbyte_en);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
		      timing->clk_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
		      timing->clk_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
		      timing->clk_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
		      timing->hs_exit);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
		      timing->hs_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
		      timing->hs_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
		      timing->hs_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
		      timing->hs_rqst);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
		      timing->ta_go | (timing->ta_sure << 3));
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
		      timing->ta_get);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
		      0x00);

	/* Remove power down from all blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);

	/* power up lanes */
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);

	/* Select full-rate mode */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);

	ret = dsi_10nm_set_usecase(phy);
	if (ret) {
		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}
900 
/*
 * Power down the PHY: disable LPRX/CDRX, power down all lanes, then
 * turn off every PHY block. Warns (but proceeds) if the PLL is still
 * running, since the PLL should be unprepared first.
 */
static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("Turning OFF PHY while PLL is on\n");

	dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* disable all lanes */
	data &= ~0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);

	/* Turn off all PHY blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
	/* make sure phy is turned off */
	wmb();

	DBG("DSI%d PHY disabled", phy->id);
}
926 
/* 10nm DSI PHY configuration (two PHYs at 0xae94400/0xae96400) */
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};
947 
/*
 * 10nm DSI PHY variant for MSM8998 (per the symbol name): identical
 * ops but different register addresses, and it carries the v3.0.0
 * old-timings quirk.
 */
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xc994400, 0xc996400 },
	.num_dsi_phy = 2,
	.quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
};
969