1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "intel_de.h"
27 #include "intel_display_types.h"
28 #include "intel_dkl_phy.h"
29 #include "intel_dpio_phy.h"
30 #include "intel_dpll.h"
31 #include "intel_dpll_mgr.h"
32 #include "intel_pch_refclk.h"
33 #include "intel_tc.h"
34 #include "intel_tc_phy_regs.h"
35 
36 /**
37  * DOC: Display PLLs
38  *
39  * Display PLLs used for driving outputs vary by platform. While some have
40  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
41  * from a pool. In the latter scenario, it is possible that multiple pipes
42  * share a PLL if their configurations match.
43  *
44  * This file provides an abstraction over display PLLs. The function
45  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
46  * users of a PLL are tracked and that tracking is integrated with the atomic
47  * modeset interface. During an atomic operation, required PLLs can be reserved
48  * for a given CRTC and encoder configuration by calling
49  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
50  * with intel_release_shared_dplls().
51  * Changes to the users are first staged in the atomic state, and then made
52  * effective by calling intel_shared_dpll_swap_state() during the atomic
53  * commit phase.
54  */
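
/*
 * Illustrative sketch only (not part of the driver, nothing below is
 * compiled): how an atomic modeset path is expected to drive the interface
 * described above.  The wrapper name example_crtc_modeset_plls() and its
 * error handling are assumptions made for the example; the entry points it
 * calls are the ones named in the DOC comment.
 *
 *	static int example_crtc_modeset_plls(struct intel_atomic_state *state,
 *					     struct intel_crtc *crtc,
 *					     struct intel_encoder *encoder)
 *	{
 *		int ret;
 *
 *		// atomic check phase: stage this crtc as a user of a PLL
 *		ret = intel_reserve_shared_dplls(state, crtc, encoder);
 *		if (ret)
 *			return ret;
 *
 *		// if the crtc is being shut down instead, drop its reference:
 *		// intel_release_shared_dplls(state, crtc);
 *
 *		// atomic commit phase: make the staged PLL users effective
 *		intel_shared_dpll_swap_state(state);
 *
 *		return 0;
 *	}
 */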
55 
56 /* platform specific hooks for managing DPLLs */
57 struct intel_shared_dpll_funcs {
58 	/*
59 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
60 	 * the pll is not already enabled.
61 	 */
62 	void (*enable)(struct drm_i915_private *i915,
63 		       struct intel_shared_dpll *pll);
64 
65 	/*
66 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
67 	 * only when it is safe to disable the pll, i.e., there are no more
68 	 * tracked users for it.
69 	 */
70 	void (*disable)(struct drm_i915_private *i915,
71 			struct intel_shared_dpll *pll);
72 
73 	/*
74 	 * Hook for reading the values currently programmed to the DPLL
75 	 * registers. This is used for initial hw state readout and state
76 	 * verification after a mode set.
77 	 */
78 	bool (*get_hw_state)(struct drm_i915_private *i915,
79 			     struct intel_shared_dpll *pll,
80 			     struct intel_dpll_hw_state *hw_state);
81 
82 	/*
83 	 * Hook for calculating the pll's output frequency based on its passed
84 	 * in state.
85 	 */
86 	int (*get_freq)(struct drm_i915_private *i915,
87 			const struct intel_shared_dpll *pll,
88 			const struct intel_dpll_hw_state *pll_state);
89 };
90 
91 struct intel_dpll_mgr {
92 	const struct dpll_info *dpll_info;
93 
94 	int (*compute_dplls)(struct intel_atomic_state *state,
95 			     struct intel_crtc *crtc,
96 			     struct intel_encoder *encoder);
97 	int (*get_dplls)(struct intel_atomic_state *state,
98 			 struct intel_crtc *crtc,
99 			 struct intel_encoder *encoder);
100 	void (*put_dplls)(struct intel_atomic_state *state,
101 			  struct intel_crtc *crtc);
102 	void (*update_active_dpll)(struct intel_atomic_state *state,
103 				   struct intel_crtc *crtc,
104 				   struct intel_encoder *encoder);
105 	void (*update_ref_clks)(struct drm_i915_private *i915);
106 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
107 			      const struct intel_dpll_hw_state *hw_state);
108 };
109 
110 static void
111 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
112 				  struct intel_shared_dpll_state *shared_dpll)
113 {
114 	enum intel_dpll_id i;
115 
116 	/* Copy shared dpll state */
117 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
118 		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
119 
120 		shared_dpll[i] = pll->state;
121 	}
122 }
123 
124 static struct intel_shared_dpll_state *
125 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
126 {
127 	struct intel_atomic_state *state = to_intel_atomic_state(s);
128 
129 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
130 
131 	if (!state->dpll_set) {
132 		state->dpll_set = true;
133 
134 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
135 						  state->shared_dpll);
136 	}
137 
138 	return state->shared_dpll;
139 }
140 
141 /**
142  * intel_get_shared_dpll_by_id - get a DPLL given its id
143  * @dev_priv: i915 device instance
144  * @id: pll id
145  *
146  * Returns:
147  * A pointer to the DPLL with @id
148  */
149 struct intel_shared_dpll *
150 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
151 			    enum intel_dpll_id id)
152 {
153 	return &dev_priv->display.dpll.shared_dplls[id];
154 }
155 
156 /**
157  * intel_get_shared_dpll_id - get the id of a DPLL
158  * @dev_priv: i915 device instance
159  * @pll: the DPLL
160  *
161  * Returns:
162  * The id of @pll
163  */
164 enum intel_dpll_id
165 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
166 			 struct intel_shared_dpll *pll)
167 {
168 	long pll_idx = pll - dev_priv->display.dpll.shared_dplls;
169 
170 	if (drm_WARN_ON(&dev_priv->drm,
171 			pll_idx < 0 ||
172 			pll_idx >= dev_priv->display.dpll.num_shared_dpll))
173 		return -1;
174 
175 	return pll_idx;
176 }
177 
178 /* For ILK+ */
179 void assert_shared_dpll(struct drm_i915_private *dev_priv,
180 			struct intel_shared_dpll *pll,
181 			bool state)
182 {
183 	bool cur_state;
184 	struct intel_dpll_hw_state hw_state;
185 
186 	if (drm_WARN(&dev_priv->drm, !pll,
187 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
188 		return;
189 
190 	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
191 	I915_STATE_WARN(cur_state != state,
192 	     "%s assertion failure (expected %s, current %s)\n",
193 			pll->info->name, str_on_off(state),
194 			str_on_off(cur_state));
195 }
196 
197 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
198 {
199 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
200 }
201 
202 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
203 {
204 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
205 }
206 
207 static i915_reg_t
208 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
209 			   struct intel_shared_dpll *pll)
210 {
211 	if (IS_DG1(i915))
212 		return DG1_DPLL_ENABLE(pll->info->id);
213 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
214 		return MG_PLL_ENABLE(0);
215 
216 	return ICL_DPLL_ENABLE(pll->info->id);
217 }
218 
219 static i915_reg_t
220 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
221 			struct intel_shared_dpll *pll)
222 {
223 	const enum intel_dpll_id id = pll->info->id;
224 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
225 
226 	if (IS_ALDERLAKE_P(i915))
227 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
228 
229 	return MG_PLL_ENABLE(tc_port);
230 }
231 
232 /**
233  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
234  * @crtc_state: CRTC, and its state, which has a shared DPLL
235  *
236  * Enable the shared DPLL used by the CRTC in @crtc_state.
237  */
238 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
239 {
240 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
241 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
242 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
243 	unsigned int pipe_mask = BIT(crtc->pipe);
244 	unsigned int old_mask;
245 
246 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
247 		return;
248 
249 	mutex_lock(&dev_priv->display.dpll.lock);
250 	old_mask = pll->active_mask;
251 
252 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
253 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
254 		goto out;
255 
256 	pll->active_mask |= pipe_mask;
257 
258 	drm_dbg_kms(&dev_priv->drm,
259 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
260 		    pll->info->name, pll->active_mask, pll->on,
261 		    crtc->base.base.id, crtc->base.name);
262 
263 	if (old_mask) {
264 		drm_WARN_ON(&dev_priv->drm, !pll->on);
265 		assert_shared_dpll_enabled(dev_priv, pll);
266 		goto out;
267 	}
268 	drm_WARN_ON(&dev_priv->drm, pll->on);
269 
270 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
271 	pll->info->funcs->enable(dev_priv, pll);
272 	pll->on = true;
273 
274 out:
275 	mutex_unlock(&dev_priv->display.dpll.lock);
276 }
277 
278 /**
279  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
280  * @crtc_state: CRTC, and its state, which has a shared DPLL
281  *
282  * Disable the shared DPLL used by the CRTC in @crtc_state.
283  */
284 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
285 {
286 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
287 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
288 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
289 	unsigned int pipe_mask = BIT(crtc->pipe);
290 
291 	/* PCH only available on ILK+ */
292 	if (DISPLAY_VER(dev_priv) < 5)
293 		return;
294 
295 	if (pll == NULL)
296 		return;
297 
298 	mutex_lock(&dev_priv->display.dpll.lock);
299 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
300 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
301 		     crtc->base.base.id, crtc->base.name))
302 		goto out;
303 
304 	drm_dbg_kms(&dev_priv->drm,
305 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
306 		    pll->info->name, pll->active_mask, pll->on,
307 		    crtc->base.base.id, crtc->base.name);
308 
309 	assert_shared_dpll_enabled(dev_priv, pll);
310 	drm_WARN_ON(&dev_priv->drm, !pll->on);
311 
312 	pll->active_mask &= ~pipe_mask;
313 	if (pll->active_mask)
314 		goto out;
315 
316 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
317 	pll->info->funcs->disable(dev_priv, pll);
318 	pll->on = false;
319 
320 out:
321 	mutex_unlock(&dev_priv->display.dpll.lock);
322 }
323 
324 static struct intel_shared_dpll *
325 intel_find_shared_dpll(struct intel_atomic_state *state,
326 		       const struct intel_crtc *crtc,
327 		       const struct intel_dpll_hw_state *pll_state,
328 		       unsigned long dpll_mask)
329 {
330 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
331 	struct intel_shared_dpll *pll, *unused_pll = NULL;
332 	struct intel_shared_dpll_state *shared_dpll;
333 	enum intel_dpll_id i;
334 
335 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
336 
337 	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
338 
339 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
340 		pll = &dev_priv->display.dpll.shared_dplls[i];
341 
342 		/* First check PLLs that already have users; remember the first unused one */
343 		if (shared_dpll[i].pipe_mask == 0) {
344 			if (!unused_pll)
345 				unused_pll = pll;
346 			continue;
347 		}
348 
349 		if (memcmp(pll_state,
350 			   &shared_dpll[i].hw_state,
351 			   sizeof(*pll_state)) == 0) {
352 			drm_dbg_kms(&dev_priv->drm,
353 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
354 				    crtc->base.base.id, crtc->base.name,
355 				    pll->info->name,
356 				    shared_dpll[i].pipe_mask,
357 				    pll->active_mask);
358 			return pll;
359 		}
360 	}
361 
362 	/* Ok no matching timings, maybe there's a free one? */
363 	if (unused_pll) {
364 		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
365 			    crtc->base.base.id, crtc->base.name,
366 			    unused_pll->info->name);
367 		return unused_pll;
368 	}
369 
370 	return NULL;
371 }
372 
373 static void
374 intel_reference_shared_dpll(struct intel_atomic_state *state,
375 			    const struct intel_crtc *crtc,
376 			    const struct intel_shared_dpll *pll,
377 			    const struct intel_dpll_hw_state *pll_state)
378 {
379 	struct drm_i915_private *i915 = to_i915(state->base.dev);
380 	struct intel_shared_dpll_state *shared_dpll;
381 	const enum intel_dpll_id id = pll->info->id;
382 
383 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
384 
385 	if (shared_dpll[id].pipe_mask == 0)
386 		shared_dpll[id].hw_state = *pll_state;
387 
388 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
389 		pipe_name(crtc->pipe));
390 
391 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
392 }
393 
394 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
395 					  const struct intel_crtc *crtc,
396 					  const struct intel_shared_dpll *pll)
397 {
398 	struct intel_shared_dpll_state *shared_dpll;
399 
400 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
401 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
402 }
403 
404 static void intel_put_dpll(struct intel_atomic_state *state,
405 			   struct intel_crtc *crtc)
406 {
407 	const struct intel_crtc_state *old_crtc_state =
408 		intel_atomic_get_old_crtc_state(state, crtc);
409 	struct intel_crtc_state *new_crtc_state =
410 		intel_atomic_get_new_crtc_state(state, crtc);
411 
412 	new_crtc_state->shared_dpll = NULL;
413 
414 	if (!old_crtc_state->shared_dpll)
415 		return;
416 
417 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
418 }
419 
420 /**
421  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
422  * @state: atomic state
423  *
424  * This is the dpll version of drm_atomic_helper_swap_state() since the
425  * helper does not handle driver-specific global state.
426  *
427  * For consistency with atomic helpers this function does a complete swap,
428  * i.e. it also puts the current state into @state, even though there is no
429  * need for that at this moment.
430  */
431 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
432 {
433 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
434 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
435 	enum intel_dpll_id i;
436 
437 	if (!state->dpll_set)
438 		return;
439 
440 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
441 		struct intel_shared_dpll *pll =
442 			&dev_priv->display.dpll.shared_dplls[i];
443 
444 		swap(pll->state, shared_dpll[i]);
445 	}
446 }
447 
448 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
449 				      struct intel_shared_dpll *pll,
450 				      struct intel_dpll_hw_state *hw_state)
451 {
452 	const enum intel_dpll_id id = pll->info->id;
453 	intel_wakeref_t wakeref;
454 	u32 val;
455 
456 	wakeref = intel_display_power_get_if_enabled(dev_priv,
457 						     POWER_DOMAIN_DISPLAY_CORE);
458 	if (!wakeref)
459 		return false;
460 
461 	val = intel_de_read(dev_priv, PCH_DPLL(id));
462 	hw_state->dpll = val;
463 	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
464 	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
465 
466 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
467 
468 	return val & DPLL_VCO_ENABLE;
469 }
470 
471 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
472 {
473 	u32 val;
474 	bool enabled;
475 
476 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
477 
478 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
479 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
480 			    DREF_SUPERSPREAD_SOURCE_MASK));
481 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
482 }
483 
484 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
485 				struct intel_shared_dpll *pll)
486 {
487 	const enum intel_dpll_id id = pll->info->id;
488 
489 	/* PCH refclock must be enabled first */
490 	ibx_assert_pch_refclk_enabled(dev_priv);
491 
492 	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
493 	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
494 
495 	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
496 
497 	/* Wait for the clocks to stabilize. */
498 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
499 	udelay(150);
500 
501 	/* The pixel multiplier can only be updated once the
502 	 * DPLL is enabled and the clocks are stable.
503 	 *
504 	 * So write it again.
505 	 */
506 	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
507 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
508 	udelay(200);
509 }
510 
511 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
512 				 struct intel_shared_dpll *pll)
513 {
514 	const enum intel_dpll_id id = pll->info->id;
515 
516 	intel_de_write(dev_priv, PCH_DPLL(id), 0);
517 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
518 	udelay(200);
519 }
520 
521 static int ibx_compute_dpll(struct intel_atomic_state *state,
522 			    struct intel_crtc *crtc,
523 			    struct intel_encoder *encoder)
524 {
525 	return 0;
526 }
527 
528 static int ibx_get_dpll(struct intel_atomic_state *state,
529 			struct intel_crtc *crtc,
530 			struct intel_encoder *encoder)
531 {
532 	struct intel_crtc_state *crtc_state =
533 		intel_atomic_get_new_crtc_state(state, crtc);
534 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
535 	struct intel_shared_dpll *pll;
536 	enum intel_dpll_id i;
537 
538 	if (HAS_PCH_IBX(dev_priv)) {
539 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
540 		i = (enum intel_dpll_id) crtc->pipe;
541 		pll = &dev_priv->display.dpll.shared_dplls[i];
542 
543 		drm_dbg_kms(&dev_priv->drm,
544 			    "[CRTC:%d:%s] using pre-allocated %s\n",
545 			    crtc->base.base.id, crtc->base.name,
546 			    pll->info->name);
547 	} else {
548 		pll = intel_find_shared_dpll(state, crtc,
549 					     &crtc_state->dpll_hw_state,
550 					     BIT(DPLL_ID_PCH_PLL_B) |
551 					     BIT(DPLL_ID_PCH_PLL_A));
552 	}
553 
554 	if (!pll)
555 		return -EINVAL;
556 
557 	/* reference the pll */
558 	intel_reference_shared_dpll(state, crtc,
559 				    pll, &crtc_state->dpll_hw_state);
560 
561 	crtc_state->shared_dpll = pll;
562 
563 	return 0;
564 }
565 
566 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
567 			      const struct intel_dpll_hw_state *hw_state)
568 {
569 	drm_dbg_kms(&dev_priv->drm,
570 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
571 		    "fp0: 0x%x, fp1: 0x%x\n",
572 		    hw_state->dpll,
573 		    hw_state->dpll_md,
574 		    hw_state->fp0,
575 		    hw_state->fp1);
576 }
577 
578 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
579 	.enable = ibx_pch_dpll_enable,
580 	.disable = ibx_pch_dpll_disable,
581 	.get_hw_state = ibx_pch_dpll_get_hw_state,
582 };
583 
584 static const struct dpll_info pch_plls[] = {
585 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
586 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
587 	{ },
588 };
589 
590 static const struct intel_dpll_mgr pch_pll_mgr = {
591 	.dpll_info = pch_plls,
592 	.compute_dplls = ibx_compute_dpll,
593 	.get_dplls = ibx_get_dpll,
594 	.put_dplls = intel_put_dpll,
595 	.dump_hw_state = ibx_dump_hw_state,
596 };
597 
598 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
599 				 struct intel_shared_dpll *pll)
600 {
601 	const enum intel_dpll_id id = pll->info->id;
602 
603 	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
604 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
605 	udelay(20);
606 }
607 
608 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
609 				struct intel_shared_dpll *pll)
610 {
611 	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
612 	intel_de_posting_read(dev_priv, SPLL_CTL);
613 	udelay(20);
614 }
615 
616 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
617 				  struct intel_shared_dpll *pll)
618 {
619 	const enum intel_dpll_id id = pll->info->id;
620 	u32 val;
621 
622 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
623 	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
624 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
625 
626 	/*
627 	 * Try to set up the PCH reference clock once all DPLLs
628 	 * that depend on it have been shut down.
629 	 */
630 	if (dev_priv->pch_ssc_use & BIT(id))
631 		intel_init_pch_refclk(dev_priv);
632 }
633 
634 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
635 				 struct intel_shared_dpll *pll)
636 {
637 	enum intel_dpll_id id = pll->info->id;
638 	u32 val;
639 
640 	val = intel_de_read(dev_priv, SPLL_CTL);
641 	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
642 	intel_de_posting_read(dev_priv, SPLL_CTL);
643 
644 	/*
645 	 * Try to set up the PCH reference clock once all DPLLs
646 	 * that depend on it have been shut down.
647 	 */
648 	if (dev_priv->pch_ssc_use & BIT(id))
649 		intel_init_pch_refclk(dev_priv);
650 }
651 
652 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
653 				       struct intel_shared_dpll *pll,
654 				       struct intel_dpll_hw_state *hw_state)
655 {
656 	const enum intel_dpll_id id = pll->info->id;
657 	intel_wakeref_t wakeref;
658 	u32 val;
659 
660 	wakeref = intel_display_power_get_if_enabled(dev_priv,
661 						     POWER_DOMAIN_DISPLAY_CORE);
662 	if (!wakeref)
663 		return false;
664 
665 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
666 	hw_state->wrpll = val;
667 
668 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
669 
670 	return val & WRPLL_PLL_ENABLE;
671 }
672 
673 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
674 				      struct intel_shared_dpll *pll,
675 				      struct intel_dpll_hw_state *hw_state)
676 {
677 	intel_wakeref_t wakeref;
678 	u32 val;
679 
680 	wakeref = intel_display_power_get_if_enabled(dev_priv,
681 						     POWER_DOMAIN_DISPLAY_CORE);
682 	if (!wakeref)
683 		return false;
684 
685 	val = intel_de_read(dev_priv, SPLL_CTL);
686 	hw_state->spll = val;
687 
688 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
689 
690 	return val & SPLL_PLL_ENABLE;
691 }
692 
693 #define LC_FREQ 2700
694 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
695 
696 #define P_MIN 2
697 #define P_MAX 64
698 #define P_INC 2
699 
700 /* Constraints for PLL good behavior */
701 #define REF_MIN 48
702 #define REF_MAX 400
703 #define VCO_MIN 2400
704 #define VCO_MAX 4800
705 
706 struct hsw_wrpll_rnp {
707 	unsigned p, n2, r2;
708 };
709 
710 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
711 {
712 	unsigned budget;
713 
714 	switch (clock) {
715 	case 25175000:
716 	case 25200000:
717 	case 27000000:
718 	case 27027000:
719 	case 37762500:
720 	case 37800000:
721 	case 40500000:
722 	case 40541000:
723 	case 54000000:
724 	case 54054000:
725 	case 59341000:
726 	case 59400000:
727 	case 72000000:
728 	case 74176000:
729 	case 74250000:
730 	case 81000000:
731 	case 81081000:
732 	case 89012000:
733 	case 89100000:
734 	case 108000000:
735 	case 108108000:
736 	case 111264000:
737 	case 111375000:
738 	case 148352000:
739 	case 148500000:
740 	case 162000000:
741 	case 162162000:
742 	case 222525000:
743 	case 222750000:
744 	case 296703000:
745 	case 297000000:
746 		budget = 0;
747 		break;
748 	case 233500000:
749 	case 245250000:
750 	case 247750000:
751 	case 253250000:
752 	case 298000000:
753 		budget = 1500;
754 		break;
755 	case 169128000:
756 	case 169500000:
757 	case 179500000:
758 	case 202000000:
759 		budget = 2000;
760 		break;
761 	case 256250000:
762 	case 262500000:
763 	case 270000000:
764 	case 272500000:
765 	case 273750000:
766 	case 280750000:
767 	case 281250000:
768 	case 286000000:
769 	case 291750000:
770 		budget = 4000;
771 		break;
772 	case 267250000:
773 	case 268500000:
774 		budget = 5000;
775 		break;
776 	default:
777 		budget = 1000;
778 		break;
779 	}
780 
781 	return budget;
782 }
783 
784 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
785 				 unsigned int r2, unsigned int n2,
786 				 unsigned int p,
787 				 struct hsw_wrpll_rnp *best)
788 {
789 	u64 a, b, c, d, diff, diff_best;
790 
791 	/* No best (r,n,p) yet */
792 	if (best->p == 0) {
793 		best->p = p;
794 		best->n2 = n2;
795 		best->r2 = r2;
796 		return;
797 	}
798 
799 	/*
800 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
801 	 * freq2k.
802 	 *
803 	 * delta = 1e6 *
804 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
805 	 *	   freq2k;
806 	 *
807 	 * and we would like delta <= budget.
808 	 *
809 	 * If the discrepancy is above the PPM-based budget, always prefer to
810 	 * improve upon the previous solution.  However, if you're within the
811 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
812 	 */
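	/*
	 * The comparisons below cross-multiply to avoid 64-bit divisions:
	 * "a >= c" is delta <= budget for the candidate (r2, n2, p), and
	 * "b >= d" is the same test for the current best.
	 */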
813 	a = freq2k * budget * p * r2;
814 	b = freq2k * budget * best->p * best->r2;
815 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
816 	diff_best = abs_diff(freq2k * best->p * best->r2,
817 			     LC_FREQ_2K * best->n2);
818 	c = 1000000 * diff;
819 	d = 1000000 * diff_best;
820 
821 	if (a < c && b < d) {
822 		/* If both are above the budget, pick the closer */
823 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
824 			best->p = p;
825 			best->n2 = n2;
826 			best->r2 = r2;
827 		}
828 	} else if (a >= c && b < d) {
829 		/* The candidate is within the budget but the current best is not: take the candidate. */
830 		best->p = p;
831 		best->n2 = n2;
832 		best->r2 = r2;
833 	} else if (a >= c && b >= d) {
834 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
835 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
836 			best->p = p;
837 			best->n2 = n2;
838 			best->r2 = r2;
839 		}
840 	}
841 	/* Otherwise a < c && b >= d, do nothing */
842 }
843 
844 static void
845 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
846 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
847 {
848 	u64 freq2k;
849 	unsigned p, n2, r2;
850 	struct hsw_wrpll_rnp best = {};
851 	unsigned budget;
852 
853 	freq2k = clock / 100;
854 
855 	budget = hsw_wrpll_get_budget_for_freq(clock);
856 
857 	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL entirely
858 	 * and directly pass the LC PLL to it. */
859 	if (freq2k == 5400000) {
860 		*n2_out = 2;
861 		*p_out = 1;
862 		*r2_out = 2;
863 		return;
864 	}
865 
866 	/*
867 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
868 	 * the WR PLL.
869 	 *
870 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
871 	 * Injecting R2 = 2 * R gives:
872 	 *   REF_MAX * r2 > LC_FREQ * 2 and
873 	 *   REF_MIN * r2 < LC_FREQ * 2
874 	 *
875 	 * Which means the desired boundaries for r2 are:
876 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
877 	 *
878 	 */
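	/*
	 * Worked numbers for the bounds above (illustration only): with
	 * LC_FREQ = 2700, REF_MAX = 400 and REF_MIN = 48, the loop below
	 * scans r2 from 5400 / 400 + 1 = 14 (integer division) up to
	 * 5400 / 48 = 112.
	 */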
879 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
880 	     r2 <= LC_FREQ * 2 / REF_MIN;
881 	     r2++) {
882 
883 		/*
884 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
885 		 *
886 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
887 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
888 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
889 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
890 		 *
891 		 * Which means the desired boundaries for n2 are:
892 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
893 		 */
894 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
895 		     n2 <= VCO_MAX * r2 / LC_FREQ;
896 		     n2++) {
897 
898 			for (p = P_MIN; p <= P_MAX; p += P_INC)
899 				hsw_wrpll_update_rnp(freq2k, budget,
900 						     r2, n2, p, &best);
901 		}
902 	}
903 
904 	*n2_out = best.n2;
905 	*p_out = best.p;
906 	*r2_out = best.r2;
907 }
908 
909 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
910 				  const struct intel_shared_dpll *pll,
911 				  const struct intel_dpll_hw_state *pll_state)
912 {
913 	int refclk;
914 	int n, p, r;
915 	u32 wrpll = pll_state->wrpll;
916 
917 	switch (wrpll & WRPLL_REF_MASK) {
918 	case WRPLL_REF_SPECIAL_HSW:
919 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
920 		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
921 			refclk = dev_priv->display.dpll.ref_clks.nssc;
922 			break;
923 		}
924 		fallthrough;
925 	case WRPLL_REF_PCH_SSC:
926 		/*
927 		 * We could calculate spread here, but our checking
928 		 * code only cares about 5% accuracy, and spread is a max of
929 		 * 0.5% downspread.
930 		 */
931 		refclk = dev_priv->display.dpll.ref_clks.ssc;
932 		break;
933 	case WRPLL_REF_LCPLL:
934 		refclk = 2700000;
935 		break;
936 	default:
937 		MISSING_CASE(wrpll);
938 		return 0;
939 	}
940 
941 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
942 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
943 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
944 
945 	/* Convert to KHz, p & r have a fixed point portion */
946 	return (refclk * n / 10) / (p * r) * 2;
947 }
948 
949 static int
950 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
951 			   struct intel_crtc *crtc)
952 {
953 	struct drm_i915_private *i915 = to_i915(state->base.dev);
954 	struct intel_crtc_state *crtc_state =
955 		intel_atomic_get_new_crtc_state(state, crtc);
956 	unsigned int p, n2, r2;
957 
958 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
959 
960 	crtc_state->dpll_hw_state.wrpll =
961 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
962 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
963 		WRPLL_DIVIDER_POST(p);
964 
965 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
966 							&crtc_state->dpll_hw_state);
967 
968 	return 0;
969 }
970 
971 static struct intel_shared_dpll *
972 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
973 		       struct intel_crtc *crtc)
974 {
975 	struct intel_crtc_state *crtc_state =
976 		intel_atomic_get_new_crtc_state(state, crtc);
977 
978 	return intel_find_shared_dpll(state, crtc,
979 				      &crtc_state->dpll_hw_state,
980 				      BIT(DPLL_ID_WRPLL2) |
981 				      BIT(DPLL_ID_WRPLL1));
982 }
983 
984 static int
985 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
986 {
987 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
988 	int clock = crtc_state->port_clock;
989 
990 	switch (clock / 2) {
991 	case 81000:
992 	case 135000:
993 	case 270000:
994 		return 0;
995 	default:
996 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
997 			    clock);
998 		return -EINVAL;
999 	}
1000 }
1001 
1002 static struct intel_shared_dpll *
1003 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1004 {
1005 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1006 	struct intel_shared_dpll *pll;
1007 	enum intel_dpll_id pll_id;
1008 	int clock = crtc_state->port_clock;
1009 
1010 	switch (clock / 2) {
1011 	case 81000:
1012 		pll_id = DPLL_ID_LCPLL_810;
1013 		break;
1014 	case 135000:
1015 		pll_id = DPLL_ID_LCPLL_1350;
1016 		break;
1017 	case 270000:
1018 		pll_id = DPLL_ID_LCPLL_2700;
1019 		break;
1020 	default:
1021 		MISSING_CASE(clock / 2);
1022 		return NULL;
1023 	}
1024 
1025 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1026 
1027 	if (!pll)
1028 		return NULL;
1029 
1030 	return pll;
1031 }
1032 
1033 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1034 				  const struct intel_shared_dpll *pll,
1035 				  const struct intel_dpll_hw_state *pll_state)
1036 {
1037 	int link_clock = 0;
1038 
1039 	switch (pll->info->id) {
1040 	case DPLL_ID_LCPLL_810:
1041 		link_clock = 81000;
1042 		break;
1043 	case DPLL_ID_LCPLL_1350:
1044 		link_clock = 135000;
1045 		break;
1046 	case DPLL_ID_LCPLL_2700:
1047 		link_clock = 270000;
1048 		break;
1049 	default:
1050 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1051 		break;
1052 	}
1053 
1054 	return link_clock * 2;
1055 }
1056 
1057 static int
1058 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1059 			  struct intel_crtc *crtc)
1060 {
1061 	struct intel_crtc_state *crtc_state =
1062 		intel_atomic_get_new_crtc_state(state, crtc);
1063 
1064 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1065 		return -EINVAL;
1066 
1067 	crtc_state->dpll_hw_state.spll =
1068 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1069 
1070 	return 0;
1071 }
1072 
1073 static struct intel_shared_dpll *
1074 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1075 		      struct intel_crtc *crtc)
1076 {
1077 	struct intel_crtc_state *crtc_state =
1078 		intel_atomic_get_new_crtc_state(state, crtc);
1079 
1080 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1081 				      BIT(DPLL_ID_SPLL));
1082 }
1083 
1084 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1085 				 const struct intel_shared_dpll *pll,
1086 				 const struct intel_dpll_hw_state *pll_state)
1087 {
1088 	int link_clock = 0;
1089 
1090 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1091 	case SPLL_FREQ_810MHz:
1092 		link_clock = 81000;
1093 		break;
1094 	case SPLL_FREQ_1350MHz:
1095 		link_clock = 135000;
1096 		break;
1097 	case SPLL_FREQ_2700MHz:
1098 		link_clock = 270000;
1099 		break;
1100 	default:
1101 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1102 		break;
1103 	}
1104 
1105 	return link_clock * 2;
1106 }
1107 
1108 static int hsw_compute_dpll(struct intel_atomic_state *state,
1109 			    struct intel_crtc *crtc,
1110 			    struct intel_encoder *encoder)
1111 {
1112 	struct intel_crtc_state *crtc_state =
1113 		intel_atomic_get_new_crtc_state(state, crtc);
1114 
1115 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1116 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1117 	else if (intel_crtc_has_dp_encoder(crtc_state))
1118 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1119 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1120 		return hsw_ddi_spll_compute_dpll(state, crtc);
1121 	else
1122 		return -EINVAL;
1123 }
1124 
1125 static int hsw_get_dpll(struct intel_atomic_state *state,
1126 			struct intel_crtc *crtc,
1127 			struct intel_encoder *encoder)
1128 {
1129 	struct intel_crtc_state *crtc_state =
1130 		intel_atomic_get_new_crtc_state(state, crtc);
1131 	struct intel_shared_dpll *pll = NULL;
1132 
1133 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1134 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1135 	else if (intel_crtc_has_dp_encoder(crtc_state))
1136 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1137 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1138 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1139 
1140 	if (!pll)
1141 		return -EINVAL;
1142 
1143 	intel_reference_shared_dpll(state, crtc,
1144 				    pll, &crtc_state->dpll_hw_state);
1145 
1146 	crtc_state->shared_dpll = pll;
1147 
1148 	return 0;
1149 }
1150 
1151 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1152 {
1153 	i915->display.dpll.ref_clks.ssc = 135000;
1154 	/* Non-SSC is only used on non-ULT HSW. */
1155 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1156 		i915->display.dpll.ref_clks.nssc = 24000;
1157 	else
1158 		i915->display.dpll.ref_clks.nssc = 135000;
1159 }
1160 
1161 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1162 			      const struct intel_dpll_hw_state *hw_state)
1163 {
1164 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1165 		    hw_state->wrpll, hw_state->spll);
1166 }
1167 
1168 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1169 	.enable = hsw_ddi_wrpll_enable,
1170 	.disable = hsw_ddi_wrpll_disable,
1171 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1172 	.get_freq = hsw_ddi_wrpll_get_freq,
1173 };
1174 
1175 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1176 	.enable = hsw_ddi_spll_enable,
1177 	.disable = hsw_ddi_spll_disable,
1178 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1179 	.get_freq = hsw_ddi_spll_get_freq,
1180 };
1181 
1182 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1183 				 struct intel_shared_dpll *pll)
1184 {
1185 }
1186 
1187 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1188 				  struct intel_shared_dpll *pll)
1189 {
1190 }
1191 
1192 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1193 				       struct intel_shared_dpll *pll,
1194 				       struct intel_dpll_hw_state *hw_state)
1195 {
1196 	return true;
1197 }
1198 
1199 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1200 	.enable = hsw_ddi_lcpll_enable,
1201 	.disable = hsw_ddi_lcpll_disable,
1202 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1203 	.get_freq = hsw_ddi_lcpll_get_freq,
1204 };
1205 
1206 static const struct dpll_info hsw_plls[] = {
1207 	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1208 	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1209 	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1210 	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1211 	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1212 	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1213 	{ },
1214 };
1215 
1216 static const struct intel_dpll_mgr hsw_pll_mgr = {
1217 	.dpll_info = hsw_plls,
1218 	.compute_dplls = hsw_compute_dpll,
1219 	.get_dplls = hsw_get_dpll,
1220 	.put_dplls = intel_put_dpll,
1221 	.update_ref_clks = hsw_update_dpll_ref_clks,
1222 	.dump_hw_state = hsw_dump_hw_state,
1223 };
1224 
1225 struct skl_dpll_regs {
1226 	i915_reg_t ctl, cfgcr1, cfgcr2;
1227 };
1228 
1229 /* this array is indexed by the *shared* pll id */
1230 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1231 	{
1232 		/* DPLL 0 */
1233 		.ctl = LCPLL1_CTL,
1234 		/* DPLL 0 doesn't support HDMI mode */
1235 	},
1236 	{
1237 		/* DPLL 1 */
1238 		.ctl = LCPLL2_CTL,
1239 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1240 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1241 	},
1242 	{
1243 		/* DPLL 2 */
1244 		.ctl = WRPLL_CTL(0),
1245 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1246 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1247 	},
1248 	{
1249 		/* DPLL 3 */
1250 		.ctl = WRPLL_CTL(1),
1251 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1252 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1253 	},
1254 };
1255 
1256 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1257 				    struct intel_shared_dpll *pll)
1258 {
1259 	const enum intel_dpll_id id = pll->info->id;
1260 	u32 val;
1261 
1262 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1263 
1264 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1265 		 DPLL_CTRL1_SSC(id) |
1266 		 DPLL_CTRL1_LINK_RATE_MASK(id));
1267 	val |= pll->state.hw_state.ctrl1 << (id * 6);
1268 
1269 	intel_de_write(dev_priv, DPLL_CTRL1, val);
1270 	intel_de_posting_read(dev_priv, DPLL_CTRL1);
1271 }
1272 
1273 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1274 			       struct intel_shared_dpll *pll)
1275 {
1276 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1277 	const enum intel_dpll_id id = pll->info->id;
1278 
1279 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1280 
1281 	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1282 	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1283 	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1284 	intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1285 
1286 	/* the enable bit is always bit 31 */
1287 	intel_de_write(dev_priv, regs[id].ctl,
1288 		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
1289 
1290 	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1291 		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
1292 }
1293 
1294 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1295 				 struct intel_shared_dpll *pll)
1296 {
1297 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1298 }
1299 
1300 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1301 				struct intel_shared_dpll *pll)
1302 {
1303 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1304 	const enum intel_dpll_id id = pll->info->id;
1305 
1306 	/* the enable bit is always bit 31 */
1307 	intel_de_write(dev_priv, regs[id].ctl,
1308 		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1309 	intel_de_posting_read(dev_priv, regs[id].ctl);
1310 }
1311 
1312 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1313 				  struct intel_shared_dpll *pll)
1314 {
1315 }
1316 
1317 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1318 				     struct intel_shared_dpll *pll,
1319 				     struct intel_dpll_hw_state *hw_state)
1320 {
1321 	u32 val;
1322 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1323 	const enum intel_dpll_id id = pll->info->id;
1324 	intel_wakeref_t wakeref;
1325 	bool ret;
1326 
1327 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1328 						     POWER_DOMAIN_DISPLAY_CORE);
1329 	if (!wakeref)
1330 		return false;
1331 
1332 	ret = false;
1333 
1334 	val = intel_de_read(dev_priv, regs[id].ctl);
1335 	if (!(val & LCPLL_PLL_ENABLE))
1336 		goto out;
1337 
1338 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1339 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1340 
1341 	/* avoid reading back stale values if HDMI mode is not enabled */
1342 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1343 		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1344 		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1345 	}
1346 	ret = true;
1347 
1348 out:
1349 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1350 
1351 	return ret;
1352 }
1353 
1354 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1355 				       struct intel_shared_dpll *pll,
1356 				       struct intel_dpll_hw_state *hw_state)
1357 {
1358 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1359 	const enum intel_dpll_id id = pll->info->id;
1360 	intel_wakeref_t wakeref;
1361 	u32 val;
1362 	bool ret;
1363 
1364 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1365 						     POWER_DOMAIN_DISPLAY_CORE);
1366 	if (!wakeref)
1367 		return false;
1368 
1369 	ret = false;
1370 
1371 	/* DPLL0 is always enabled since it drives CDCLK */
1372 	val = intel_de_read(dev_priv, regs[id].ctl);
1373 	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1374 		goto out;
1375 
1376 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1377 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1378 
1379 	ret = true;
1380 
1381 out:
1382 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1383 
1384 	return ret;
1385 }
1386 
1387 struct skl_wrpll_context {
1388 	u64 min_deviation;		/* current minimal deviation */
1389 	u64 central_freq;		/* chosen central freq */
1390 	u64 dco_freq;			/* chosen dco freq */
1391 	unsigned int p;			/* chosen divider */
1392 };
1393 
1394 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1395 #define SKL_DCO_MAX_PDEVIATION	100
1396 #define SKL_DCO_MAX_NDEVIATION	600
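/*
 * skl_wrpll_try_divider() computes the deviation as
 * 10000 * |dco_freq - central_freq| / central_freq, i.e. in units of
 * 0.01%, so the limits above correspond to +1% and -6%.
 */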
1397 
1398 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1399 				  u64 central_freq,
1400 				  u64 dco_freq,
1401 				  unsigned int divider)
1402 {
1403 	u64 deviation;
1404 
1405 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1406 			      central_freq);
1407 
1408 	/* positive deviation */
1409 	if (dco_freq >= central_freq) {
1410 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1411 		    deviation < ctx->min_deviation) {
1412 			ctx->min_deviation = deviation;
1413 			ctx->central_freq = central_freq;
1414 			ctx->dco_freq = dco_freq;
1415 			ctx->p = divider;
1416 		}
1417 	/* negative deviation */
1418 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1419 		   deviation < ctx->min_deviation) {
1420 		ctx->min_deviation = deviation;
1421 		ctx->central_freq = central_freq;
1422 		ctx->dco_freq = dco_freq;
1423 		ctx->p = divider;
1424 	}
1425 }
1426 
1427 static void skl_wrpll_get_multipliers(unsigned int p,
1428 				      unsigned int *p0 /* out */,
1429 				      unsigned int *p1 /* out */,
1430 				      unsigned int *p2 /* out */)
1431 {
1432 	/* even dividers */
1433 	if (p % 2 == 0) {
1434 		unsigned int half = p / 2;
1435 
1436 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1437 			*p0 = 2;
1438 			*p1 = 1;
1439 			*p2 = half;
1440 		} else if (half % 2 == 0) {
1441 			*p0 = 2;
1442 			*p1 = half / 2;
1443 			*p2 = 2;
1444 		} else if (half % 3 == 0) {
1445 			*p0 = 3;
1446 			*p1 = half / 3;
1447 			*p2 = 2;
1448 		} else if (half % 7 == 0) {
1449 			*p0 = 7;
1450 			*p1 = half / 7;
1451 			*p2 = 2;
1452 		}
1453 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1454 		*p0 = 3;
1455 		*p1 = 1;
1456 		*p2 = p / 3;
1457 	} else if (p == 5 || p == 7) {
1458 		*p0 = p;
1459 		*p1 = 1;
1460 		*p2 = 1;
1461 	} else if (p == 15) {
1462 		*p0 = 3;
1463 		*p1 = 1;
1464 		*p2 = 5;
1465 	} else if (p == 21) {
1466 		*p0 = 7;
1467 		*p1 = 1;
1468 		*p2 = 3;
1469 	} else if (p == 35) {
1470 		*p0 = 7;
1471 		*p1 = 1;
1472 		*p2 = 5;
1473 	}
1474 }
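/*
 * Example factorizations (illustration only): p = 20 takes the even path
 * above (half = 10, half % 2 == 0) and yields p0/p1/p2 = 2/5/2, while the
 * odd divider p = 21 yields 7/1/3; in every case p0 * p1 * p2 == p.
 */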
1475 
1476 struct skl_wrpll_params {
1477 	u32 dco_fraction;
1478 	u32 dco_integer;
1479 	u32 qdiv_ratio;
1480 	u32 qdiv_mode;
1481 	u32 kdiv;
1482 	u32 pdiv;
1483 	u32 central_freq;
1484 };
1485 
1486 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1487 				      u64 afe_clock,
1488 				      int ref_clock,
1489 				      u64 central_freq,
1490 				      u32 p0, u32 p1, u32 p2)
1491 {
1492 	u64 dco_freq;
1493 
1494 	switch (central_freq) {
1495 	case 9600000000ULL:
1496 		params->central_freq = 0;
1497 		break;
1498 	case 9000000000ULL:
1499 		params->central_freq = 1;
1500 		break;
1501 	case 8400000000ULL:
1502 		params->central_freq = 3;
1503 	}
1504 
1505 	switch (p0) {
1506 	case 1:
1507 		params->pdiv = 0;
1508 		break;
1509 	case 2:
1510 		params->pdiv = 1;
1511 		break;
1512 	case 3:
1513 		params->pdiv = 2;
1514 		break;
1515 	case 7:
1516 		params->pdiv = 4;
1517 		break;
1518 	default:
1519 		WARN(1, "Incorrect PDiv\n");
1520 	}
1521 
1522 	switch (p2) {
1523 	case 5:
1524 		params->kdiv = 0;
1525 		break;
1526 	case 2:
1527 		params->kdiv = 1;
1528 		break;
1529 	case 3:
1530 		params->kdiv = 2;
1531 		break;
1532 	case 1:
1533 		params->kdiv = 3;
1534 		break;
1535 	default:
1536 		WARN(1, "Incorrect KDiv\n");
1537 	}
1538 
1539 	params->qdiv_ratio = p1;
1540 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1541 
1542 	dco_freq = p0 * p1 * p2 * afe_clock;
1543 
1544 	/*
1545 	 * Intermediate values are in Hz.
1546 	 * Divide by MHz to match the bspec
1547 	 */
1548 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1549 	params->dco_fraction =
1550 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1551 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1552 }
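/*
 * Worked example for the DCO encoding above (hypothetical numbers, for
 * illustration only): with ref_clock = 24000 kHz and dco_freq = 8532000000 Hz,
 * dco_integer = 8532000000 / 24000000 = 355 and dco_fraction =
 * (8532000000 / 24 - 355 * 1000000) * 0x8000 / 1000000 = 0x4000, i.e. a
 * fractional part of exactly 0.5.
 */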
1553 
1554 static int
1555 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1556 			int ref_clock,
1557 			struct skl_wrpll_params *wrpll_params)
1558 {
1559 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1560 						 9000000000ULL,
1561 						 9600000000ULL };
1562 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1563 					    24, 28, 30, 32, 36, 40, 42, 44,
1564 					    48, 52, 54, 56, 60, 64, 66, 68,
1565 					    70, 72, 76, 78, 80, 84, 88, 90,
1566 					    92, 96, 98 };
1567 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1568 	static const struct {
1569 		const u8 *list;
1570 		int n_dividers;
1571 	} dividers[] = {
1572 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1573 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1574 	};
1575 	struct skl_wrpll_context ctx = {
1576 		.min_deviation = U64_MAX,
1577 	};
1578 	unsigned int dco, d, i;
1579 	unsigned int p0, p1, p2;
1580 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1581 
1582 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1583 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1584 			for (i = 0; i < dividers[d].n_dividers; i++) {
1585 				unsigned int p = dividers[d].list[i];
1586 				u64 dco_freq = p * afe_clock;
1587 
1588 				skl_wrpll_try_divider(&ctx,
1589 						      dco_central_freq[dco],
1590 						      dco_freq,
1591 						      p);
1592 				/*
1593 				 * Skip the remaining dividers if we're sure to
1594 				 * have found the definitive divider; we can't
1595 				 * improve a 0 deviation.
1596 				 */
1597 				if (ctx.min_deviation == 0)
1598 					goto skip_remaining_dividers;
1599 			}
1600 		}
1601 
1602 skip_remaining_dividers:
1603 		/*
1604 		 * If a solution is found with an even divider, prefer
1605 		 * this one.
1606 		 */
1607 		if (d == 0 && ctx.p)
1608 			break;
1609 	}
1610 
1611 	if (!ctx.p)
1612 		return -EINVAL;
1613 
1614 	/*
1615 	 * gcc incorrectly analyses that these can be used without being
1616 	 * initialized. To be fair, it's hard to guess.
1617 	 */
1618 	p0 = p1 = p2 = 0;
1619 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1620 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1621 				  ctx.central_freq, p0, p1, p2);
1622 
1623 	return 0;
1624 }
1625 
1626 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1627 				  const struct intel_shared_dpll *pll,
1628 				  const struct intel_dpll_hw_state *pll_state)
1629 {
1630 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1631 	u32 p0, p1, p2, dco_freq;
1632 
1633 	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1634 	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1635 
1636 	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1637 		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1638 	else
1639 		p1 = 1;
1640 
1641 
1642 	switch (p0) {
1643 	case DPLL_CFGCR2_PDIV_1:
1644 		p0 = 1;
1645 		break;
1646 	case DPLL_CFGCR2_PDIV_2:
1647 		p0 = 2;
1648 		break;
1649 	case DPLL_CFGCR2_PDIV_3:
1650 		p0 = 3;
1651 		break;
1652 	case DPLL_CFGCR2_PDIV_7_INVALID:
1653 		/*
1654 		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1655 		 * handling it the same way as PDIV_7.
1656 		 */
1657 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1658 		fallthrough;
1659 	case DPLL_CFGCR2_PDIV_7:
1660 		p0 = 7;
1661 		break;
1662 	default:
1663 		MISSING_CASE(p0);
1664 		return 0;
1665 	}
1666 
1667 	switch (p2) {
1668 	case DPLL_CFGCR2_KDIV_5:
1669 		p2 = 5;
1670 		break;
1671 	case DPLL_CFGCR2_KDIV_2:
1672 		p2 = 2;
1673 		break;
1674 	case DPLL_CFGCR2_KDIV_3:
1675 		p2 = 3;
1676 		break;
1677 	case DPLL_CFGCR2_KDIV_1:
1678 		p2 = 1;
1679 		break;
1680 	default:
1681 		MISSING_CASE(p2);
1682 		return 0;
1683 	}
1684 
1685 	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1686 		   ref_clock;
1687 
1688 	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1689 		    ref_clock / 0x8000;
1690 
1691 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1692 		return 0;
1693 
1694 	return dco_freq / (p0 * p1 * p2 * 5);
1695 }
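/*
 * Worked example (hypothetical register contents): with a 24 MHz
 * reference (ref_clock = 24000 kHz), dco_integer = 0x173 (371) and
 * dco_fraction = 0x2000 (8192) give
 *   dco_freq = 371 * 24000 + 8192 * 24000 / 0x8000
 *            = 8904000 + 6000 = 8910000 kHz,
 * and with p0 * p1 * p2 = 12 the function returns
 * 8910000 / (12 * 5) = 148500 kHz, i.e. a 148.5 MHz pixel clock.
 */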
1696 
1697 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1698 {
1699 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1700 	struct skl_wrpll_params wrpll_params = {};
1701 	u32 ctrl1, cfgcr1, cfgcr2;
1702 	int ret;
1703 
1704 	/*
1705 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1706 	 * as the DPLL id in this function.
1707 	 */
1708 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1709 
1710 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1711 
1712 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1713 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1714 	if (ret)
1715 		return ret;
1716 
1717 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1718 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1719 		wrpll_params.dco_integer;
1720 
1721 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1722 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1723 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1724 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1725 		wrpll_params.central_freq;
1726 
1727 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1728 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1729 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1730 
1731 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1732 							&crtc_state->dpll_hw_state);
1733 
1734 	return 0;
1735 }
1736 
1737 static int
1738 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1739 {
1740 	u32 ctrl1;
1741 
1742 	/*
1743 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1744 	 * as the DPLL id in this function.
1745 	 */
1746 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1747 	switch (crtc_state->port_clock / 2) {
1748 	case 81000:
1749 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1750 		break;
1751 	case 135000:
1752 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1753 		break;
1754 	case 270000:
1755 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1756 		break;
1757 		/* eDP 1.4 rates */
1758 	case 162000:
1759 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1760 		break;
1761 	case 108000:
1762 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1763 		break;
1764 	case 216000:
1765 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1766 		break;
1767 	}
1768 
1769 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1770 
1771 	return 0;
1772 }
1773 
1774 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1775 				  const struct intel_shared_dpll *pll,
1776 				  const struct intel_dpll_hw_state *pll_state)
1777 {
1778 	int link_clock = 0;
1779 
1780 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1781 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1782 	case DPLL_CTRL1_LINK_RATE_810:
1783 		link_clock = 81000;
1784 		break;
1785 	case DPLL_CTRL1_LINK_RATE_1080:
1786 		link_clock = 108000;
1787 		break;
1788 	case DPLL_CTRL1_LINK_RATE_1350:
1789 		link_clock = 135000;
1790 		break;
1791 	case DPLL_CTRL1_LINK_RATE_1620:
1792 		link_clock = 162000;
1793 		break;
1794 	case DPLL_CTRL1_LINK_RATE_2160:
1795 		link_clock = 216000;
1796 		break;
1797 	case DPLL_CTRL1_LINK_RATE_2700:
1798 		link_clock = 270000;
1799 		break;
1800 	default:
1801 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1802 		break;
1803 	}
1804 
1805 	return link_clock * 2;
1806 }
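/*
 * Note on the ctrl1 encoding used by the two functions above: the
 * DPLL_CTRL1_LINK_RATE_* values are expressed as half of port_clock, so
 * skl_ddi_dp_set_dpll_hw_state() switches on port_clock / 2 (e.g. HBR2
 * with port_clock = 540000 kHz selects the "2700" encoding) and
 * skl_ddi_lcpll_get_freq() mirrors this by returning link_clock * 2
 * (270000 * 2 = 540000 kHz for the same encoding).
 */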
1807 
1808 static int skl_compute_dpll(struct intel_atomic_state *state,
1809 			    struct intel_crtc *crtc,
1810 			    struct intel_encoder *encoder)
1811 {
1812 	struct intel_crtc_state *crtc_state =
1813 		intel_atomic_get_new_crtc_state(state, crtc);
1814 
1815 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1816 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1817 	else if (intel_crtc_has_dp_encoder(crtc_state))
1818 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1819 	else
1820 		return -EINVAL;
1821 }
1822 
1823 static int skl_get_dpll(struct intel_atomic_state *state,
1824 			struct intel_crtc *crtc,
1825 			struct intel_encoder *encoder)
1826 {
1827 	struct intel_crtc_state *crtc_state =
1828 		intel_atomic_get_new_crtc_state(state, crtc);
1829 	struct intel_shared_dpll *pll;
1830 
1831 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1832 		pll = intel_find_shared_dpll(state, crtc,
1833 					     &crtc_state->dpll_hw_state,
1834 					     BIT(DPLL_ID_SKL_DPLL0));
1835 	else
1836 		pll = intel_find_shared_dpll(state, crtc,
1837 					     &crtc_state->dpll_hw_state,
1838 					     BIT(DPLL_ID_SKL_DPLL3) |
1839 					     BIT(DPLL_ID_SKL_DPLL2) |
1840 					     BIT(DPLL_ID_SKL_DPLL1));
1841 	if (!pll)
1842 		return -EINVAL;
1843 
1844 	intel_reference_shared_dpll(state, crtc,
1845 				    pll, &crtc_state->dpll_hw_state);
1846 
1847 	crtc_state->shared_dpll = pll;
1848 
1849 	return 0;
1850 }
1851 
1852 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1853 				const struct intel_shared_dpll *pll,
1854 				const struct intel_dpll_hw_state *pll_state)
1855 {
1856 	/*
1857 	 * The ctrl1 register is already shifted for each pll; just use 0 to get
1858 	 * the internal shift for each field.
1859 	 */
1860 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1861 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1862 	else
1863 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1864 }
1865 
1866 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1867 {
1868 	/* No SSC ref */
1869 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1870 }
1871 
1872 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1873 			      const struct intel_dpll_hw_state *hw_state)
1874 {
1875 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1876 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1877 		      hw_state->ctrl1,
1878 		      hw_state->cfgcr1,
1879 		      hw_state->cfgcr2);
1880 }
1881 
1882 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1883 	.enable = skl_ddi_pll_enable,
1884 	.disable = skl_ddi_pll_disable,
1885 	.get_hw_state = skl_ddi_pll_get_hw_state,
1886 	.get_freq = skl_ddi_pll_get_freq,
1887 };
1888 
1889 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1890 	.enable = skl_ddi_dpll0_enable,
1891 	.disable = skl_ddi_dpll0_disable,
1892 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1893 	.get_freq = skl_ddi_pll_get_freq,
1894 };
1895 
1896 static const struct dpll_info skl_plls[] = {
1897 	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1898 	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1899 	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1900 	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1901 	{ },
1902 };
1903 
1904 static const struct intel_dpll_mgr skl_pll_mgr = {
1905 	.dpll_info = skl_plls,
1906 	.compute_dplls = skl_compute_dpll,
1907 	.get_dplls = skl_get_dpll,
1908 	.put_dplls = intel_put_dpll,
1909 	.update_ref_clks = skl_update_dpll_ref_clks,
1910 	.dump_hw_state = skl_dump_hw_state,
1911 };
1912 
1913 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1914 				struct intel_shared_dpll *pll)
1915 {
1916 	u32 temp;
1917 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1918 	enum dpio_phy phy;
1919 	enum dpio_channel ch;
1920 
1921 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1922 
1923 	/* Non-SSC reference */
1924 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1925 	temp |= PORT_PLL_REF_SEL;
1926 	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1927 
1928 	if (IS_GEMINILAKE(dev_priv)) {
1929 		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1930 		temp |= PORT_PLL_POWER_ENABLE;
1931 		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1932 
1933 		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1934 				 PORT_PLL_POWER_STATE), 200))
1935 			drm_err(&dev_priv->drm,
1936 				"Power state not set for PLL:%d\n", port);
1937 	}
1938 
1939 	/* Disable 10 bit clock */
1940 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1941 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1942 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1943 
1944 	/* Write P1 & P2 */
1945 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1946 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1947 	temp |= pll->state.hw_state.ebb0;
1948 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
1949 
1950 	/* Write M2 integer */
1951 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1952 	temp &= ~PORT_PLL_M2_INT_MASK;
1953 	temp |= pll->state.hw_state.pll0;
1954 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
1955 
1956 	/* Write N */
1957 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1958 	temp &= ~PORT_PLL_N_MASK;
1959 	temp |= pll->state.hw_state.pll1;
1960 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
1961 
1962 	/* Write M2 fraction */
1963 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1964 	temp &= ~PORT_PLL_M2_FRAC_MASK;
1965 	temp |= pll->state.hw_state.pll2;
1966 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
1967 
1968 	/* Write M2 fraction enable */
1969 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1970 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1971 	temp |= pll->state.hw_state.pll3;
1972 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
1973 
1974 	/* Write coeff */
1975 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1976 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1977 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1978 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1979 	temp |= pll->state.hw_state.pll6;
1980 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1981 
1982 	/* Write calibration val */
1983 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1984 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1985 	temp |= pll->state.hw_state.pll8;
1986 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
1987 
1988 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1989 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1990 	temp |= pll->state.hw_state.pll9;
1991 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
1992 
1993 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1994 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1995 	temp &= ~PORT_PLL_DCO_AMP_MASK;
1996 	temp |= pll->state.hw_state.pll10;
1997 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1998 
1999 	/* Recalibrate with new settings */
2000 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
2001 	temp |= PORT_PLL_RECALIBRATE;
2002 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2003 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2004 	temp |= pll->state.hw_state.ebb4;
2005 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2006 
2007 	/* Enable PLL */
2008 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2009 	temp |= PORT_PLL_ENABLE;
2010 	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
2011 	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2012 
2013 	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2014 			200))
2015 		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
2016 
2017 	if (IS_GEMINILAKE(dev_priv)) {
2018 		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
2019 		temp |= DCC_DELAY_RANGE_2;
2020 		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2021 	}
2022 
2023 	/*
2024 	 * While we write to the group register to program all lanes at once, we
2025 	 * can only read back individual lane registers, so we pick lanes 0/1 for that.
2026 	 */
2027 	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
2028 	temp &= ~LANE_STAGGER_MASK;
2029 	temp &= ~LANESTAGGER_STRAP_OVRD;
2030 	temp |= pll->state.hw_state.pcsdw12;
2031 	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2032 }
2033 
2034 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
2035 					struct intel_shared_dpll *pll)
2036 {
2037 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2038 	u32 temp;
2039 
2040 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2041 	temp &= ~PORT_PLL_ENABLE;
2042 	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
2043 	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2044 
2045 	if (IS_GEMINILAKE(dev_priv)) {
2046 		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2047 		temp &= ~PORT_PLL_POWER_ENABLE;
2048 		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
2049 
2050 		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
2051 				  PORT_PLL_POWER_STATE), 200))
2052 			drm_err(&dev_priv->drm,
2053 				"Power state not reset for PLL:%d\n", port);
2054 	}
2055 }
2056 
2057 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2058 					struct intel_shared_dpll *pll,
2059 					struct intel_dpll_hw_state *hw_state)
2060 {
2061 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2062 	intel_wakeref_t wakeref;
2063 	enum dpio_phy phy;
2064 	enum dpio_channel ch;
2065 	u32 val;
2066 	bool ret;
2067 
2068 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
2069 
2070 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2071 						     POWER_DOMAIN_DISPLAY_CORE);
2072 	if (!wakeref)
2073 		return false;
2074 
2075 	ret = false;
2076 
2077 	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2078 	if (!(val & PORT_PLL_ENABLE))
2079 		goto out;
2080 
2081 	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
2082 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2083 
2084 	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
2085 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2086 
2087 	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
2088 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2089 
2090 	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
2091 	hw_state->pll1 &= PORT_PLL_N_MASK;
2092 
2093 	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
2094 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2095 
2096 	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
2097 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2098 
2099 	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
2100 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2101 			  PORT_PLL_INT_COEFF_MASK |
2102 			  PORT_PLL_GAIN_CTL_MASK;
2103 
2104 	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
2105 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2106 
2107 	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
2108 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2109 
2110 	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
2111 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2112 			   PORT_PLL_DCO_AMP_MASK;
2113 
2114 	/*
2115 	 * While we write to the group register to program all lanes at once, we
2116 	 * can only read back individual lane registers. We configure all lanes the
2117 	 * same way, so just read out lanes 0/1 here and note if lanes 2/3 differ.
2118 	 */
2119 	hw_state->pcsdw12 = intel_de_read(dev_priv,
2120 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2121 	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2122 		drm_dbg(&dev_priv->drm,
2123 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2124 			hw_state->pcsdw12,
2125 			intel_de_read(dev_priv,
2126 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2127 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2128 
2129 	ret = true;
2130 
2131 out:
2132 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2133 
2134 	return ret;
2135 }
2136 
2137 /* pre-calculated values for DP link rates */
2138 static const struct dpll bxt_dp_clk_val[] = {
2139 	/* m2 is .22 binary fixed point */
2140 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2141 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2142 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2143 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2144 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2145 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2146 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2147 };
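/*
 * Example of the .22 fixed-point m2 encoding above:
 * 0x6c00000 / 2^22 = 27.0 and 0x819999a / 2^22 ~= 32.4, so the 270000
 * entry programs m1 * m2 = 2 * 27.0 = 54 and the 162000 entry
 * m1 * m2 = 2 * 32.4 = 64.8.
 */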
2148 
2149 static int
2150 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2151 			  struct dpll *clk_div)
2152 {
2153 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2154 
2155 	/* Calculate HDMI div */
2156 	/*
2157 	 * FIXME: tie the following calculation into
2158 	 * i9xx_crtc_compute_clock
2159 	 */
2160 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2161 		return -EINVAL;
2162 
2163 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2164 
2165 	return 0;
2166 }
2167 
2168 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2169 				    struct dpll *clk_div)
2170 {
2171 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2172 	int i;
2173 
2174 	*clk_div = bxt_dp_clk_val[0];
2175 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2176 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2177 			*clk_div = bxt_dp_clk_val[i];
2178 			break;
2179 		}
2180 	}
2181 
2182 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2183 
2184 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2185 		    clk_div->dot != crtc_state->port_clock);
2186 }
2187 
2188 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2189 				     const struct dpll *clk_div)
2190 {
2191 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2192 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2193 	int clock = crtc_state->port_clock;
2194 	int vco = clk_div->vco;
2195 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2196 	u32 lanestagger;
2197 
2198 	if (vco >= 6200000 && vco <= 6700000) {
2199 		prop_coef = 4;
2200 		int_coef = 9;
2201 		gain_ctl = 3;
2202 		targ_cnt = 8;
2203 	} else if ((vco > 5400000 && vco < 6200000) ||
2204 			(vco >= 4800000 && vco < 5400000)) {
2205 		prop_coef = 5;
2206 		int_coef = 11;
2207 		gain_ctl = 3;
2208 		targ_cnt = 9;
2209 	} else if (vco == 5400000) {
2210 		prop_coef = 3;
2211 		int_coef = 8;
2212 		gain_ctl = 1;
2213 		targ_cnt = 9;
2214 	} else {
2215 		drm_err(&i915->drm, "Invalid VCO\n");
2216 		return -EINVAL;
2217 	}
2218 
2219 	if (clock > 270000)
2220 		lanestagger = 0x18;
2221 	else if (clock > 135000)
2222 		lanestagger = 0x0d;
2223 	else if (clock > 67000)
2224 		lanestagger = 0x07;
2225 	else if (clock > 33000)
2226 		lanestagger = 0x04;
2227 	else
2228 		lanestagger = 0x02;
2229 
2230 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2231 	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2232 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2233 	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2234 
2235 	if (clk_div->m2 & 0x3fffff)
2236 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2237 
2238 	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2239 		PORT_PLL_INT_COEFF(int_coef) |
2240 		PORT_PLL_GAIN_CTL(gain_ctl);
2241 
2242 	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2243 
2244 	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2245 
2246 	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2247 		PORT_PLL_DCO_AMP_OVR_EN_H;
2248 
2249 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2250 
2251 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2252 
2253 	return 0;
2254 }
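/*
 * Worked example (illustrative): for DP HBR2 the bxt_dp_clk_val table
 * above gives p1 = 2, p2 = 1, n = 1, m1 = 2, m2 = 27.0, so with the
 * 100 MHz reference vco = 100000 * 2 * 27 = 5400000 kHz (assuming
 * chv_calc_dpll_params() yields vco = ref * m1 * m2 / n with m2 taken
 * as a real number).  That hits the vco == 5400000 branch
 * (prop_coef = 3, int_coef = 8, gain_ctl = 1, targ_cnt = 9) and, since
 * port_clock = 540000 > 270000, selects lanestagger = 0x18.
 */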
2255 
2256 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2257 				const struct intel_shared_dpll *pll,
2258 				const struct intel_dpll_hw_state *pll_state)
2259 {
2260 	struct dpll clock;
2261 
2262 	clock.m1 = 2;
2263 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2264 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2265 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2266 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2267 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2268 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2269 
2270 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2271 }
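/*
 * Worked example (hypothetical register contents): m2_int = 27 with the
 * fraction disabled gives clock.m2 = 27 << 22, and with n = 1, p1 = 4,
 * p2 = 1 and the 100 MHz reference chv_calc_dpll_params() evaluates to
 * vco = 5400000 kHz and dot = 5400000 / (4 * 1 * 5) = 270000 kHz,
 * consistent with the 270000 kHz entry of bxt_dp_clk_val above, which
 * uses the same n, m and p values.
 */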
2272 
2273 static int
2274 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2275 {
2276 	struct dpll clk_div = {};
2277 
2278 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2279 
2280 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2281 }
2282 
2283 static int
2284 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2285 {
2286 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2287 	struct dpll clk_div = {};
2288 	int ret;
2289 
2290 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2291 
2292 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2293 	if (ret)
2294 		return ret;
2295 
2296 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2297 						      &crtc_state->dpll_hw_state);
2298 
2299 	return 0;
2300 }
2301 
2302 static int bxt_compute_dpll(struct intel_atomic_state *state,
2303 			    struct intel_crtc *crtc,
2304 			    struct intel_encoder *encoder)
2305 {
2306 	struct intel_crtc_state *crtc_state =
2307 		intel_atomic_get_new_crtc_state(state, crtc);
2308 
2309 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2310 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2311 	else if (intel_crtc_has_dp_encoder(crtc_state))
2312 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2313 	else
2314 		return -EINVAL;
2315 }
2316 
2317 static int bxt_get_dpll(struct intel_atomic_state *state,
2318 			struct intel_crtc *crtc,
2319 			struct intel_encoder *encoder)
2320 {
2321 	struct intel_crtc_state *crtc_state =
2322 		intel_atomic_get_new_crtc_state(state, crtc);
2323 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2324 	struct intel_shared_dpll *pll;
2325 	enum intel_dpll_id id;
2326 
2327 	/* 1:1 mapping between ports and PLLs */
2328 	id = (enum intel_dpll_id) encoder->port;
2329 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2330 
2331 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2332 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2333 
2334 	intel_reference_shared_dpll(state, crtc,
2335 				    pll, &crtc_state->dpll_hw_state);
2336 
2337 	crtc_state->shared_dpll = pll;
2338 
2339 	return 0;
2340 }
2341 
2342 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2343 {
2344 	i915->display.dpll.ref_clks.ssc = 100000;
2345 	i915->display.dpll.ref_clks.nssc = 100000;
2346 	/* DSI non-SSC ref 19.2 MHz */
2347 }
2348 
2349 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2350 			      const struct intel_dpll_hw_state *hw_state)
2351 {
2352 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2353 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2354 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2355 		    hw_state->ebb0,
2356 		    hw_state->ebb4,
2357 		    hw_state->pll0,
2358 		    hw_state->pll1,
2359 		    hw_state->pll2,
2360 		    hw_state->pll3,
2361 		    hw_state->pll6,
2362 		    hw_state->pll8,
2363 		    hw_state->pll9,
2364 		    hw_state->pll10,
2365 		    hw_state->pcsdw12);
2366 }
2367 
2368 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2369 	.enable = bxt_ddi_pll_enable,
2370 	.disable = bxt_ddi_pll_disable,
2371 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2372 	.get_freq = bxt_ddi_pll_get_freq,
2373 };
2374 
2375 static const struct dpll_info bxt_plls[] = {
2376 	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2377 	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2378 	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2379 	{ },
2380 };
2381 
2382 static const struct intel_dpll_mgr bxt_pll_mgr = {
2383 	.dpll_info = bxt_plls,
2384 	.compute_dplls = bxt_compute_dpll,
2385 	.get_dplls = bxt_get_dpll,
2386 	.put_dplls = intel_put_dpll,
2387 	.update_ref_clks = bxt_update_dpll_ref_clks,
2388 	.dump_hw_state = bxt_dump_hw_state,
2389 };
2390 
2391 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2392 				      int *qdiv, int *kdiv)
2393 {
2394 	/* even dividers */
2395 	if (bestdiv % 2 == 0) {
2396 		if (bestdiv == 2) {
2397 			*pdiv = 2;
2398 			*qdiv = 1;
2399 			*kdiv = 1;
2400 		} else if (bestdiv % 4 == 0) {
2401 			*pdiv = 2;
2402 			*qdiv = bestdiv / 4;
2403 			*kdiv = 2;
2404 		} else if (bestdiv % 6 == 0) {
2405 			*pdiv = 3;
2406 			*qdiv = bestdiv / 6;
2407 			*kdiv = 2;
2408 		} else if (bestdiv % 5 == 0) {
2409 			*pdiv = 5;
2410 			*qdiv = bestdiv / 10;
2411 			*kdiv = 2;
2412 		} else if (bestdiv % 14 == 0) {
2413 			*pdiv = 7;
2414 			*qdiv = bestdiv / 14;
2415 			*kdiv = 2;
2416 		}
2417 	} else {
2418 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2419 			*pdiv = bestdiv;
2420 			*qdiv = 1;
2421 			*kdiv = 1;
2422 		} else { /* 9, 15, 21 */
2423 			*pdiv = bestdiv / 3;
2424 			*qdiv = 1;
2425 			*kdiv = 3;
2426 		}
2427 	}
2428 }
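/*
 * Examples of the decomposition above: bestdiv = 12 takes the
 * "% 4 == 0" branch and yields pdiv = 2, qdiv = 3, kdiv = 2
 * (2 * 3 * 2 = 12); bestdiv = 3 yields pdiv = 3, qdiv = 1, kdiv = 1;
 * bestdiv = 15 yields pdiv = 5, qdiv = 1, kdiv = 3.
 */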
2429 
2430 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2431 				      u32 dco_freq, u32 ref_freq,
2432 				      int pdiv, int qdiv, int kdiv)
2433 {
2434 	u32 dco;
2435 
2436 	switch (kdiv) {
2437 	case 1:
2438 		params->kdiv = 1;
2439 		break;
2440 	case 2:
2441 		params->kdiv = 2;
2442 		break;
2443 	case 3:
2444 		params->kdiv = 4;
2445 		break;
2446 	default:
2447 		WARN(1, "Incorrect KDiv\n");
2448 	}
2449 
2450 	switch (pdiv) {
2451 	case 2:
2452 		params->pdiv = 1;
2453 		break;
2454 	case 3:
2455 		params->pdiv = 2;
2456 		break;
2457 	case 5:
2458 		params->pdiv = 4;
2459 		break;
2460 	case 7:
2461 		params->pdiv = 8;
2462 		break;
2463 	default:
2464 		WARN(1, "Incorrect PDiv\n");
2465 	}
2466 
2467 	WARN_ON(kdiv != 2 && qdiv != 1);
2468 
2469 	params->qdiv_ratio = qdiv;
2470 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2471 
2472 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2473 
2474 	params->dco_integer = dco >> 15;
2475 	params->dco_fraction = dco & 0x7fff;
2476 }
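/*
 * Worked example: an 8100000 kHz DCO with a 24000 kHz reference gives
 * dco = (8100000 << 15) / 24000 = 337.5 in .15 fixed point, i.e.
 * dco_integer = 0x151 (337) and dco_fraction = 0x4000 (0.5 * 2^15),
 * the same values used by the icl_dp_combo_pll_24MHz_values table
 * below.
 */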
2477 
2478 /*
2479  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2480  * Program half of the nominal DCO divider fraction value.
2481  */
2482 static bool
2483 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2484 {
2485 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2486 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2487 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2488 		 i915->display.dpll.ref_clks.nssc == 38400;
2489 }
2490 
2491 struct icl_combo_pll_params {
2492 	int clock;
2493 	struct skl_wrpll_params wrpll;
2494 };
2495 
2496 /*
2497  * These values are already adjusted: they're the bits we write to the
2498  * registers, not the logical values.
2499  */
2500 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2501 	{ 540000,
2502 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2503 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2504 	{ 270000,
2505 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2506 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2507 	{ 162000,
2508 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2509 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2510 	{ 324000,
2511 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2512 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2513 	{ 216000,
2514 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2515 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2516 	{ 432000,
2517 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2518 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2519 	{ 648000,
2520 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2521 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2522 	{ 810000,
2523 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2524 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2525 };
2526 
2527 
2528 /* Also used for 38.4 MHz values. */
2529 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2530 	{ 540000,
2531 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2532 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2533 	{ 270000,
2534 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2535 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2536 	{ 162000,
2537 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2538 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2539 	{ 324000,
2540 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2541 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2542 	{ 216000,
2543 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2544 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2545 	{ 432000,
2546 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2547 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2548 	{ 648000,
2549 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2550 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2551 	{ 810000,
2552 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2553 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2554 };
2555 
2556 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2557 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2558 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2559 };
2560 
2561 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2562 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2563 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2564 };
2565 
2566 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2567 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2568 	/* the following params are unused */
2569 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2570 };
2571 
2572 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2573 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2574 	/* the following params are unused */
2575 };
2576 
2577 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2578 				 struct skl_wrpll_params *pll_params)
2579 {
2580 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2581 	const struct icl_combo_pll_params *params =
2582 		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2583 		icl_dp_combo_pll_24MHz_values :
2584 		icl_dp_combo_pll_19_2MHz_values;
2585 	int clock = crtc_state->port_clock;
2586 	int i;
2587 
2588 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2589 		if (clock == params[i].clock) {
2590 			*pll_params = params[i].wrpll;
2591 			return 0;
2592 		}
2593 	}
2594 
2595 	MISSING_CASE(clock);
2596 	return -EINVAL;
2597 }
2598 
2599 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2600 			    struct skl_wrpll_params *pll_params)
2601 {
2602 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2603 
2604 	if (DISPLAY_VER(dev_priv) >= 12) {
2605 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2606 		default:
2607 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2608 			fallthrough;
2609 		case 19200:
2610 		case 38400:
2611 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2612 			break;
2613 		case 24000:
2614 			*pll_params = tgl_tbt_pll_24MHz_values;
2615 			break;
2616 		}
2617 	} else {
2618 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2619 		default:
2620 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2621 			fallthrough;
2622 		case 19200:
2623 		case 38400:
2624 			*pll_params = icl_tbt_pll_19_2MHz_values;
2625 			break;
2626 		case 24000:
2627 			*pll_params = icl_tbt_pll_24MHz_values;
2628 			break;
2629 		}
2630 	}
2631 
2632 	return 0;
2633 }
2634 
2635 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2636 				    const struct intel_shared_dpll *pll,
2637 				    const struct intel_dpll_hw_state *pll_state)
2638 {
2639 	/*
2640 	 * The PLL outputs multiple frequencies at the same time; the selection is
2641 	 * made at the DDI clock mux level.
2642 	 */
2643 	drm_WARN_ON(&i915->drm, 1);
2644 
2645 	return 0;
2646 }
2647 
2648 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2649 {
2650 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2651 
2652 	/*
2653 	 * For ICL+, the spec states: if reference frequency is 38.4,
2654 	 * use 19.2 because the DPLL automatically divides that by 2.
2655 	 */
2656 	if (ref_clock == 38400)
2657 		ref_clock = 19200;
2658 
2659 	return ref_clock;
2660 }
2661 
2662 static int
2663 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2664 	       struct skl_wrpll_params *wrpll_params)
2665 {
2666 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2667 	int ref_clock = icl_wrpll_ref_clock(i915);
2668 	u32 afe_clock = crtc_state->port_clock * 5;
2669 	u32 dco_min = 7998000;
2670 	u32 dco_max = 10000000;
2671 	u32 dco_mid = (dco_min + dco_max) / 2;
2672 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2673 					 18, 20, 24, 28, 30, 32,  36,  40,
2674 					 42, 44, 48, 50, 52, 54,  56,  60,
2675 					 64, 66, 68, 70, 72, 76,  78,  80,
2676 					 84, 88, 90, 92, 96, 98, 100, 102,
2677 					  3,  5,  7,  9, 15, 21 };
2678 	u32 dco, best_dco = 0, dco_centrality = 0;
2679 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2680 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2681 
2682 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2683 		dco = afe_clock * dividers[d];
2684 
2685 		if (dco <= dco_max && dco >= dco_min) {
2686 			dco_centrality = abs(dco - dco_mid);
2687 
2688 			if (dco_centrality < best_dco_centrality) {
2689 				best_dco_centrality = dco_centrality;
2690 				best_div = dividers[d];
2691 				best_dco = dco;
2692 			}
2693 		}
2694 	}
2695 
2696 	if (best_div == 0)
2697 		return -EINVAL;
2698 
2699 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2700 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2701 				  pdiv, qdiv, kdiv);
2702 
2703 	return 0;
2704 }
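/*
 * Worked example (illustrative): for a 594 MHz HDMI port clock,
 * afe_clock = 594000 * 5 = 2970000 kHz.  Only the divider 3 produces a
 * DCO inside [7998000, 10000000] kHz (2 gives 5940000, 4 gives
 * 11880000), so best_dco = 8910000 kHz with centrality
 * |8910000 - 8999000| = 89000, and icl_wrpll_get_multipliers(3) yields
 * pdiv = 3, qdiv = 1, kdiv = 1.
 */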
2705 
2706 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2707 				      const struct intel_shared_dpll *pll,
2708 				      const struct intel_dpll_hw_state *pll_state)
2709 {
2710 	int ref_clock = icl_wrpll_ref_clock(i915);
2711 	u32 dco_fraction;
2712 	u32 p0, p1, p2, dco_freq;
2713 
2714 	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2715 	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2716 
2717 	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2718 		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2719 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2720 	else
2721 		p1 = 1;
2722 
2723 	switch (p0) {
2724 	case DPLL_CFGCR1_PDIV_2:
2725 		p0 = 2;
2726 		break;
2727 	case DPLL_CFGCR1_PDIV_3:
2728 		p0 = 3;
2729 		break;
2730 	case DPLL_CFGCR1_PDIV_5:
2731 		p0 = 5;
2732 		break;
2733 	case DPLL_CFGCR1_PDIV_7:
2734 		p0 = 7;
2735 		break;
2736 	}
2737 
2738 	switch (p2) {
2739 	case DPLL_CFGCR1_KDIV_1:
2740 		p2 = 1;
2741 		break;
2742 	case DPLL_CFGCR1_KDIV_2:
2743 		p2 = 2;
2744 		break;
2745 	case DPLL_CFGCR1_KDIV_3:
2746 		p2 = 3;
2747 		break;
2748 	}
2749 
2750 	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2751 		   ref_clock;
2752 
2753 	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2754 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2755 
2756 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2757 		dco_fraction *= 2;
2758 
2759 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2760 
2761 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2762 		return 0;
2763 
2764 	return dco_freq / (p0 * p1 * p2 * 5);
2765 }
2766 
2767 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2768 				const struct skl_wrpll_params *pll_params,
2769 				struct intel_dpll_hw_state *pll_state)
2770 {
2771 	u32 dco_fraction = pll_params->dco_fraction;
2772 
2773 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2774 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2775 
2776 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2777 			    pll_params->dco_integer;
2778 
2779 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2780 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2781 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2782 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2783 
2784 	if (DISPLAY_VER(i915) >= 12)
2785 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2786 	else
2787 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2788 
2789 	if (i915->display.vbt.override_afc_startup)
2790 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2791 }
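/*
 * Example of the Display WA #22010492432 handling: with a 38.4 MHz
 * reference the nominal dco_fraction 0x7000 (the 19.2 MHz table above
 * is reused for 38.4 MHz) is programmed as
 * DIV_ROUND_CLOSEST(0x7000, 2) = 0x3800, and
 * icl_ddi_combo_pll_get_freq() doubles the readout again so the
 * calculated frequency is based on the nominal 0x7000 value.
 */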
2792 
2793 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2794 				    u32 *target_dco_khz,
2795 				    struct intel_dpll_hw_state *state,
2796 				    bool is_dkl)
2797 {
2798 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2799 	u32 dco_min_freq, dco_max_freq;
2800 	unsigned int i;
2801 	int div2;
2802 
2803 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2804 	dco_max_freq = is_dp ? 8100000 : 10000000;
2805 
2806 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2807 		int div1 = div1_vals[i];
2808 
2809 		for (div2 = 10; div2 > 0; div2--) {
2810 			int dco = div1 * div2 * clock_khz * 5;
2811 			int a_divratio, tlinedrv, inputsel;
2812 			u32 hsdiv;
2813 
2814 			if (dco < dco_min_freq || dco > dco_max_freq)
2815 				continue;
2816 
2817 			if (div2 >= 2) {
2818 				/*
2819 				 * Note: a_divratio does not match the TGL BSpec
2820 				 * algorithm, but it matches the hardcoded values
2821 				 * and works on HW, at least for DP alt-mode.
2822 				 */
2823 				a_divratio = is_dp ? 10 : 5;
2824 				tlinedrv = is_dkl ? 1 : 2;
2825 			} else {
2826 				a_divratio = 5;
2827 				tlinedrv = 0;
2828 			}
2829 			inputsel = is_dp ? 0 : 1;
2830 
2831 			switch (div1) {
2832 			default:
2833 				MISSING_CASE(div1);
2834 				fallthrough;
2835 			case 2:
2836 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2837 				break;
2838 			case 3:
2839 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2840 				break;
2841 			case 5:
2842 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2843 				break;
2844 			case 7:
2845 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2846 				break;
2847 			}
2848 
2849 			*target_dco_khz = dco;
2850 
2851 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2852 
2853 			state->mg_clktop2_coreclkctl1 =
2854 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2855 
2856 			state->mg_clktop2_hsclkctl =
2857 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2858 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2859 				hsdiv |
2860 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2861 
2862 			return 0;
2863 		}
2864 	}
2865 
2866 	return -EINVAL;
2867 }
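/*
 * Worked example (illustrative): for DP at 270000 kHz the target DCO
 * range collapses to exactly 8100000 kHz, and
 * dco = div1 * div2 * 270000 * 5 only hits it with div1 = 3, div2 = 2
 * (the div1 = 7 and div1 = 5 iterations find no integer div2).  Since
 * div2 >= 2 and is_dp, a_divratio = 10, tlinedrv is 1 on DKL (2
 * otherwise), inputsel = 0 and hsdiv selects the ratio-3 encoding.
 */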
2868 
2869 /*
2870  * The specification for this function uses real numbers, so the math had to be
2871  * adapted to an integer-only calculation, which is why it looks so different.
2872  */
2873 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2874 				 struct intel_dpll_hw_state *pll_state)
2875 {
2876 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2877 	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2878 	int clock = crtc_state->port_clock;
2879 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2880 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2881 	u32 prop_coeff, int_coeff;
2882 	u32 tdc_targetcnt, feedfwgain;
2883 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2884 	u64 tmp;
2885 	bool use_ssc = false;
2886 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2887 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2888 	int ret;
2889 
2890 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2891 				       pll_state, is_dkl);
2892 	if (ret)
2893 		return ret;
2894 
2895 	m1div = 2;
2896 	m2div_int = dco_khz / (refclk_khz * m1div);
2897 	if (m2div_int > 255) {
2898 		if (!is_dkl) {
2899 			m1div = 4;
2900 			m2div_int = dco_khz / (refclk_khz * m1div);
2901 		}
2902 
2903 		if (m2div_int > 255)
2904 			return -EINVAL;
2905 	}
2906 	m2div_rem = dco_khz % (refclk_khz * m1div);
2907 
2908 	tmp = (u64)m2div_rem * (1 << 22);
2909 	do_div(tmp, refclk_khz * m1div);
2910 	m2div_frac = tmp;
2911 
2912 	switch (refclk_khz) {
2913 	case 19200:
2914 		iref_ndiv = 1;
2915 		iref_trim = 28;
2916 		iref_pulse_w = 1;
2917 		break;
2918 	case 24000:
2919 		iref_ndiv = 1;
2920 		iref_trim = 25;
2921 		iref_pulse_w = 2;
2922 		break;
2923 	case 38400:
2924 		iref_ndiv = 2;
2925 		iref_trim = 28;
2926 		iref_pulse_w = 1;
2927 		break;
2928 	default:
2929 		MISSING_CASE(refclk_khz);
2930 		return -EINVAL;
2931 	}
2932 
2933 	/*
2934 	 * tdc_res = 0.000003
2935 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2936 	 *
2937 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2938 	 * was supposed to be a division, but we rearranged the operations of
2939 	 * the formula to avoid early divisions so we don't multiply the
2940 	 * rounding errors.
2941 	 *
2942 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2943 	 * we also rearrange to work with integers.
2944 	 *
2945 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2946 	 * last division by 10.
2947 	 */
2948 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2949 
2950 	/*
2951 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2952 	 * 32 bits. That's not a problem since we round the division down
2953 	 * anyway.
2954 	 */
2955 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2956 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2957 
2958 	if (dco_khz >= 9000000) {
2959 		prop_coeff = 5;
2960 		int_coeff = 10;
2961 	} else {
2962 		prop_coeff = 4;
2963 		int_coeff = 8;
2964 	}
2965 
2966 	if (use_ssc) {
2967 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2968 		do_div(tmp, refclk_khz * m1div * 10000);
2969 		ssc_stepsize = tmp;
2970 
2971 		tmp = mul_u32_u32(dco_khz, 1000);
2972 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2973 	} else {
2974 		ssc_stepsize = 0;
2975 		ssc_steplen = 0;
2976 	}
2977 	ssc_steplog = 4;
2978 
2979 	/* write pll_state calculations */
2980 	if (is_dkl) {
2981 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2982 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2983 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2984 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2985 		if (dev_priv->display.vbt.override_afc_startup) {
2986 			u8 val = dev_priv->display.vbt.override_afc_startup_val;
2987 
2988 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2989 		}
2990 
2991 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2992 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2993 
2994 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2995 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2996 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2997 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2998 
2999 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3000 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3001 
3002 		pll_state->mg_pll_tdc_coldst_bias =
3003 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3004 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3005 
3006 	} else {
3007 		pll_state->mg_pll_div0 =
3008 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3009 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3010 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3011 
3012 		pll_state->mg_pll_div1 =
3013 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3014 			MG_PLL_DIV1_DITHER_DIV_2 |
3015 			MG_PLL_DIV1_NDIVRATIO(1) |
3016 			MG_PLL_DIV1_FBPREDIV(m1div);
3017 
3018 		pll_state->mg_pll_lf =
3019 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3020 			MG_PLL_LF_AFCCNTSEL_512 |
3021 			MG_PLL_LF_GAINCTRL(1) |
3022 			MG_PLL_LF_INT_COEFF(int_coeff) |
3023 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3024 
3025 		pll_state->mg_pll_frac_lock =
3026 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3027 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3028 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3029 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3030 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3031 		if (use_ssc || m2div_rem > 0)
3032 			pll_state->mg_pll_frac_lock |=
3033 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3034 
3035 		pll_state->mg_pll_ssc =
3036 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3037 			MG_PLL_SSC_TYPE(2) |
3038 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3039 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3040 			MG_PLL_SSC_FLLEN |
3041 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3042 
3043 		pll_state->mg_pll_tdc_coldst_bias =
3044 			MG_PLL_TDC_COLDST_COLDSTART |
3045 			MG_PLL_TDC_COLDST_IREFINT_EN |
3046 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3047 			MG_PLL_TDC_TDCOVCCORR_EN |
3048 			MG_PLL_TDC_TDCSEL(3);
3049 
3050 		pll_state->mg_pll_bias =
3051 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3052 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3053 			MG_PLL_BIAS_BIAS_BONUS(10) |
3054 			MG_PLL_BIAS_BIASCAL_EN |
3055 			MG_PLL_BIAS_CTRIM(12) |
3056 			MG_PLL_BIAS_VREF_RDAC(4) |
3057 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3058 
3059 		if (refclk_khz == 38400) {
3060 			pll_state->mg_pll_tdc_coldst_bias_mask =
3061 				MG_PLL_TDC_COLDST_COLDSTART;
3062 			pll_state->mg_pll_bias_mask = 0;
3063 		} else {
3064 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3065 			pll_state->mg_pll_bias_mask = -1U;
3066 		}
3067 
3068 		pll_state->mg_pll_tdc_coldst_bias &=
3069 			pll_state->mg_pll_tdc_coldst_bias_mask;
3070 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3071 	}
3072 
3073 	return 0;
3074 }
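/*
 * Worked example (illustrative): continuing the 8100000 kHz DP case
 * with a 38400 kHz reference, m1div = 2 gives
 * m2div_int = 8100000 / 76800 = 105, m2div_rem = 36000 and
 * m2div_frac = 36000 * 2^22 / 76800 = 0x1e0000, while the integer-only
 * tdc formula above evaluates to
 * tdc_targetcnt = (2000000000 / 5068800 + 5) / 10 = 39.
 */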
3075 
3076 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3077 				   const struct intel_shared_dpll *pll,
3078 				   const struct intel_dpll_hw_state *pll_state)
3079 {
3080 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3081 	u64 tmp;
3082 
3083 	ref_clock = dev_priv->display.dpll.ref_clks.nssc;
3084 
3085 	if (DISPLAY_VER(dev_priv) >= 12) {
3086 		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3087 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3088 		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3089 
3090 		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3091 			m2_frac = pll_state->mg_pll_bias &
3092 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3093 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3094 		} else {
3095 			m2_frac = 0;
3096 		}
3097 	} else {
3098 		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3099 		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3100 
3101 		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3102 			m2_frac = pll_state->mg_pll_div0 &
3103 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3104 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3105 		} else {
3106 			m2_frac = 0;
3107 		}
3108 	}
3109 
3110 	switch (pll_state->mg_clktop2_hsclkctl &
3111 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3112 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3113 		div1 = 2;
3114 		break;
3115 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3116 		div1 = 3;
3117 		break;
3118 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3119 		div1 = 5;
3120 		break;
3121 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3122 		div1 = 7;
3123 		break;
3124 	default:
3125 		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3126 		return 0;
3127 	}
3128 
3129 	div2 = (pll_state->mg_clktop2_hsclkctl &
3130 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3131 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3132 
3133 	/* a div2 value of 0 is the same as 1, i.e. no divider */
3134 	if (div2 == 0)
3135 		div2 = 1;
3136 
3137 	/*
3138 	 * Adjust the original formula to delay the division by 2^22 in order to
3139 	 * minimize possible rounding errors.
3140 	 */
3141 	tmp = (u64)m1 * m2_int * ref_clock +
3142 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3143 	tmp = div_u64(tmp, 5 * div1 * div2);
3144 
3145 	return tmp;
3146 }
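/*
 * Worked example (illustrative): reading back the values from the
 * example above (m1 = 2, m2_int = 105, m2_frac = 0x1e0000, div1 = 3,
 * div2 = 2, 38400 kHz reference) gives
 *   tmp = 2 * 105 * 38400 + ((2 * 0x1e0000 * 38400) >> 22)
 *       = 8064000 + 36000 = 8100000,
 * and 8100000 / (5 * 3 * 2) = 270000 kHz, the original port clock.
 */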
3147 
3148 /**
3149  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3150  * @crtc_state: state for the CRTC to select the DPLL for
3151  * @port_dpll_id: the active @port_dpll_id to select
3152  *
3153  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3154  * CRTC.
3155  */
3156 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3157 			      enum icl_port_dpll_id port_dpll_id)
3158 {
3159 	struct icl_port_dpll *port_dpll =
3160 		&crtc_state->icl_port_dplls[port_dpll_id];
3161 
3162 	crtc_state->shared_dpll = port_dpll->pll;
3163 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3164 }
3165 
3166 static void icl_update_active_dpll(struct intel_atomic_state *state,
3167 				   struct intel_crtc *crtc,
3168 				   struct intel_encoder *encoder)
3169 {
3170 	struct intel_crtc_state *crtc_state =
3171 		intel_atomic_get_new_crtc_state(state, crtc);
3172 	struct intel_digital_port *primary_port;
3173 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3174 
3175 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3176 		enc_to_mst(encoder)->primary :
3177 		enc_to_dig_port(encoder);
3178 
3179 	if (primary_port &&
3180 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3181 	     intel_tc_port_in_legacy_mode(primary_port)))
3182 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3183 
3184 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3185 }
3186 
3187 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3188 {
3189 	if (!(i915->hti_state & HDPORT_ENABLED))
3190 		return 0;
3191 
3192 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3193 }
3194 
3195 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3196 				      struct intel_crtc *crtc)
3197 {
3198 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3199 	struct intel_crtc_state *crtc_state =
3200 		intel_atomic_get_new_crtc_state(state, crtc);
3201 	struct icl_port_dpll *port_dpll =
3202 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3203 	struct skl_wrpll_params pll_params = {};
3204 	int ret;
3205 
3206 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3207 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3208 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3209 	else
3210 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3211 
3212 	if (ret)
3213 		return ret;
3214 
3215 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3216 
3217 	/* this is mainly for the fastset check */
3218 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3219 
3220 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3221 							    &port_dpll->hw_state);
3222 
3223 	return 0;
3224 }
3225 
3226 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3227 				  struct intel_crtc *crtc,
3228 				  struct intel_encoder *encoder)
3229 {
3230 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3231 	struct intel_crtc_state *crtc_state =
3232 		intel_atomic_get_new_crtc_state(state, crtc);
3233 	struct icl_port_dpll *port_dpll =
3234 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3235 	enum port port = encoder->port;
3236 	unsigned long dpll_mask;
3237 
3238 	if (IS_ALDERLAKE_S(dev_priv)) {
3239 		dpll_mask =
3240 			BIT(DPLL_ID_DG1_DPLL3) |
3241 			BIT(DPLL_ID_DG1_DPLL2) |
3242 			BIT(DPLL_ID_ICL_DPLL1) |
3243 			BIT(DPLL_ID_ICL_DPLL0);
3244 	} else if (IS_DG1(dev_priv)) {
3245 		if (port == PORT_D || port == PORT_E) {
3246 			dpll_mask =
3247 				BIT(DPLL_ID_DG1_DPLL2) |
3248 				BIT(DPLL_ID_DG1_DPLL3);
3249 		} else {
3250 			dpll_mask =
3251 				BIT(DPLL_ID_DG1_DPLL0) |
3252 				BIT(DPLL_ID_DG1_DPLL1);
3253 		}
3254 	} else if (IS_ROCKETLAKE(dev_priv)) {
3255 		dpll_mask =
3256 			BIT(DPLL_ID_EHL_DPLL4) |
3257 			BIT(DPLL_ID_ICL_DPLL1) |
3258 			BIT(DPLL_ID_ICL_DPLL0);
3259 	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3260 		dpll_mask =
3261 			BIT(DPLL_ID_EHL_DPLL4) |
3262 			BIT(DPLL_ID_ICL_DPLL1) |
3263 			BIT(DPLL_ID_ICL_DPLL0);
3264 	} else {
3265 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3266 	}
3267 
3268 	/* Eliminate DPLLs from consideration if reserved by HTI */
3269 	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3270 
3271 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3272 						&port_dpll->hw_state,
3273 						dpll_mask);
3274 	if (!port_dpll->pll)
3275 		return -EINVAL;
3276 
3277 	intel_reference_shared_dpll(state, crtc,
3278 				    port_dpll->pll, &port_dpll->hw_state);
3279 
3280 	icl_update_active_dpll(state, crtc, encoder);
3281 
3282 	return 0;
3283 }
3284 
3285 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3286 				    struct intel_crtc *crtc)
3287 {
3288 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3289 	struct intel_crtc_state *crtc_state =
3290 		intel_atomic_get_new_crtc_state(state, crtc);
3291 	struct icl_port_dpll *port_dpll =
3292 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3293 	struct skl_wrpll_params pll_params = {};
3294 	int ret;
3295 
3296 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3297 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3298 	if (ret)
3299 		return ret;
3300 
3301 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3302 
3303 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3304 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3305 	if (ret)
3306 		return ret;
3307 
3308 	/* this is mainly for the fastset check */
3309 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3310 
3311 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3312 							 &port_dpll->hw_state);
3313 
3314 	return 0;
3315 }
3316 
3317 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3318 				struct intel_crtc *crtc,
3319 				struct intel_encoder *encoder)
3320 {
3321 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3322 	struct intel_crtc_state *crtc_state =
3323 		intel_atomic_get_new_crtc_state(state, crtc);
3324 	struct icl_port_dpll *port_dpll =
3325 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3326 	enum intel_dpll_id dpll_id;
3327 	int ret;
3328 
3329 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3330 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3331 						&port_dpll->hw_state,
3332 						BIT(DPLL_ID_ICL_TBTPLL));
3333 	if (!port_dpll->pll)
3334 		return -EINVAL;
3335 	intel_reference_shared_dpll(state, crtc,
3336 				    port_dpll->pll, &port_dpll->hw_state);
3337 
3338 
3339 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3340 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3341 							 encoder->port));
3342 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3343 						&port_dpll->hw_state,
3344 						BIT(dpll_id));
3345 	if (!port_dpll->pll) {
3346 		ret = -EINVAL;
3347 		goto err_unreference_tbt_pll;
3348 	}
3349 	intel_reference_shared_dpll(state, crtc,
3350 				    port_dpll->pll, &port_dpll->hw_state);
3351 
3352 	icl_update_active_dpll(state, crtc, encoder);
3353 
3354 	return 0;
3355 
3356 err_unreference_tbt_pll:
3357 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3358 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3359 
3360 	return ret;
3361 }
3362 
3363 static int icl_compute_dplls(struct intel_atomic_state *state,
3364 			     struct intel_crtc *crtc,
3365 			     struct intel_encoder *encoder)
3366 {
3367 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3368 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3369 
3370 	if (intel_phy_is_combo(dev_priv, phy))
3371 		return icl_compute_combo_phy_dpll(state, crtc);
3372 	else if (intel_phy_is_tc(dev_priv, phy))
3373 		return icl_compute_tc_phy_dplls(state, crtc);
3374 
3375 	MISSING_CASE(phy);
3376 
3377 	return 0;
3378 }
3379 
3380 static int icl_get_dplls(struct intel_atomic_state *state,
3381 			 struct intel_crtc *crtc,
3382 			 struct intel_encoder *encoder)
3383 {
3384 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3385 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3386 
3387 	if (intel_phy_is_combo(dev_priv, phy))
3388 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3389 	else if (intel_phy_is_tc(dev_priv, phy))
3390 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3391 
3392 	MISSING_CASE(phy);
3393 
3394 	return -EINVAL;
3395 }
3396 
3397 static void icl_put_dplls(struct intel_atomic_state *state,
3398 			  struct intel_crtc *crtc)
3399 {
3400 	const struct intel_crtc_state *old_crtc_state =
3401 		intel_atomic_get_old_crtc_state(state, crtc);
3402 	struct intel_crtc_state *new_crtc_state =
3403 		intel_atomic_get_new_crtc_state(state, crtc);
3404 	enum icl_port_dpll_id id;
3405 
3406 	new_crtc_state->shared_dpll = NULL;
3407 
3408 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3409 		const struct icl_port_dpll *old_port_dpll =
3410 			&old_crtc_state->icl_port_dplls[id];
3411 		struct icl_port_dpll *new_port_dpll =
3412 			&new_crtc_state->icl_port_dplls[id];
3413 
3414 		new_port_dpll->pll = NULL;
3415 
3416 		if (!old_port_dpll->pll)
3417 			continue;
3418 
3419 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3420 	}
3421 }
3422 
3423 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3424 				struct intel_shared_dpll *pll,
3425 				struct intel_dpll_hw_state *hw_state)
3426 {
3427 	const enum intel_dpll_id id = pll->info->id;
3428 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3429 	intel_wakeref_t wakeref;
3430 	bool ret = false;
3431 	u32 val;
3432 
3433 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3434 
3435 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3436 						     POWER_DOMAIN_DISPLAY_CORE);
3437 	if (!wakeref)
3438 		return false;
3439 
3440 	val = intel_de_read(dev_priv, enable_reg);
3441 	if (!(val & PLL_ENABLE))
3442 		goto out;
3443 
3444 	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3445 						  MG_REFCLKIN_CTL(tc_port));
3446 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3447 
3448 	hw_state->mg_clktop2_coreclkctl1 =
3449 		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3450 	hw_state->mg_clktop2_coreclkctl1 &=
3451 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3452 
3453 	hw_state->mg_clktop2_hsclkctl =
3454 		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3455 	hw_state->mg_clktop2_hsclkctl &=
3456 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3457 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3458 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3459 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3460 
3461 	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3462 	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3463 	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3464 	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3465 						   MG_PLL_FRAC_LOCK(tc_port));
3466 	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3467 
3468 	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3469 	hw_state->mg_pll_tdc_coldst_bias =
3470 		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3471 
3472 	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
3473 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3474 		hw_state->mg_pll_bias_mask = 0;
3475 	} else {
3476 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3477 		hw_state->mg_pll_bias_mask = -1U;
3478 	}
3479 
3480 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3481 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3482 
3483 	ret = true;
3484 out:
3485 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3486 	return ret;
3487 }
3488 
3489 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3490 				 struct intel_shared_dpll *pll,
3491 				 struct intel_dpll_hw_state *hw_state)
3492 {
3493 	const enum intel_dpll_id id = pll->info->id;
3494 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3495 	intel_wakeref_t wakeref;
3496 	bool ret = false;
3497 	u32 val;
3498 
3499 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3500 						     POWER_DOMAIN_DISPLAY_CORE);
3501 	if (!wakeref)
3502 		return false;
3503 
3504 	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
3505 	if (!(val & PLL_ENABLE))
3506 		goto out;
3507 
3508 	/*
3509 	 * All registers read here have the same HIP_INDEX_REG even though
3510 	 * they are on different building blocks
3511 	 */
3512 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
3513 						       DKL_REFCLKIN_CTL(tc_port), 2);
3514 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3515 
3516 	hw_state->mg_clktop2_hsclkctl =
3517 		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2);
3518 	hw_state->mg_clktop2_hsclkctl &=
3519 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3520 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3521 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3522 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3523 
3524 	hw_state->mg_clktop2_coreclkctl1 =
3525 		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2);
3526 	hw_state->mg_clktop2_coreclkctl1 &=
3527 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3528 
3529 	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port), 2);
3530 	val = DKL_PLL_DIV0_MASK;
3531 	if (dev_priv->display.vbt.override_afc_startup)
3532 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3533 	hw_state->mg_pll_div0 &= val;
3534 
3535 	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2);
3536 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3537 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3538 
3539 	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2);
3540 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3541 				 DKL_PLL_SSC_STEP_LEN_MASK |
3542 				 DKL_PLL_SSC_STEP_NUM_MASK |
3543 				 DKL_PLL_SSC_EN);
3544 
3545 	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2);
3546 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3547 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3548 
3549 	hw_state->mg_pll_tdc_coldst_bias =
3550 		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2);
3551 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3552 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3553 
3554 	ret = true;
3555 out:
3556 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3557 	return ret;
3558 }
3559 
3560 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3561 				 struct intel_shared_dpll *pll,
3562 				 struct intel_dpll_hw_state *hw_state,
3563 				 i915_reg_t enable_reg)
3564 {
3565 	const enum intel_dpll_id id = pll->info->id;
3566 	intel_wakeref_t wakeref;
3567 	bool ret = false;
3568 	u32 val;
3569 
3570 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3571 						     POWER_DOMAIN_DISPLAY_CORE);
3572 	if (!wakeref)
3573 		return false;
3574 
3575 	val = intel_de_read(dev_priv, enable_reg);
3576 	if (!(val & PLL_ENABLE))
3577 		goto out;
3578 
3579 	if (IS_ALDERLAKE_S(dev_priv)) {
3580 		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
3581 		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
3582 	} else if (IS_DG1(dev_priv)) {
3583 		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
3584 		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
3585 	} else if (IS_ROCKETLAKE(dev_priv)) {
3586 		hw_state->cfgcr0 = intel_de_read(dev_priv,
3587 						 RKL_DPLL_CFGCR0(id));
3588 		hw_state->cfgcr1 = intel_de_read(dev_priv,
3589 						 RKL_DPLL_CFGCR1(id));
3590 	} else if (DISPLAY_VER(dev_priv) >= 12) {
3591 		hw_state->cfgcr0 = intel_de_read(dev_priv,
3592 						 TGL_DPLL_CFGCR0(id));
3593 		hw_state->cfgcr1 = intel_de_read(dev_priv,
3594 						 TGL_DPLL_CFGCR1(id));
3595 		if (dev_priv->display.vbt.override_afc_startup) {
3596 			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
3597 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3598 		}
3599 	} else {
3600 		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3601 			hw_state->cfgcr0 = intel_de_read(dev_priv,
3602 							 ICL_DPLL_CFGCR0(4));
3603 			hw_state->cfgcr1 = intel_de_read(dev_priv,
3604 							 ICL_DPLL_CFGCR1(4));
3605 		} else {
3606 			hw_state->cfgcr0 = intel_de_read(dev_priv,
3607 							 ICL_DPLL_CFGCR0(id));
3608 			hw_state->cfgcr1 = intel_de_read(dev_priv,
3609 							 ICL_DPLL_CFGCR1(id));
3610 		}
3611 	}
3612 
3613 	ret = true;
3614 out:
3615 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3616 	return ret;
3617 }
3618 
3619 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3620 				   struct intel_shared_dpll *pll,
3621 				   struct intel_dpll_hw_state *hw_state)
3622 {
3623 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3624 
3625 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3626 }
3627 
3628 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3629 				 struct intel_shared_dpll *pll,
3630 				 struct intel_dpll_hw_state *hw_state)
3631 {
3632 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3633 }
3634 
3635 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3636 			   struct intel_shared_dpll *pll)
3637 {
3638 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3639 	const enum intel_dpll_id id = pll->info->id;
3640 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3641 
3642 	if (IS_ALDERLAKE_S(dev_priv)) {
3643 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3644 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3645 	} else if (IS_DG1(dev_priv)) {
3646 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3647 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3648 	} else if (IS_ROCKETLAKE(dev_priv)) {
3649 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3650 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3651 	} else if (DISPLAY_VER(dev_priv) >= 12) {
3652 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3653 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3654 		div0_reg = TGL_DPLL0_DIV0(id);
3655 	} else {
3656 		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3657 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3658 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3659 		} else {
3660 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3661 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3662 		}
3663 	}
3664 
3665 	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3666 	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3667 	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
3668 			 !i915_mmio_reg_valid(div0_reg));
3669 	if (dev_priv->display.vbt.override_afc_startup &&
3670 	    i915_mmio_reg_valid(div0_reg))
3671 		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
3672 			     hw_state->div0);
3673 	intel_de_posting_read(dev_priv, cfgcr1_reg);
3674 }
3675 
3676 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3677 			     struct intel_shared_dpll *pll)
3678 {
3679 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3680 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3681 	u32 val;
3682 
3683 	/*
3684 	 * Some of the following registers have reserved fields, so program
3685 	 * these with RMW based on a mask. The mask can be fixed or generated
3686 	 * during the calc/readout phase if the mask depends on some other HW
3687 	 * state like refclk, see icl_calc_mg_pll_state().
3688 	 */
3689 	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
3690 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3691 	val |= hw_state->mg_refclkin_ctl;
3692 	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
3693 
3694 	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3695 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3696 	val |= hw_state->mg_clktop2_coreclkctl1;
3697 	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3698 
3699 	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3700 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3701 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3702 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3703 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3704 	val |= hw_state->mg_clktop2_hsclkctl;
3705 	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
3706 
3707 	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3708 	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3709 	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3710 	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3711 		       hw_state->mg_pll_frac_lock);
3712 	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3713 
3714 	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3715 	val &= ~hw_state->mg_pll_bias_mask;
3716 	val |= hw_state->mg_pll_bias;
3717 	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
3718 
3719 	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3720 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3721 	val |= hw_state->mg_pll_tdc_coldst_bias;
3722 	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3723 
3724 	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3725 }
3726 
3727 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3728 			  struct intel_shared_dpll *pll)
3729 {
3730 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3731 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3732 	u32 val;
3733 
3734 	/*
3735 	 * All registers programmed here have the same HIP_INDEX_REG even
3736 	 * though they are on different building blocks
3737 	 */
3738 	/* All the registers are RMW */
3739 	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2);
3740 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3741 	val |= hw_state->mg_refclkin_ctl;
3742 	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2, val);
3743 
3744 	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2);
3745 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3746 	val |= hw_state->mg_clktop2_coreclkctl1;
3747 	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2, val);
3748 
3749 	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2);
3750 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3751 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3752 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3753 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3754 	val |= hw_state->mg_clktop2_hsclkctl;
3755 	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2, val);
3756 
3757 	val = DKL_PLL_DIV0_MASK;
3758 	if (dev_priv->display.vbt.override_afc_startup)
3759 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3760 	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), 2, val,
3761 			  hw_state->mg_pll_div0);
3762 
3763 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2);
3764 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3765 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3766 	val |= hw_state->mg_pll_div1;
3767 	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), 2, val);
3768 
3769 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2);
3770 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3771 		 DKL_PLL_SSC_STEP_LEN_MASK |
3772 		 DKL_PLL_SSC_STEP_NUM_MASK |
3773 		 DKL_PLL_SSC_EN);
3774 	val |= hw_state->mg_pll_ssc;
3775 	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), 2, val);
3776 
3777 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2);
3778 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3779 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3780 	val |= hw_state->mg_pll_bias;
3781 	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), 2, val);
3782 
3783 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2);
3784 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3785 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3786 	val |= hw_state->mg_pll_tdc_coldst_bias;
3787 	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2, val);
3788 
3789 	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2);
3790 }
3791 
3792 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3793 				 struct intel_shared_dpll *pll,
3794 				 i915_reg_t enable_reg)
3795 {
3796 	u32 val;
3797 
3798 	val = intel_de_read(dev_priv, enable_reg);
3799 	val |= PLL_POWER_ENABLE;
3800 	intel_de_write(dev_priv, enable_reg, val);
3801 
3802 	/*
3803 	 * The spec says we need to "wait" but it also says it should be
3804 	 * immediate.
3805 	 */
3806 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3807 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3808 			pll->info->id);
3809 }
3810 
3811 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3812 			   struct intel_shared_dpll *pll,
3813 			   i915_reg_t enable_reg)
3814 {
3815 	u32 val;
3816 
3817 	val = intel_de_read(dev_priv, enable_reg);
3818 	val |= PLL_ENABLE;
3819 	intel_de_write(dev_priv, enable_reg, val);
3820 
3821 	/* Timeout is actually 600us. */
3822 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3823 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3824 }
3825 
3826 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3827 {
3828 	u32 val;
3829 
3830 	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
3831 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3832 		return;
3833 	/*
3834 	 * Wa_16011069516:adl-p[a0]
3835 	 *
3836 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3837 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3838 	 * sanity check this assumption with a double read, which presumably
3839 	 * returns the correct value even with clock gating on.
3840 	 *
3841 	 * Instead of the usual place for workarounds we apply this one here,
3842 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3843 	 */
3844 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3845 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3846 	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
3847 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3848 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3849 }
3850 
3851 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3852 			     struct intel_shared_dpll *pll)
3853 {
3854 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3855 
3856 	if (IS_JSL_EHL(dev_priv) &&
3857 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3858 
3859 		/*
3860 		 * We need to disable DC states when this DPLL is enabled.
3861 		 * This can be done by taking a reference on DPLL4 power
3862 		 * domain.
3863 		 */
3864 		pll->wakeref = intel_display_power_get(dev_priv,
3865 						       POWER_DOMAIN_DC_OFF);
3866 	}
3867 
3868 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3869 
3870 	icl_dpll_write(dev_priv, pll);
3871 
3872 	/*
3873 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3874 	 * paths should already be setting the appropriate voltage, hence we do
3875 	 * nothing here.
3876 	 */
3877 
3878 	icl_pll_enable(dev_priv, pll, enable_reg);
3879 
3880 	adlp_cmtg_clock_gating_wa(dev_priv, pll);
3881 
3882 	/* DVFS post sequence would be here. See the comment above. */
3883 }
3884 
3885 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3886 			   struct intel_shared_dpll *pll)
3887 {
3888 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3889 
3890 	icl_dpll_write(dev_priv, pll);
3891 
3892 	/*
3893 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3894 	 * paths should already be setting the appropriate voltage, hence we do
3895 	 * nothing here.
3896 	 */
3897 
3898 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3899 
3900 	/* DVFS post sequence would be here. See the comment above. */
3901 }
3902 
3903 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3904 			  struct intel_shared_dpll *pll)
3905 {
3906 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3907 
3908 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3909 
3910 	if (DISPLAY_VER(dev_priv) >= 12)
3911 		dkl_pll_write(dev_priv, pll);
3912 	else
3913 		icl_mg_pll_write(dev_priv, pll);
3914 
3915 	/*
3916 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3917 	 * paths should already be setting the appropriate voltage, hence we do
3918 	 * nothing here.
3919 	 */
3920 
3921 	icl_pll_enable(dev_priv, pll, enable_reg);
3922 
3923 	/* DVFS post sequence would be here. See the comment above. */
3924 }
3925 
3926 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3927 			    struct intel_shared_dpll *pll,
3928 			    i915_reg_t enable_reg)
3929 {
3930 	u32 val;
3931 
3932 	/* The first steps are done by intel_ddi_post_disable(). */
3933 
3934 	/*
3935 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3936 	 * paths should already be setting the appropriate voltage, hence we do
3937 	 * nothing here.
3938 	 */
3939 
3940 	val = intel_de_read(dev_priv, enable_reg);
3941 	val &= ~PLL_ENABLE;
3942 	intel_de_write(dev_priv, enable_reg, val);
3943 
3944 	/* Timeout is actually 1us. */
3945 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3946 		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3947 
3948 	/* DVFS post sequence would be here. See the comment above. */
3949 
3950 	val = intel_de_read(dev_priv, enable_reg);
3951 	val &= ~PLL_POWER_ENABLE;
3952 	intel_de_write(dev_priv, enable_reg, val);
3953 
3954 	/*
3955 	 * The spec says we need to "wait" but it also says it should be
3956 	 * immediate.
3957 	 */
3958 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3959 		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
3960 			pll->info->id);
3961 }
3962 
3963 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3964 			      struct intel_shared_dpll *pll)
3965 {
3966 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3967 
3968 	icl_pll_disable(dev_priv, pll, enable_reg);
3969 
3970 	if (IS_JSL_EHL(dev_priv) &&
3971 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3972 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3973 					pll->wakeref);
3974 }
3975 
3976 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3977 			    struct intel_shared_dpll *pll)
3978 {
3979 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3980 }
3981 
3982 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3983 			   struct intel_shared_dpll *pll)
3984 {
3985 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3986 
3987 	icl_pll_disable(dev_priv, pll, enable_reg);
3988 }
3989 
3990 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
3991 {
3992 	/* No SSC ref */
3993 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3994 }
3995 
3996 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3997 			      const struct intel_dpll_hw_state *hw_state)
3998 {
3999 	drm_dbg_kms(&dev_priv->drm,
4000 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4001 		    "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
4002 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4003 		    "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
4004 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4005 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4006 		    hw_state->cfgcr0, hw_state->cfgcr1,
4007 		    hw_state->div0,
4008 		    hw_state->mg_refclkin_ctl,
4009 		    hw_state->mg_clktop2_coreclkctl1,
4010 		    hw_state->mg_clktop2_hsclkctl,
4011 		    hw_state->mg_pll_div0,
4012 		    hw_state->mg_pll_div1,
4013 		    hw_state->mg_pll_lf,
4014 		    hw_state->mg_pll_frac_lock,
4015 		    hw_state->mg_pll_ssc,
4016 		    hw_state->mg_pll_bias,
4017 		    hw_state->mg_pll_tdc_coldst_bias);
4018 }
4019 
4020 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4021 	.enable = combo_pll_enable,
4022 	.disable = combo_pll_disable,
4023 	.get_hw_state = combo_pll_get_hw_state,
4024 	.get_freq = icl_ddi_combo_pll_get_freq,
4025 };
4026 
4027 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4028 	.enable = tbt_pll_enable,
4029 	.disable = tbt_pll_disable,
4030 	.get_hw_state = tbt_pll_get_hw_state,
4031 	.get_freq = icl_ddi_tbt_pll_get_freq,
4032 };
4033 
4034 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4035 	.enable = mg_pll_enable,
4036 	.disable = mg_pll_disable,
4037 	.get_hw_state = mg_pll_get_hw_state,
4038 	.get_freq = icl_ddi_mg_pll_get_freq,
4039 };
4040 
4041 static const struct dpll_info icl_plls[] = {
4042 	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4043 	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4044 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4045 	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4046 	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4047 	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4048 	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4049 	{ },
4050 };
4051 
4052 static const struct intel_dpll_mgr icl_pll_mgr = {
4053 	.dpll_info = icl_plls,
4054 	.compute_dplls = icl_compute_dplls,
4055 	.get_dplls = icl_get_dplls,
4056 	.put_dplls = icl_put_dplls,
4057 	.update_active_dpll = icl_update_active_dpll,
4058 	.update_ref_clks = icl_update_dpll_ref_clks,
4059 	.dump_hw_state = icl_dump_hw_state,
4060 };
4061 
4062 static const struct dpll_info ehl_plls[] = {
4063 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4064 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4065 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4066 	{ },
4067 };
4068 
4069 static const struct intel_dpll_mgr ehl_pll_mgr = {
4070 	.dpll_info = ehl_plls,
4071 	.compute_dplls = icl_compute_dplls,
4072 	.get_dplls = icl_get_dplls,
4073 	.put_dplls = icl_put_dplls,
4074 	.update_ref_clks = icl_update_dpll_ref_clks,
4075 	.dump_hw_state = icl_dump_hw_state,
4076 };
4077 
4078 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4079 	.enable = mg_pll_enable,
4080 	.disable = mg_pll_disable,
4081 	.get_hw_state = dkl_pll_get_hw_state,
4082 	.get_freq = icl_ddi_mg_pll_get_freq,
4083 };
4084 
4085 static const struct dpll_info tgl_plls[] = {
4086 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4087 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4088 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4089 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4090 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4091 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4092 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4093 	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4094 	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4095 	{ },
4096 };
4097 
4098 static const struct intel_dpll_mgr tgl_pll_mgr = {
4099 	.dpll_info = tgl_plls,
4100 	.compute_dplls = icl_compute_dplls,
4101 	.get_dplls = icl_get_dplls,
4102 	.put_dplls = icl_put_dplls,
4103 	.update_active_dpll = icl_update_active_dpll,
4104 	.update_ref_clks = icl_update_dpll_ref_clks,
4105 	.dump_hw_state = icl_dump_hw_state,
4106 };
4107 
4108 static const struct dpll_info rkl_plls[] = {
4109 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4110 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4111 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4112 	{ },
4113 };
4114 
4115 static const struct intel_dpll_mgr rkl_pll_mgr = {
4116 	.dpll_info = rkl_plls,
4117 	.compute_dplls = icl_compute_dplls,
4118 	.get_dplls = icl_get_dplls,
4119 	.put_dplls = icl_put_dplls,
4120 	.update_ref_clks = icl_update_dpll_ref_clks,
4121 	.dump_hw_state = icl_dump_hw_state,
4122 };
4123 
4124 static const struct dpll_info dg1_plls[] = {
4125 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
4126 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
4127 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4128 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4129 	{ },
4130 };
4131 
4132 static const struct intel_dpll_mgr dg1_pll_mgr = {
4133 	.dpll_info = dg1_plls,
4134 	.compute_dplls = icl_compute_dplls,
4135 	.get_dplls = icl_get_dplls,
4136 	.put_dplls = icl_put_dplls,
4137 	.update_ref_clks = icl_update_dpll_ref_clks,
4138 	.dump_hw_state = icl_dump_hw_state,
4139 };
4140 
4141 static const struct dpll_info adls_plls[] = {
4142 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4143 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4144 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4145 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4146 	{ },
4147 };
4148 
4149 static const struct intel_dpll_mgr adls_pll_mgr = {
4150 	.dpll_info = adls_plls,
4151 	.compute_dplls = icl_compute_dplls,
4152 	.get_dplls = icl_get_dplls,
4153 	.put_dplls = icl_put_dplls,
4154 	.update_ref_clks = icl_update_dpll_ref_clks,
4155 	.dump_hw_state = icl_dump_hw_state,
4156 };
4157 
4158 static const struct dpll_info adlp_plls[] = {
4159 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4160 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4161 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4162 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4163 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4164 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4165 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4166 	{ },
4167 };
4168 
4169 static const struct intel_dpll_mgr adlp_pll_mgr = {
4170 	.dpll_info = adlp_plls,
4171 	.compute_dplls = icl_compute_dplls,
4172 	.get_dplls = icl_get_dplls,
4173 	.put_dplls = icl_put_dplls,
4174 	.update_active_dpll = icl_update_active_dpll,
4175 	.update_ref_clks = icl_update_dpll_ref_clks,
4176 	.dump_hw_state = icl_dump_hw_state,
4177 };
4178 
4179 /**
4180  * intel_shared_dpll_init - Initialize shared DPLLs
4181  * @dev_priv: i915 device
4182  *
4183  * Initialize shared DPLLs for @dev_priv.
4184  */
4185 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4186 {
4187 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4188 	const struct dpll_info *dpll_info;
4189 	int i;
4190 
4191 	if (IS_DG2(dev_priv))
4192 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4193 		dpll_mgr = NULL;
4194 	else if (IS_ALDERLAKE_P(dev_priv))
4195 		dpll_mgr = &adlp_pll_mgr;
4196 	else if (IS_ALDERLAKE_S(dev_priv))
4197 		dpll_mgr = &adls_pll_mgr;
4198 	else if (IS_DG1(dev_priv))
4199 		dpll_mgr = &dg1_pll_mgr;
4200 	else if (IS_ROCKETLAKE(dev_priv))
4201 		dpll_mgr = &rkl_pll_mgr;
4202 	else if (DISPLAY_VER(dev_priv) >= 12)
4203 		dpll_mgr = &tgl_pll_mgr;
4204 	else if (IS_JSL_EHL(dev_priv))
4205 		dpll_mgr = &ehl_pll_mgr;
4206 	else if (DISPLAY_VER(dev_priv) >= 11)
4207 		dpll_mgr = &icl_pll_mgr;
4208 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4209 		dpll_mgr = &bxt_pll_mgr;
4210 	else if (DISPLAY_VER(dev_priv) == 9)
4211 		dpll_mgr = &skl_pll_mgr;
4212 	else if (HAS_DDI(dev_priv))
4213 		dpll_mgr = &hsw_pll_mgr;
4214 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4215 		dpll_mgr = &pch_pll_mgr;
4216 
4217 	if (!dpll_mgr) {
4218 		dev_priv->display.dpll.num_shared_dpll = 0;
4219 		return;
4220 	}
4221 
4222 	dpll_info = dpll_mgr->dpll_info;
4223 
4224 	for (i = 0; dpll_info[i].name; i++) {
4225 		if (drm_WARN_ON(&dev_priv->drm,
4226 				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4227 			break;
4228 
4229 		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4230 		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4231 	}
4232 
4233 	dev_priv->display.dpll.mgr = dpll_mgr;
4234 	dev_priv->display.dpll.num_shared_dpll = i;
4235 	mutex_init(&dev_priv->display.dpll.lock);
4236 }
4237 
4238 /**
4239  * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4240  * @state: atomic state
4241  * @crtc: CRTC to compute DPLLs for
4242  * @encoder: encoder
4243  *
4244  * This function computes the DPLL state for the given CRTC and encoder.
4245  *
4246  * The new configuration in the atomic commit @state is made effective by
4247  * calling intel_shared_dpll_swap_state().
4248  *
4249  * Returns:
4250  * 0 on success, negative error code on failure.
4251  */
4252 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4253 			       struct intel_crtc *crtc,
4254 			       struct intel_encoder *encoder)
4255 {
4256 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4257 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4258 
4259 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4260 		return -EINVAL;
4261 
4262 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4263 }
4264 
4265 /**
4266  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4267  * @state: atomic state
4268  * @crtc: CRTC to reserve DPLLs for
4269  * @encoder: encoder
4270  *
4271  * This function reserves all required DPLLs for the given CRTC and encoder
4272  * combination in the current atomic commit @state and the new @crtc atomic
4273  * state.
4274  *
4275  * The new configuration in the atomic commit @state is made effective by
4276  * calling intel_shared_dpll_swap_state().
4277  *
4278  * The reserved DPLLs should be released by calling
4279  * intel_release_shared_dplls().
4280  *
4281  * Returns:
4282  * 0 if all required DPLLs were successfully reserved,
4283  * negative error code otherwise.
4284  */
4285 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4286 			       struct intel_crtc *crtc,
4287 			       struct intel_encoder *encoder)
4288 {
4289 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4290 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4291 
4292 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4293 		return -EINVAL;
4294 
4295 	return dpll_mgr->get_dplls(state, crtc, encoder);
4296 }
4297 
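/*
 * Illustrative sketch (not a verbatim call site in the driver) of how the
 * compute/reserve/release helpers are meant to fit together, based on the
 * kernel-doc comments above; the hook name below is assumed for the example:
 *
 *	static int example_crtc_compute_clock(struct intel_atomic_state *state,
 *					      struct intel_crtc *crtc,
 *					      struct intel_encoder *encoder)
 *	{
 *		int ret;
 *
 *		ret = intel_compute_shared_dplls(state, crtc, encoder);
 *		if (ret)
 *			return ret;
 *
 *		return intel_reserve_shared_dplls(state, crtc, encoder);
 *	}
 *
 * A failed or torn-down configuration drops the reservation again with
 * intel_release_shared_dplls(state, crtc); the staged PLL users only become
 * effective once intel_shared_dpll_swap_state() runs in the commit phase.
 */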
4298 /**
4299  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4300  * @state: atomic state
4301  * @crtc: crtc from which the DPLLs are to be released
4302  *
4303  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4304  * from the current atomic commit @state and the old @crtc atomic state.
4305  *
4306  * The new configuration in the atomic commit @state is made effective by
4307  * calling intel_shared_dpll_swap_state().
4308  */
4309 void intel_release_shared_dplls(struct intel_atomic_state *state,
4310 				struct intel_crtc *crtc)
4311 {
4312 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4313 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4314 
4315 	/*
4316 	 * FIXME: this function is called for every platform having a
4317 	 * compute_clock hook, even though the platform doesn't yet support
4318 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4319 	 * called on those.
4320 	 */
4321 	if (!dpll_mgr)
4322 		return;
4323 
4324 	dpll_mgr->put_dplls(state, crtc);
4325 }
4326 
4327 /**
4328  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4329  * @state: atomic state
4330  * @crtc: the CRTC for which to update the active DPLL
4331  * @encoder: encoder determining the type of port DPLL
4332  *
4333  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4334  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4335  * DPLL selected will be based on the current mode of the encoder's port.
4336  */
4337 void intel_update_active_dpll(struct intel_atomic_state *state,
4338 			      struct intel_crtc *crtc,
4339 			      struct intel_encoder *encoder)
4340 {
4341 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4342 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4343 
4344 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4345 		return;
4346 
4347 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4348 }
4349 
4350 /**
4351  * intel_dpll_get_freq - calculate the DPLL's output frequency
4352  * @i915: i915 device
4353  * @pll: DPLL for which to calculate the output frequency
4354  * @pll_state: DPLL state from which to calculate the output frequency
4355  *
4356  * Return the output frequency corresponding to @pll's passed in @pll_state.
4357  */
4358 int intel_dpll_get_freq(struct drm_i915_private *i915,
4359 			const struct intel_shared_dpll *pll,
4360 			const struct intel_dpll_hw_state *pll_state)
4361 {
4362 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4363 		return 0;
4364 
4365 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4366 }
4367 
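/*
 * Example (illustrative only): a caller holding a CRTC state with a reserved
 * PLL can recompute the port clock from the software-tracked hardware state:
 *
 *	int port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
 *					     &crtc_state->dpll_hw_state);
 *
 * A return value of 0 means the PLL type provides no ->get_freq() hook.
 */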
4368 /**
4369  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4370  * @i915: i915 device
4371  * @pll: DPLL for which to read out the hardware state
4372  * @hw_state: DPLL's hardware state
4373  *
4374  * Read out @pll's hardware state into @hw_state.
4375  */
4376 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4377 			     struct intel_shared_dpll *pll,
4378 			     struct intel_dpll_hw_state *hw_state)
4379 {
4380 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4381 }
4382 
4383 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4384 				  struct intel_shared_dpll *pll)
4385 {
4386 	struct intel_crtc *crtc;
4387 
4388 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4389 
4390 	if (IS_JSL_EHL(i915) && pll->on &&
4391 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4392 		pll->wakeref = intel_display_power_get(i915,
4393 						       POWER_DOMAIN_DC_OFF);
4394 	}
4395 
4396 	pll->state.pipe_mask = 0;
4397 	for_each_intel_crtc(&i915->drm, crtc) {
4398 		struct intel_crtc_state *crtc_state =
4399 			to_intel_crtc_state(crtc->base.state);
4400 
4401 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4402 			pll->state.pipe_mask |= BIT(crtc->pipe);
4403 	}
4404 	pll->active_mask = pll->state.pipe_mask;
4405 
4406 	drm_dbg_kms(&i915->drm,
4407 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4408 		    pll->info->name, pll->state.pipe_mask, pll->on);
4409 }
4410 
4411 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4412 {
4413 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4414 		i915->display.dpll.mgr->update_ref_clks(i915);
4415 }
4416 
4417 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4418 {
4419 	int i;
4420 
4421 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4422 		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
4423 }
4424 
4425 static void sanitize_dpll_state(struct drm_i915_private *i915,
4426 				struct intel_shared_dpll *pll)
4427 {
4428 	if (!pll->on)
4429 		return;
4430 
4431 	adlp_cmtg_clock_gating_wa(i915, pll);
4432 
4433 	if (pll->active_mask)
4434 		return;
4435 
4436 	drm_dbg_kms(&i915->drm,
4437 		    "%s enabled but not in use, disabling\n",
4438 		    pll->info->name);
4439 
4440 	pll->info->funcs->disable(i915, pll);
4441 	pll->on = false;
4442 }
4443 
4444 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4445 {
4446 	int i;
4447 
4448 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4449 		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
4450 }
4451 
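/*
 * Usage note (a sketch of the expected ordering, not a verbatim call site):
 * during hardware state takeover the reference clock must be known before the
 * PLL readout, the readout must follow the CRTC state readout that links
 * pipes to their PLLs, and sanitization runs last:
 *
 *	intel_dpll_update_ref_clks(i915);
 *	... CRTC/encoder hw state readout ...
 *	intel_dpll_readout_hw_state(i915);
 *	intel_dpll_sanitize_state(i915);
 */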
4452 /**
4453  * intel_dpll_dump_hw_state - write hw_state to dmesg
4454  * @dev_priv: i915 drm device
4455  * @hw_state: hw state to be written to the log
4456  *
4457  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4458  */
4459 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4460 			      const struct intel_dpll_hw_state *hw_state)
4461 {
4462 	if (dev_priv->display.dpll.mgr) {
4463 		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4464 	} else {
4465 		/* fallback for platforms that don't use the shared dpll
4466 		 * infrastructure
4467 		 */
4468 		drm_dbg_kms(&dev_priv->drm,
4469 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4470 			    "fp0: 0x%x, fp1: 0x%x\n",
4471 			    hw_state->dpll,
4472 			    hw_state->dpll_md,
4473 			    hw_state->fp0,
4474 			    hw_state->fp1);
4475 	}
4476 }
4477 
4478 static void
4479 verify_single_dpll_state(struct drm_i915_private *dev_priv,
4480 			 struct intel_shared_dpll *pll,
4481 			 struct intel_crtc *crtc,
4482 			 struct intel_crtc_state *new_crtc_state)
4483 {
4484 	struct intel_dpll_hw_state dpll_hw_state;
4485 	u8 pipe_mask;
4486 	bool active;
4487 
4488 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
4489 
4490 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
4491 
4492 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
4493 
4494 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
4495 		I915_STATE_WARN(!pll->on && pll->active_mask,
4496 				"pll in active use but not on in sw tracking\n");
4497 		I915_STATE_WARN(pll->on && !pll->active_mask,
4498 				"pll is on but not used by any active pipe\n");
4499 		I915_STATE_WARN(pll->on != active,
4500 				"pll on state mismatch (expected %i, found %i)\n",
4501 				pll->on, active);
4502 	}
4503 
4504 	if (!crtc) {
4505 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
4506 				"more active pll users than references: 0x%x vs 0x%x\n",
4507 				pll->active_mask, pll->state.pipe_mask);
4508 
4509 		return;
4510 	}
4511 
4512 	pipe_mask = BIT(crtc->pipe);
4513 
4514 	if (new_crtc_state->hw.active)
4515 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
4516 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4517 				pipe_name(crtc->pipe), pll->active_mask);
4518 	else
4519 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4520 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4521 				pipe_name(crtc->pipe), pll->active_mask);
4522 
4523 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
4524 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4525 			pipe_mask, pll->state.pipe_mask);
4526 
4527 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
4528 					  &dpll_hw_state,
4529 					  sizeof(dpll_hw_state)),
4530 			"pll hw state mismatch\n");
4531 }
4532 
4533 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4534 				    struct intel_crtc_state *old_crtc_state,
4535 				    struct intel_crtc_state *new_crtc_state)
4536 {
4537 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4538 
4539 	if (new_crtc_state->shared_dpll)
4540 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4541 					 crtc, new_crtc_state);
4542 
4543 	if (old_crtc_state->shared_dpll &&
4544 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4545 		u8 pipe_mask = BIT(crtc->pipe);
4546 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4547 
4548 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4549 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4550 				pipe_name(crtc->pipe), pll->active_mask);
4551 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
4552 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4553 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4554 	}
4555 }
4556 
4557 void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
4558 {
4559 	int i;
4560 
4561 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4562 		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
4563 					 NULL, NULL);
4564 }
4565