/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_snps_phy.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "gt/intel_rps.h"
#include "gt/gen8_ppgtt.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_dpt.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

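/*
 * The CCK frequency field encodes a divider: the resulting clock works
 * out to ref_freq * 2 / (divider + 1), rounded to the nearest kHz (see
 * the DIV_ROUND_CLOSEST below), so e.g. a divider of 3 yields half the
 * reference frequency.
 */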
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

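/*
 * PIPEDSL exposes the current display scanline; sampling it twice 5 ms
 * apart distinguishes a running pipe (the counter advances) from a
 * stopped one.
 */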
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (DISPLAY_VER(dev_priv) == 2)
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = intel_de_read(dev_priv, reg) & line_mask;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

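/*
 * Gen4+ exposes a live pipe state bit in PIPECONF that we can poll;
 * older platforms lack it, so there we infer the pipe state from
 * whether the scanline counter is still moving.
 */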
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

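/*
 * The panel power sequencer registers count as locked unless panel
 * power is off or the unlock key is present in PP_CONTROL; warn if
 * they are still locked for the pipe that drives the panel.
 */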
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplexes with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

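/*
 * Judging from the ready-mask definitions, port C's ready bits in
 * DPLL(0) sit 4 bits above port B's (DPLL_PORTC_READY_MASK vs
 * DPLL_PORTB_READY_MASK), hence the expected_mask <<= 4 for port C
 * below.
 */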
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}

static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}

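/*
 * LPT has a single PCH transcoder wired to pipe A (see the LPT helpers
 * above, which hardcode LPT_TRANSCONF and PIPE_A), so on LPT every CRTC
 * maps to PIPE_A for PCH purposes; elsewhere the PCH transcoder matches
 * the pipe.
 */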
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

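/*
 * With CCS modifiers each color plane carries an extra AUX plane, so a
 * two-plane YUV semiplanar layout shows up as four planes in the fb.
 */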
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
				    u64 modifier)
{
	return info->is_yuv &&
	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}

unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (DISPLAY_VER(dev_priv) == 2)
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}

unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, color_plane);

	return ALIGN(height, tile_height);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
		size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;

	return size;
}

static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (DISPLAY_VER(dev_priv) >= 4)
		return 4 * 1024;
	else
		return 0;
}

static bool has_async_flips(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 5;
}

unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
				  int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	if (intel_fb_uses_dpt(fb))
		return 512 * 4096;

	/* AUX_DIST needs only 4K alignment */
	if (is_ccs_plane(fb, color_plane))
		return 4096;

	if (is_semiplanar_uv_plane(fb, color_plane)) {
		/*
		 * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
		 * alignment for linear UV planes on all platforms.
		 */
		if (DISPLAY_VER(dev_priv) >= 12) {
			if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
				return intel_linear_alignment(dev_priv);

			return intel_tile_row_size(fb, color_plane);
		}

		return 4096;
	}

	drm_WARN_ON(&dev_priv->drm, color_plane != 0);

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (has_async_flips(dev_priv))
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}

static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->has_fbc &&
		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}

static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
		     const struct i915_ggtt_view *view,
		     bool uses_fence,
		     unsigned long *out_flags,
		     struct i915_address_space *vm)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_vma *vma;
	u32 alignment;
	int ret;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	alignment = 4096 * 512;

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		goto err;

	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
		ret = i915_vma_unbind(vma);
		if (ret) {
			vma = ERR_PTR(ret);
			goto err;
		}
	}

	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	i915_gem_object_flush_if_display(obj);

	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	return vma;
}

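/*
 * Pinning takes part in the ww-mutex locking dance: if any lock attempt
 * below returns -EDEADLK we back off and restart from the retry label,
 * so the section between retry: and err: is written to be safe to
 * rerun.
 */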
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   bool phys_cursor,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;
	int ret;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	if (phys_cursor)
		alignment = intel_cursor_alignment(dev_priv);
	else
		alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Let's presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret && phys_cursor)
		ret = i915_gem_object_attach_phys(obj, alignment);
	else if (!ret && HAS_LMEM(dev_priv))
		ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
	/* TODO: Do we need to sync when migration becomes async? */
	if (!ret)
		ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	if (!ret) {
		vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
							   view, pinctl);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err_unpin;
		}
	}

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
			i915_vma_unpin(vma);
			goto err_unpin;
		}
		ret = 0;

		if (vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (ret)
		vma = ERR_PTR(ret);

	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}

void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}

/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};

/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels
 * in the main surface.
 */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};

/*
 * Same as gen12_ccs_formats[] above, but with an additional surface used
 * to pass Clear Color information in plane 2 with 64 bits of data.
 */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	switch (cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return lookup_format_info(skl_ccs_formats,
					  ARRAY_SIZE(skl_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return lookup_format_info(gen12_ccs_formats,
					  ARRAY_SIZE(gen12_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return lookup_format_info(gen12_ccs_cc_formats,
					  ARRAY_SIZE(gen12_ccs_cc_formats),
					  cmd->pixel_format);
	default:
		return NULL;
	}
}

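/*
 * Per the gen12 CCS layout described above, each 64B CCS cache line
 * covers 4 Y-tiles (4 * 128B = 512B) of main surface width, so the AUX
 * pitch is main_pitch / 512 rounded up to whole 64-byte CCS cache
 * lines.
 */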
static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
	return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
			    512) * 64;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_get_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

static
u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
			u32 pixel_format, u64 modifier)
{
	/*
	 * Arbitrary limit for gen4+ chosen to match the
	 * render engine max stride.
	 *
	 * The new CCS hash mode makes remapping impossible
	 */
	if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
	    intel_modifier_uses_dpt(dev_priv, modifier))
		return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
	else if (DISPLAY_VER(dev_priv) >= 7)
		return 256 * 1024;
	else
		return 128 * 1024;
}

static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
		    color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (DISPLAY_VER(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}

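/*
 * Wrap the firmware-initialized framebuffer: reuse the preallocated
 * stolen-memory range as a GEM object and pin it at its existing GGTT
 * offset (PIN_OFFSET_FIXED) so the boot image stays visible while we
 * take over.
 */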
1509 static struct i915_vma *
initial_plane_vma(struct drm_i915_private * i915,struct intel_initial_plane_config * plane_config)1510 initial_plane_vma(struct drm_i915_private *i915,
1511 		  struct intel_initial_plane_config *plane_config)
1512 {
1513 	struct drm_i915_gem_object *obj;
1514 	struct i915_vma *vma;
1515 	u32 base, size;
1516 
1517 	if (plane_config->size == 0)
1518 		return NULL;
1519 
1520 	base = round_down(plane_config->base,
1521 			  I915_GTT_MIN_ALIGNMENT);
1522 	size = round_up(plane_config->base + plane_config->size,
1523 			I915_GTT_MIN_ALIGNMENT);
1524 	size -= base;
1525 
1526 	/*
1527 	 * If the FB is too big, just don't use it since fbdev is not very
1528 	 * important and we should probably use that space with FBC or other
1529 	 * features.
1530 	 */
1531 	if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
1532 	    size * 2 > i915->stolen_usable_size)
1533 		return NULL;
1534 
1535 	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
1536 	if (IS_ERR(obj))
1537 		return NULL;
1538 
1539 	/*
1540 	 * Mark it WT ahead of time to avoid changing the
1541 	 * cache_level during fbdev initialization. The
1542 	 * unbind there would get stuck waiting for rcu.
1543 	 */
1544 	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
1545 					    I915_CACHE_WT : I915_CACHE_NONE);
1546 
1547 	switch (plane_config->tiling) {
1548 	case I915_TILING_NONE:
1549 		break;
1550 	case I915_TILING_X:
1551 	case I915_TILING_Y:
1552 		obj->tiling_and_stride =
1553 			plane_config->fb->base.pitches[0] |
1554 			plane_config->tiling;
1555 		break;
1556 	default:
1557 		MISSING_CASE(plane_config->tiling);
1558 		goto err_obj;
1559 	}
1560 
1561 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1562 	if (IS_ERR(vma))
1563 		goto err_obj;
1564 
1565 	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
1566 		goto err_obj;
1567 
1568 	if (i915_gem_object_is_tiled(obj) &&
1569 	    !i915_vma_is_map_and_fenceable(vma))
1570 		goto err_obj;
1571 
1572 	return vma;
1573 
1574 err_obj:
1575 	i915_gem_object_put(obj);
1576 	return NULL;
1577 }
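
/*
 * Example of the rounding above, assuming the usual 4 KiB value of
 * I915_GTT_MIN_ALIGNMENT: a BIOS fb at base 0x1234 with size 0x10000
 * is widened to base 0x1000 and size 0x11000, so the preallocated
 * stolen object fully covers the original range on page boundaries.
 */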
1578 
1579 static bool
1580 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
1581 			      struct intel_initial_plane_config *plane_config)
1582 {
1583 	struct drm_device *dev = crtc->base.dev;
1584 	struct drm_i915_private *dev_priv = to_i915(dev);
1585 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
1586 	struct drm_framebuffer *fb = &plane_config->fb->base;
1587 	struct i915_vma *vma;
1588 
1589 	switch (fb->modifier) {
1590 	case DRM_FORMAT_MOD_LINEAR:
1591 	case I915_FORMAT_MOD_X_TILED:
1592 	case I915_FORMAT_MOD_Y_TILED:
1593 		break;
1594 	default:
1595 		drm_dbg(&dev_priv->drm,
1596 			"Unsupported modifier for initial FB: 0x%llx\n",
1597 			fb->modifier);
1598 		return false;
1599 	}
1600 
1601 	vma = initial_plane_vma(dev_priv, plane_config);
1602 	if (!vma)
1603 		return false;
1604 
1605 	mode_cmd.pixel_format = fb->format->format;
1606 	mode_cmd.width = fb->width;
1607 	mode_cmd.height = fb->height;
1608 	mode_cmd.pitches[0] = fb->pitches[0];
1609 	mode_cmd.modifier[0] = fb->modifier;
1610 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
1611 
1612 	if (intel_framebuffer_init(to_intel_framebuffer(fb),
1613 				   vma->obj, &mode_cmd)) {
1614 		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
1615 		goto err_vma;
1616 	}
1617 
1618 	plane_config->vma = vma;
1619 	return true;
1620 
1621 err_vma:
1622 	i915_vma_put(vma);
1623 	return false;
1624 }
1625 
1626 static void
1627 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1628 			struct intel_plane_state *plane_state,
1629 			bool visible)
1630 {
1631 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1632 
1633 	plane_state->uapi.visible = visible;
1634 
1635 	if (visible)
1636 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1637 	else
1638 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1639 }
1640 
1641 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1642 {
1643 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1644 	struct drm_plane *plane;
1645 
1646 	/*
1647 	 * Active_planes aliases if multiple "primary" or cursor planes
1648 	 * have been used on the same (or wrong) pipe. plane_mask uses
1649 	 * unique ids, hence we can use that to reconstruct active_planes.
1650 	 */
1651 	crtc_state->enabled_planes = 0;
1652 	crtc_state->active_planes = 0;
1653 
1654 	drm_for_each_plane_mask(plane, &dev_priv->drm,
1655 				crtc_state->uapi.plane_mask) {
1656 		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1657 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1658 	}
1659 }
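
/*
 * Sketch of what the loop above reconstructs: if the BIOS left the
 * primary plane and the cursor enabled on this pipe, plane_mask holds
 * their (unique) drm plane bits, and the loop rebuilds
 * enabled_planes = active_planes = BIT(PLANE_PRIMARY) | BIT(PLANE_CURSOR).
 */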
1660 
1661 void intel_plane_disable_noatomic(struct intel_crtc *crtc,
1662 				  struct intel_plane *plane)
1663 {
1664 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1665 	struct intel_crtc_state *crtc_state =
1666 		to_intel_crtc_state(crtc->base.state);
1667 	struct intel_plane_state *plane_state =
1668 		to_intel_plane_state(plane->base.state);
1669 
1670 	drm_dbg_kms(&dev_priv->drm,
1671 		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
1672 		    plane->base.base.id, plane->base.name,
1673 		    crtc->base.base.id, crtc->base.name);
1674 
1675 	intel_set_plane_visible(crtc_state, plane_state, false);
1676 	fixup_plane_bitmasks(crtc_state);
1677 	crtc_state->data_rate[plane->id] = 0;
1678 	crtc_state->min_cdclk[plane->id] = 0;
1679 
1680 	if (plane->id == PLANE_PRIMARY)
1681 		hsw_disable_ips(crtc_state);
1682 
1683 	/*
1684 	 * Vblank time updates from the shadow to live plane control register
1685 	 * are blocked if the memory self-refresh mode is active at that
1686 	 * moment. So to make sure the plane gets truly disabled, disable
1687 	 * first the self-refresh mode. The self-refresh enable bit in turn
1688 	 * will be checked/applied by the HW only at the next frame start
1689 	 * event which is after the vblank start event, so we need to have a
1690 	 * wait-for-vblank between disabling the plane and the pipe.
1691 	 */
1692 	if (HAS_GMCH(dev_priv) &&
1693 	    intel_set_memory_cxsr(dev_priv, false))
1694 		intel_wait_for_vblank(dev_priv, crtc->pipe);
1695 
1696 	/*
1697 	 * Gen2 reports pipe underruns whenever all planes are disabled.
1698 	 * So disable underrun reporting before all the planes get disabled.
1699 	 */
1700 	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
1701 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
1702 
1703 	intel_disable_plane(plane, crtc_state);
1704 	intel_wait_for_vblank(dev_priv, crtc->pipe);
1705 }
1706 
1707 static bool
1708 intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
1709 			      const struct intel_initial_plane_config *plane_config,
1710 			      struct drm_framebuffer **fb,
1711 			      struct i915_vma **vma)
1712 {
1713 	struct intel_crtc *crtc;
1714 
1715 	for_each_intel_crtc(&i915->drm, crtc) {
1716 		struct intel_crtc_state *crtc_state =
1717 			to_intel_crtc_state(crtc->base.state);
1718 		struct intel_plane *plane =
1719 			to_intel_plane(crtc->base.primary);
1720 		struct intel_plane_state *plane_state =
1721 			to_intel_plane_state(plane->base.state);
1722 
1723 		if (!crtc_state->uapi.active)
1724 			continue;
1725 
1726 		if (!plane_state->ggtt_vma)
1727 			continue;
1728 
1729 		if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
1730 			*fb = plane_state->hw.fb;
1731 			*vma = plane_state->ggtt_vma;
1732 			return true;
1733 		}
1734 	}
1735 
1736 	return false;
1737 }
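
/*
 * This covers e.g. a BIOS that cloned the same scanout buffer to two
 * pipes: the second crtc reuses the fb and vma the first one already
 * reconstructed, matched purely by the GGTT offset of the pinned
 * plane against plane_config->base.
 */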
1738 
1739 static void
1740 intel_find_initial_plane_obj(struct intel_crtc *crtc,
1741 			     struct intel_initial_plane_config *plane_config)
1742 {
1743 	struct drm_device *dev = crtc->base.dev;
1744 	struct drm_i915_private *dev_priv = to_i915(dev);
1745 	struct intel_crtc_state *crtc_state =
1746 		to_intel_crtc_state(crtc->base.state);
1747 	struct intel_plane *plane =
1748 		to_intel_plane(crtc->base.primary);
1749 	struct intel_plane_state *plane_state =
1750 		to_intel_plane_state(plane->base.state);
1751 	struct drm_framebuffer *fb;
1752 	struct i915_vma *vma;
1753 
1754 	/*
1755 	 * TODO:
1756 	 *   Disable planes if get_initial_plane_config() failed.
1757 	 *   Make sure things work if the surface base is not page aligned.
1758 	 */
1759 	if (!plane_config->fb)
1760 		return;
1761 
1762 	if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
1763 		fb = &plane_config->fb->base;
1764 		vma = plane_config->vma;
1765 		goto valid_fb;
1766 	}
1767 
1768 	/*
1769 	 * Failed to alloc the obj, check to see if we should share
1770 	 * an fb with another CRTC instead
1771 	 */
1772 	if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
1773 		goto valid_fb;
1774 
1775 	/*
1776 	 * We've failed to reconstruct the BIOS FB.  Current display state
1777 	 * indicates that the primary plane is visible, but has a NULL FB,
1778 	 * which will lead to problems later if we don't fix it up.  The
1779 	 * simplest solution is to just disable the primary plane now and
1780 	 * pretend the BIOS never had it enabled.
1781 	 */
1782 	intel_plane_disable_noatomic(crtc, plane);
1783 	if (crtc_state->bigjoiner) {
1784 		struct intel_crtc *slave =
1785 			crtc_state->bigjoiner_linked_crtc;
1786 		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
1787 	}
1788 
1789 	return;
1790 
1791 valid_fb:
1792 	plane_state->uapi.rotation = plane_config->rotation;
1793 	intel_fb_fill_view(to_intel_framebuffer(fb),
1794 			   plane_state->uapi.rotation, &plane_state->view);
1795 
1796 	__i915_vma_pin(vma);
1797 	plane_state->ggtt_vma = i915_vma_get(vma);
1798 	if (intel_plane_uses_fence(plane_state) &&
1799 	    i915_vma_pin_fence(vma) == 0 && vma->fence)
1800 		plane_state->flags |= PLANE_HAS_FENCE;
1801 
1802 	plane_state->uapi.src_x = 0;
1803 	plane_state->uapi.src_y = 0;
1804 	plane_state->uapi.src_w = fb->width << 16;
1805 	plane_state->uapi.src_h = fb->height << 16;
1806 
1807 	plane_state->uapi.crtc_x = 0;
1808 	plane_state->uapi.crtc_y = 0;
1809 	plane_state->uapi.crtc_w = fb->width;
1810 	plane_state->uapi.crtc_h = fb->height;
1811 
1812 	if (plane_config->tiling)
1813 		dev_priv->preserve_bios_swizzle = true;
1814 
1815 	plane_state->uapi.fb = fb;
1816 	drm_framebuffer_get(fb);
1817 
1818 	plane_state->uapi.crtc = &crtc->base;
1819 	intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
1820 
1821 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
1822 
1823 	atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
1824 }
1825 
1826 unsigned int
1827 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
1828 {
1829 	int x = 0, y = 0;
1830 
1831 	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
1832 					  plane_state->view.color_plane[0].offset, 0);
1833 
1834 	return y;
1835 }
1836 
1837 static int
1838 __intel_display_resume(struct drm_device *dev,
1839 		       struct drm_atomic_state *state,
1840 		       struct drm_modeset_acquire_ctx *ctx)
1841 {
1842 	struct drm_crtc_state *crtc_state;
1843 	struct drm_crtc *crtc;
1844 	int i, ret;
1845 
1846 	intel_modeset_setup_hw_state(dev, ctx);
1847 	intel_vga_redisable(to_i915(dev));
1848 
1849 	if (!state)
1850 		return 0;
1851 
1852 	/*
1853 	 * We've duplicated the state; pointers to the old state are invalid.
1854 	 *
1855 	 * Don't attempt to use the old state until we commit the duplicated state.
1856 	 */
1857 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1858 		/*
1859 		 * Force recalculation even if we restore
1860 		 * current state. With fast modeset this may not result
1861 		 * in a modeset when the state is compatible.
1862 		 */
1863 		crtc_state->mode_changed = true;
1864 	}
1865 
1866 	/* ignore any reset values/BIOS leftovers in the WM registers */
1867 	if (!HAS_GMCH(to_i915(dev)))
1868 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
1869 
1870 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
1871 
1872 	drm_WARN_ON(dev, ret == -EDEADLK);
1873 	return ret;
1874 }
1875 
1876 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
1877 {
1878 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
1879 		intel_has_gpu_reset(&dev_priv->gt));
1880 }
1881 
1882 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
1883 {
1884 	struct drm_device *dev = &dev_priv->drm;
1885 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
1886 	struct drm_atomic_state *state;
1887 	int ret;
1888 
1889 	if (!HAS_DISPLAY(dev_priv))
1890 		return;
1891 
1892 	/* reset doesn't touch the display */
1893 	if (!dev_priv->params.force_reset_modeset_test &&
1894 	    !gpu_reset_clobbers_display(dev_priv))
1895 		return;
1896 
1897 	/* We have a modeset vs reset deadlock, defensively unbreak it. */
1898 	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
1899 	smp_mb__after_atomic();
1900 	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
1901 
1902 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
1903 		drm_dbg_kms(&dev_priv->drm,
1904 			    "Modeset potentially stuck, unbreaking through wedging\n");
1905 		intel_gt_set_wedged(&dev_priv->gt);
1906 	}
1907 
1908 	/*
1909 	 * Need mode_config.mutex so that we don't
1910 	 * trample ongoing ->detect() and whatnot.
1911 	 */
1912 	mutex_lock(&dev->mode_config.mutex);
1913 	drm_modeset_acquire_init(ctx, 0);
1914 	while (1) {
1915 		ret = drm_modeset_lock_all_ctx(dev, ctx);
1916 		if (ret != -EDEADLK)
1917 			break;
1918 
1919 		drm_modeset_backoff(ctx);
1920 	}
1921 	/*
1922 	 * Disabling the crtcs gracefully seems nicer. Also the
1923 	 * g33 docs say we should at least disable all the planes.
1924 	 */
1925 	state = drm_atomic_helper_duplicate_state(dev, ctx);
1926 	if (IS_ERR(state)) {
1927 		ret = PTR_ERR(state);
1928 		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
1929 			ret);
1930 		return;
1931 	}
1932 
1933 	ret = drm_atomic_helper_disable_all(dev, ctx);
1934 	if (ret) {
1935 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
1936 			ret);
1937 		drm_atomic_state_put(state);
1938 		return;
1939 	}
1940 
1941 	dev_priv->modeset_restore_state = state;
1942 	state->acquire_ctx = ctx;
1943 }
1944 
1945 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
1946 {
1947 	struct drm_device *dev = &dev_priv->drm;
1948 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
1949 	struct drm_atomic_state *state;
1950 	int ret;
1951 
1952 	if (!HAS_DISPLAY(dev_priv))
1953 		return;
1954 
1955 	/* reset doesn't touch the display */
1956 	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
1957 		return;
1958 
1959 	state = fetch_and_zero(&dev_priv->modeset_restore_state);
1960 	if (!state)
1961 		goto unlock;
1962 
1963 	/* reset doesn't touch the display */
1964 	if (!gpu_reset_clobbers_display(dev_priv)) {
1965 		/* for testing only restore the display */
1966 		ret = __intel_display_resume(dev, state, ctx);
1967 		if (ret)
1968 			drm_err(&dev_priv->drm,
1969 				"Restoring old state failed with %i\n", ret);
1970 	} else {
1971 		/*
1972 		 * The display has been reset as well,
1973 		 * so need a full re-initialization.
1974 		 */
1975 		intel_pps_unlock_regs_wa(dev_priv);
1976 		intel_modeset_init_hw(dev_priv);
1977 		intel_init_clock_gating(dev_priv);
1978 		intel_hpd_init(dev_priv);
1979 
1980 		ret = __intel_display_resume(dev, state, ctx);
1981 		if (ret)
1982 			drm_err(&dev_priv->drm,
1983 				"Restoring old state failed with %i\n", ret);
1984 
1985 		intel_hpd_poll_disable(dev_priv);
1986 	}
1987 
1988 	drm_atomic_state_put(state);
1989 unlock:
1990 	drm_modeset_drop_locks(ctx);
1991 	drm_modeset_acquire_fini(ctx);
1992 	mutex_unlock(&dev->mode_config.mutex);
1993 
1994 	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
1995 }
1996 
1997 static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
1998 {
1999 	if (crtc_state->pch_pfit.enabled &&
2000 	    (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
2001 	     crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
2002 	     crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
2003 		return false;
2004 
2005 	if (crtc_state->dsc.compression_enable)
2006 		return false;
2007 
2008 	if (crtc_state->has_psr2)
2009 		return false;
2010 
2011 	if (crtc_state->splitter.enable)
2012 		return false;
2013 
2014 	return true;
2015 }
2016 
2017 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
2018 {
2019 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2020 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2021 	enum pipe pipe = crtc->pipe;
2022 	u32 tmp;
2023 
2024 	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2025 
2026 	/*
2027 	 * Display WA #1153: icl
2028 	 * enable hardware to bypass the alpha math
2029 	 * and rounding for per-pixel values 00 and 0xff
2030 	 */
2031 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2032 	/*
2033 	 * Display WA # 1605353570: icl
2034 	 * Set the pixel rounding bit to 1 for allowing
2035 	 * passthrough of Frame buffer pixels unmodified
2036 	 * across pipe
2037 	 */
2038 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2039 
2040 	if (IS_DG2(dev_priv)) {
2041 		/*
2042 		 * Underrun recovery must always be disabled on DG2.  However
2043 		 * the chicken bit meaning is inverted compared to other
2044 		 * platforms.
2045 		 */
2046 		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
2047 	} else if (DISPLAY_VER(dev_priv) >= 13) {
2048 		if (underrun_recovery_supported(crtc_state))
2049 			tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
2050 		else
2051 			tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
2052 	}
2053 
2054 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
2055 }
2056 
2057 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
2058 {
2059 	struct drm_crtc *crtc;
2060 	bool cleanup_done;
2061 
2062 	drm_for_each_crtc(crtc, &dev_priv->drm) {
2063 		struct drm_crtc_commit *commit;
2064 		spin_lock(&crtc->commit_lock);
2065 		commit = list_first_entry_or_null(&crtc->commit_list,
2066 						  struct drm_crtc_commit, commit_entry);
2067 		cleanup_done = commit ?
2068 			try_wait_for_completion(&commit->cleanup_done) : true;
2069 		spin_unlock(&crtc->commit_lock);
2070 
2071 		if (cleanup_done)
2072 			continue;
2073 
2074 		drm_crtc_wait_one_vblank(crtc);
2075 
2076 		return true;
2077 	}
2078 
2079 	return false;
2080 }
2081 
2082 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
2083 {
2084 	u32 temp;
2085 
2086 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
2087 
2088 	mutex_lock(&dev_priv->sb_lock);
2089 
2090 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2091 	temp |= SBI_SSCCTL_DISABLE;
2092 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2093 
2094 	mutex_unlock(&dev_priv->sb_lock);
2095 }
2096 
2097 /* Program iCLKIP clock to the desired frequency */
2098 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
2099 {
2100 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2101 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2102 	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
2103 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
2104 	u32 temp;
2105 
2106 	lpt_disable_iclkip(dev_priv);
2107 
2108 	/* The iCLK virtual clock root frequency is in MHz,
2109 	 * but the adjusted_mode->crtc_clock is in kHz. To get the
2110 	 * divisors, it is necessary to divide one by another, so we
2111 	 * convert the virtual clock precision to KHz here for higher
2112 	 * precision.
2113 	 */
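	/*
	 * Worked example with illustrative numbers: for a 108000 kHz
	 * pixel clock and auxdiv == 0,
	 *   desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000) = 1600,
	 *   divsel = 1600 / 64 - 2 = 23, phaseinc = 1600 % 64 = 0,
	 * and since 23 fits the 7-bit divisor the loop below stops at
	 * auxdiv == 0.
	 */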
2114 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
2115 		u32 iclk_virtual_root_freq = 172800 * 1000;
2116 		u32 iclk_pi_range = 64;
2117 		u32 desired_divisor;
2118 
2119 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2120 						    clock << auxdiv);
2121 		divsel = (desired_divisor / iclk_pi_range) - 2;
2122 		phaseinc = desired_divisor % iclk_pi_range;
2123 
2124 		/*
2125 		 * Near 20MHz is a corner case which is
2126 		 * out of range for the 7-bit divisor
2127 		 */
2128 		if (divsel <= 0x7f)
2129 			break;
2130 	}
2131 
2132 	/* This should not happen with any sane values */
2133 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2134 		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2135 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
2136 		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2137 
2138 	drm_dbg_kms(&dev_priv->drm,
2139 		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2140 		    clock, auxdiv, divsel, phasedir, phaseinc);
2141 
2142 	mutex_lock(&dev_priv->sb_lock);
2143 
2144 	/* Program SSCDIVINTPHASE6 */
2145 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2146 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2147 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2148 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2149 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2150 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2151 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2152 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2153 
2154 	/* Program SSCAUXDIV */
2155 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2156 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2157 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2158 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2159 
2160 	/* Enable modulator and associated divider */
2161 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2162 	temp &= ~SBI_SSCCTL_DISABLE;
2163 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2164 
2165 	mutex_unlock(&dev_priv->sb_lock);
2166 
2167 	/* Wait for initialization time */
2168 	udelay(24);
2169 
2170 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2171 }
2172 
2173 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
2174 {
2175 	u32 divsel, phaseinc, auxdiv;
2176 	u32 iclk_virtual_root_freq = 172800 * 1000;
2177 	u32 iclk_pi_range = 64;
2178 	u32 desired_divisor;
2179 	u32 temp;
2180 
2181 	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
2182 		return 0;
2183 
2184 	mutex_lock(&dev_priv->sb_lock);
2185 
2186 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2187 	if (temp & SBI_SSCCTL_DISABLE) {
2188 		mutex_unlock(&dev_priv->sb_lock);
2189 		return 0;
2190 	}
2191 
2192 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2193 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
2194 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
2195 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
2196 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
2197 
2198 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2199 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
2200 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
2201 
2202 	mutex_unlock(&dev_priv->sb_lock);
2203 
2204 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
2205 
2206 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2207 				 desired_divisor << auxdiv);
2208 }
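
/*
 * Round trip of the worked example in lpt_program_iclkip(): reading
 * back divsel == 23, phaseinc == 0 and auxdiv == 0 yields
 *   desired_divisor = (23 + 2) * 64 + 0 = 1600,
 *   clock = DIV_ROUND_CLOSEST(172800000, 1600 << 0) = 108000 kHz.
 */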
2209 
2210 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2211 					   enum pipe pch_transcoder)
2212 {
2213 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2214 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2215 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2216 
2217 	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2218 		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2219 	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2220 		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2221 	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2222 		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
2223 
2224 	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2225 		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2226 	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2227 		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2228 	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2229 		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2230 	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2231 		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
2232 }
2233 
2234 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2235 {
2236 	u32 temp;
2237 
2238 	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2239 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2240 		return;
2241 
2242 	drm_WARN_ON(&dev_priv->drm,
2243 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2244 		    FDI_RX_ENABLE);
2245 	drm_WARN_ON(&dev_priv->drm,
2246 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2247 		    FDI_RX_ENABLE);
2248 
2249 	temp &= ~FDI_BC_BIFURCATION_SELECT;
2250 	if (enable)
2251 		temp |= FDI_BC_BIFURCATION_SELECT;
2252 
2253 	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2254 		    enable ? "en" : "dis");
2255 	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2256 	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2257 }
2258 
2259 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2260 {
2261 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2262 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2263 
2264 	switch (crtc->pipe) {
2265 	case PIPE_A:
2266 		break;
2267 	case PIPE_B:
2268 		if (crtc_state->fdi_lanes > 2)
2269 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
2270 		else
2271 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
2272 
2273 		break;
2274 	case PIPE_C:
2275 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
2276 
2277 		break;
2278 	default:
2279 		BUG();
2280 	}
2281 }
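
/*
 * Roughly what the switch above encodes: pipe B's four FDI lanes can
 * be split 2 + 2 between pipes B and C via the bifurcation bit. Pipe B
 * must keep all four lanes when it needs more than two, pipe C can
 * only ever use the bifurcated pair so enabling it there is
 * unconditional, and pipe A has its own dedicated lanes.
 */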
2282 
2283 /*
2284  * Finds the encoder associated with the given CRTC. This can only be
2285  * used when we know that the CRTC isn't feeding multiple encoders!
2286  */
2287 struct intel_encoder *
2288 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2289 			   const struct intel_crtc_state *crtc_state)
2290 {
2291 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2292 	const struct drm_connector_state *connector_state;
2293 	const struct drm_connector *connector;
2294 	struct intel_encoder *encoder = NULL;
2295 	int num_encoders = 0;
2296 	int i;
2297 
2298 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2299 		if (connector_state->crtc != &crtc->base)
2300 			continue;
2301 
2302 		encoder = to_intel_encoder(connector_state->best_encoder);
2303 		num_encoders++;
2304 	}
2305 
2306 	drm_WARN(state->base.dev, num_encoders != 1,
2307 		 "%d encoders for pipe %c\n",
2308 		 num_encoders, pipe_name(crtc->pipe));
2309 
2310 	return encoder;
2311 }
2312 
2313 /*
2314  * Enable PCH resources required for PCH ports:
2315  *   - PCH PLLs
2316  *   - FDI training & RX/TX
2317  *   - update transcoder timings
2318  *   - DP transcoding bits
2319  *   - transcoder
2320  */
2321 static void ilk_pch_enable(const struct intel_atomic_state *state,
2322 			   const struct intel_crtc_state *crtc_state)
2323 {
2324 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2325 	struct drm_device *dev = crtc->base.dev;
2326 	struct drm_i915_private *dev_priv = to_i915(dev);
2327 	enum pipe pipe = crtc->pipe;
2328 	u32 temp;
2329 
2330 	assert_pch_transcoder_disabled(dev_priv, pipe);
2331 
2332 	if (IS_IVYBRIDGE(dev_priv))
2333 		ivb_update_fdi_bc_bifurcation(crtc_state);
2334 
2335 	/* Write the TU size bits before fdi link training, so that error
2336 	 * detection works. */
2337 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2338 		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2339 
2340 	/* For PCH output, training FDI link */
2341 	dev_priv->display.fdi_link_train(crtc, crtc_state);
2342 
2343 	/* We need to program the right clock selection before writing the pixel
2344 	 * multiplier into the DPLL. */
2345 	if (HAS_PCH_CPT(dev_priv)) {
2346 		u32 sel;
2347 
2348 		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
2349 		temp |= TRANS_DPLL_ENABLE(pipe);
2350 		sel = TRANS_DPLLB_SEL(pipe);
2351 		if (crtc_state->shared_dpll ==
2352 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
2353 			temp |= sel;
2354 		else
2355 			temp &= ~sel;
2356 		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
2357 	}
2358 
2359 	/* XXX: pch plls can be enabled any time before we enable the PCH
2360 	 * transcoder, and we actually should do this to not upset any PCH
2361 	 * transcoder that already use the clock when we share it.
2362 	 *
2363 	 * Note that enable_shared_dpll tries to do the right thing, but
2364 	 * get_shared_dpll unconditionally resets the pll - we need that to have
2365 	 * the right LVDS enable sequence. */
2366 	intel_enable_shared_dpll(crtc_state);
2367 
2368 	/* set transcoder timing, panel must allow it */
2369 	assert_panel_unlocked(dev_priv, pipe);
2370 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
2371 
2372 	intel_fdi_normal_train(crtc);
2373 
2374 	/* For PCH DP, enable TRANS_DP_CTL */
2375 	if (HAS_PCH_CPT(dev_priv) &&
2376 	    intel_crtc_has_dp_encoder(crtc_state)) {
2377 		const struct drm_display_mode *adjusted_mode =
2378 			&crtc_state->hw.adjusted_mode;
2379 		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2380 		i915_reg_t reg = TRANS_DP_CTL(pipe);
2381 		enum port port;
2382 
2383 		temp = intel_de_read(dev_priv, reg);
2384 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2385 			  TRANS_DP_SYNC_MASK |
2386 			  TRANS_DP_BPC_MASK);
2387 		temp |= TRANS_DP_OUTPUT_ENABLE;
2388 		temp |= bpc << 9; /* same format but at 11:9 */
2389 
2390 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2391 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2392 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2393 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2394 
2395 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
2396 		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
2397 		temp |= TRANS_DP_PORT_SEL(port);
2398 
2399 		intel_de_write(dev_priv, reg, temp);
2400 	}
2401 
2402 	ilk_enable_pch_transcoder(crtc_state);
2403 }
2404 
2405 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
2406 {
2407 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2408 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2409 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2410 
2411 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
2412 
2413 	lpt_program_iclkip(crtc_state);
2414 
2415 	/* Set transcoder timing. */
2416 	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
2417 
2418 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
2419 }
2420 
2421 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2422 			       enum pipe pipe)
2423 {
2424 	i915_reg_t dslreg = PIPEDSL(pipe);
2425 	u32 temp;
2426 
2427 	temp = intel_de_read(dev_priv, dslreg);
2428 	udelay(500);
2429 	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2430 		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2431 			drm_err(&dev_priv->drm,
2432 				"mode set failed: pipe %c stuck\n",
2433 				pipe_name(pipe));
2434 	}
2435 }
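
/*
 * The double poll above acts as a sanity check: PIPEDSL is the live
 * scanline counter, so if it still reads the same value after two 5 ms
 * waits the pipe never started scanning out and the mode set is
 * reported as stuck.
 */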
2436 
2437 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2438 {
2439 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2440 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2441 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2442 	enum pipe pipe = crtc->pipe;
2443 	int width = drm_rect_width(dst);
2444 	int height = drm_rect_height(dst);
2445 	int x = dst->x1;
2446 	int y = dst->y1;
2447 
2448 	if (!crtc_state->pch_pfit.enabled)
2449 		return;
2450 
2451 	/* Force use of hard-coded filter coefficients
2452 	 * as some pre-programmed values are broken,
2453 	 * e.g. x201.
2454 	 */
2455 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2456 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2457 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2458 	else
2459 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2460 			       PF_FILTER_MED_3x3);
2461 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2462 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2463 }
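
/*
 * The PF_WIN_* writes above pack two values into 16-bit halves of one
 * register; e.g. a 1920x1080 panel fitter window at 0,0 is written as
 * PF_WIN_POS = 0 and PF_WIN_SZ = 1920 << 16 | 1080 = 0x07800438.
 */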
2464 
2465 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
2466 {
2467 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2468 	struct drm_device *dev = crtc->base.dev;
2469 	struct drm_i915_private *dev_priv = to_i915(dev);
2470 
2471 	if (!crtc_state->ips_enabled)
2472 		return;
2473 
2474 	/*
2475 	 * We can only enable IPS after we enable a plane and wait for a vblank
2476 	 * This function is called from post_plane_update, which is run after
2477 	 * a vblank wait.
2478 	 */
2479 	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
2480 
2481 	if (IS_BROADWELL(dev_priv)) {
2482 		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
2483 							 IPS_ENABLE | IPS_PCODE_CONTROL));
2484 		/* Quoting Art Runyan: "it's not safe to expect any particular
2485 		 * value in IPS_CTL bit 31 after enabling IPS through the
2486 		 * mailbox." Moreover, the mailbox may return a bogus state,
2487 		 * so we need to just enable it and continue on.
2488 		 */
2489 	} else {
2490 		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
2491 		/* The bit only becomes 1 in the next vblank, so this wait here
2492 		 * is essentially intel_wait_for_vblank. If we don't have this
2493 		 * and don't wait for vblanks until the end of crtc_enable, then
2494 		 * the HW state readout code will complain that the expected
2495 		 * IPS_CTL value is not the one we read. */
2496 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
2497 			drm_err(&dev_priv->drm,
2498 				"Timed out waiting for IPS enable\n");
2499 	}
2500 }
2501 
2502 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
2503 {
2504 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2505 	struct drm_device *dev = crtc->base.dev;
2506 	struct drm_i915_private *dev_priv = to_i915(dev);
2507 
2508 	if (!crtc_state->ips_enabled)
2509 		return;
2510 
2511 	if (IS_BROADWELL(dev_priv)) {
2512 		drm_WARN_ON(dev,
2513 			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
2514 		/*
2515 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
2516 		 * 42ms timeout value leads to occasional timeouts so use 100ms
2517 		 * instead.
2518 		 */
2519 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
2520 			drm_err(&dev_priv->drm,
2521 				"Timed out waiting for IPS disable\n");
2522 	} else {
2523 		intel_de_write(dev_priv, IPS_CTL, 0);
2524 		intel_de_posting_read(dev_priv, IPS_CTL);
2525 	}
2526 
2527 	/* We need to wait for a vblank before we can disable the plane. */
2528 	intel_wait_for_vblank(dev_priv, crtc->pipe);
2529 }
2530 
2531 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
2532 {
2533 	if (crtc->overlay)
2534 		(void) intel_overlay_switch_off(crtc->overlay);
2535 
2536 	/* Let userspace switch the overlay on again. In most cases userspace
2537 	 * has to recompute where to put it anyway.
2538 	 */
2539 }
2540 
2541 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2542 				       const struct intel_crtc_state *new_crtc_state)
2543 {
2544 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2545 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2546 
2547 	if (!old_crtc_state->ips_enabled)
2548 		return false;
2549 
2550 	if (intel_crtc_needs_modeset(new_crtc_state))
2551 		return true;
2552 
2553 	/*
2554 	 * Workaround: Do not read or write the pipe palette/gamma data while
2555 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2556 	 *
2557 	 * Disable IPS before we program the LUT.
2558 	 */
2559 	if (IS_HASWELL(dev_priv) &&
2560 	    (new_crtc_state->uapi.color_mgmt_changed ||
2561 	     new_crtc_state->update_pipe) &&
2562 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2563 		return true;
2564 
2565 	return !new_crtc_state->ips_enabled;
2566 }
2567 
2568 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2569 				       const struct intel_crtc_state *new_crtc_state)
2570 {
2571 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2572 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2573 
2574 	if (!new_crtc_state->ips_enabled)
2575 		return false;
2576 
2577 	if (intel_crtc_needs_modeset(new_crtc_state))
2578 		return true;
2579 
2580 	/*
2581 	 * Workaround: Do not read or write the pipe palette/gamma data while
2582 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2583 	 *
2584 	 * Re-enable IPS after the LUT has been programmed.
2585 	 */
2586 	if (IS_HASWELL(dev_priv) &&
2587 	    (new_crtc_state->uapi.color_mgmt_changed ||
2588 	     new_crtc_state->update_pipe) &&
2589 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2590 		return true;
2591 
2592 	/*
2593 	 * We can't read out IPS on broadwell, assume the worst and
2594 	 * forcibly enable IPS on the first fastset.
2595 	 */
2596 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2597 		return true;
2598 
2599 	return !old_crtc_state->ips_enabled;
2600 }
2601 
2602 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2603 {
2604 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2605 
2606 	if (!crtc_state->nv12_planes)
2607 		return false;
2608 
2609 	/* WA Display #0827: Gen9:all */
2610 	if (DISPLAY_VER(dev_priv) == 9)
2611 		return true;
2612 
2613 	return false;
2614 }
2615 
2616 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2617 {
2618 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2619 
2620 	/* Wa_2006604312:icl,ehl */
2621 	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
2622 		return true;
2623 
2624 	return false;
2625 }
2626 
2627 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2628 			    const struct intel_crtc_state *new_crtc_state)
2629 {
2630 	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2631 		new_crtc_state->active_planes;
2632 }
2633 
2634 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2635 			     const struct intel_crtc_state *new_crtc_state)
2636 {
2637 	return old_crtc_state->active_planes &&
2638 		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2639 }
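
/*
 * Together the two helpers above classify a plane transition: none ->
 * some is "enabling", some -> none is "disabling", and a full modeset
 * counts as both since the pipe is torn down and brought back up, so
 * the pre/post plane update paths handle either side of it.
 */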
2640 
2641 static void intel_post_plane_update(struct intel_atomic_state *state,
2642 				    struct intel_crtc *crtc)
2643 {
2644 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2645 	const struct intel_crtc_state *old_crtc_state =
2646 		intel_atomic_get_old_crtc_state(state, crtc);
2647 	const struct intel_crtc_state *new_crtc_state =
2648 		intel_atomic_get_new_crtc_state(state, crtc);
2649 	enum pipe pipe = crtc->pipe;
2650 
2651 	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
2652 
2653 	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
2654 		intel_update_watermarks(crtc);
2655 
2656 	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
2657 		hsw_enable_ips(new_crtc_state);
2658 
2659 	intel_fbc_post_update(state, crtc);
2660 
2661 	if (needs_nv12_wa(old_crtc_state) &&
2662 	    !needs_nv12_wa(new_crtc_state))
2663 		skl_wa_827(dev_priv, pipe, false);
2664 
2665 	if (needs_scalerclk_wa(old_crtc_state) &&
2666 	    !needs_scalerclk_wa(new_crtc_state))
2667 		icl_wa_scalerclkgating(dev_priv, pipe, false);
2668 }
2669 
2670 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2671 					struct intel_crtc *crtc)
2672 {
2673 	const struct intel_crtc_state *crtc_state =
2674 		intel_atomic_get_new_crtc_state(state, crtc);
2675 	u8 update_planes = crtc_state->update_planes;
2676 	const struct intel_plane_state *plane_state;
2677 	struct intel_plane *plane;
2678 	int i;
2679 
2680 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2681 		if (plane->enable_flip_done &&
2682 		    plane->pipe == crtc->pipe &&
2683 		    update_planes & BIT(plane->id))
2684 			plane->enable_flip_done(plane);
2685 	}
2686 }
2687 
2688 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2689 					 struct intel_crtc *crtc)
2690 {
2691 	const struct intel_crtc_state *crtc_state =
2692 		intel_atomic_get_new_crtc_state(state, crtc);
2693 	u8 update_planes = crtc_state->update_planes;
2694 	const struct intel_plane_state *plane_state;
2695 	struct intel_plane *plane;
2696 	int i;
2697 
2698 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2699 		if (plane->disable_flip_done &&
2700 		    plane->pipe == crtc->pipe &&
2701 		    update_planes & BIT(plane->id))
2702 			plane->disable_flip_done(plane);
2703 	}
2704 }
2705 
2706 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2707 					     struct intel_crtc *crtc)
2708 {
2709 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2710 	const struct intel_crtc_state *old_crtc_state =
2711 		intel_atomic_get_old_crtc_state(state, crtc);
2712 	const struct intel_crtc_state *new_crtc_state =
2713 		intel_atomic_get_new_crtc_state(state, crtc);
2714 	u8 update_planes = new_crtc_state->update_planes;
2715 	const struct intel_plane_state *old_plane_state;
2716 	struct intel_plane *plane;
2717 	bool need_vbl_wait = false;
2718 	int i;
2719 
2720 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2721 		if (plane->need_async_flip_disable_wa &&
2722 		    plane->pipe == crtc->pipe &&
2723 		    update_planes & BIT(plane->id)) {
2724 			/*
2725 			 * Apart from the async flip bit we want to
2726 			 * preserve the old state for the plane.
2727 			 */
2728 			plane->async_flip(plane, old_crtc_state,
2729 					  old_plane_state, false);
2730 			need_vbl_wait = true;
2731 		}
2732 	}
2733 
2734 	if (need_vbl_wait)
2735 		intel_wait_for_vblank(i915, crtc->pipe);
2736 }
2737 
2738 static void intel_pre_plane_update(struct intel_atomic_state *state,
2739 				   struct intel_crtc *crtc)
2740 {
2741 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2742 	const struct intel_crtc_state *old_crtc_state =
2743 		intel_atomic_get_old_crtc_state(state, crtc);
2744 	const struct intel_crtc_state *new_crtc_state =
2745 		intel_atomic_get_new_crtc_state(state, crtc);
2746 	enum pipe pipe = crtc->pipe;
2747 
2748 	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
2749 		hsw_disable_ips(old_crtc_state);
2750 
2751 	if (intel_fbc_pre_update(state, crtc))
2752 		intel_wait_for_vblank(dev_priv, pipe);
2753 
2754 	/* Display WA 827 */
2755 	if (!needs_nv12_wa(old_crtc_state) &&
2756 	    needs_nv12_wa(new_crtc_state))
2757 		skl_wa_827(dev_priv, pipe, true);
2758 
2759 	/* Wa_2006604312:icl,ehl */
2760 	if (!needs_scalerclk_wa(old_crtc_state) &&
2761 	    needs_scalerclk_wa(new_crtc_state))
2762 		icl_wa_scalerclkgating(dev_priv, pipe, true);
2763 
2764 	/*
2765 	 * Vblank time updates from the shadow to live plane control register
2766 	 * are blocked if the memory self-refresh mode is active at that
2767 	 * moment. So to make sure the plane gets truly disabled, disable
2768 	 * first the self-refresh mode. The self-refresh enable bit in turn
2769 	 * will be checked/applied by the HW only at the next frame start
2770 	 * event which is after the vblank start event, so we need to have a
2771 	 * wait-for-vblank between disabling the plane and the pipe.
2772 	 */
2773 	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
2774 	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
2775 		intel_wait_for_vblank(dev_priv, pipe);
2776 
2777 	/*
2778 	 * IVB workaround: must disable low power watermarks for at least
2779 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
2780 	 * when scaling is disabled.
2781 	 *
2782 	 * WaCxSRDisabledForSpriteScaling:ivb
2783 	 */
2784 	if (old_crtc_state->hw.active &&
2785 	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
2786 		intel_wait_for_vblank(dev_priv, pipe);
2787 
2788 	/*
2789 	 * If we're doing a modeset we don't need to do any
2790 	 * pre-vblank watermark programming here.
2791 	 */
2792 	if (!intel_crtc_needs_modeset(new_crtc_state)) {
2793 		/*
2794 		 * For platforms that support atomic watermarks, program the
2795 		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
2796 		 * will be the intermediate values that are safe for both pre- and
2797 		 * post- vblank; when vblank happens, the 'active' values will be set
2798 		 * to the final 'target' values and we'll do this again to get the
2799 		 * optimal watermarks.  For gen9+ platforms, the values we program here
2800 		 * will be the final target values which will get automatically latched
2801 		 * at vblank time; no further programming will be necessary.
2802 		 *
2803 		 * If a platform hasn't been transitioned to atomic watermarks yet,
2804 		 * we'll continue to update watermarks the old way, if flags tell
2805 		 * us to.
2806 		 */
2807 		if (dev_priv->display.initial_watermarks)
2808 			dev_priv->display.initial_watermarks(state, crtc);
2809 		else if (new_crtc_state->update_wm_pre)
2810 			intel_update_watermarks(crtc);
2811 	}
2812 
2813 	/*
2814 	 * Gen2 reports pipe underruns whenever all planes are disabled.
2815 	 * So disable underrun reporting before all the planes get disabled.
2816 	 *
2817 	 * We do this after .initial_watermarks() so that we have a
2818 	 * chance of catching underruns with the intermediate watermarks
2819 	 * vs. the old plane configuration.
2820 	 */
2821 	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
2822 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2823 
2824 	/*
2825 	 * WA for platforms where async address update enable bit
2826 	 * is double buffered and only latched at start of vblank.
2827 	 */
2828 	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
2829 		intel_crtc_async_flip_disable_wa(state, crtc);
2830 }
2831 
2832 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
2833 				      struct intel_crtc *crtc)
2834 {
2835 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2836 	const struct intel_crtc_state *new_crtc_state =
2837 		intel_atomic_get_new_crtc_state(state, crtc);
2838 	unsigned int update_mask = new_crtc_state->update_planes;
2839 	const struct intel_plane_state *old_plane_state;
2840 	struct intel_plane *plane;
2841 	unsigned fb_bits = 0;
2842 	int i;
2843 
2844 	intel_crtc_dpms_overlay_disable(crtc);
2845 
2846 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2847 		if (crtc->pipe != plane->pipe ||
2848 		    !(update_mask & BIT(plane->id)))
2849 			continue;
2850 
2851 		intel_disable_plane(plane, new_crtc_state);
2852 
2853 		if (old_plane_state->uapi.visible)
2854 			fb_bits |= plane->frontbuffer_bit;
2855 	}
2856 
2857 	intel_frontbuffer_flip(dev_priv, fb_bits);
2858 }
2859 
2860 /*
2861  * intel_connector_primary_encoder - get the primary encoder for a connector
2862  * @connector: connector for which to return the encoder
2863  *
2864  * Returns the primary encoder for a connector. There is a 1:1 mapping from
2865  * all connectors to their encoder, except for DP-MST connectors which have
2866  * both a virtual and a primary encoder. These DP-MST primary encoders can be
2867  * pointed to by as many DP-MST connectors as there are pipes.
2868  */
2869 static struct intel_encoder *
2870 intel_connector_primary_encoder(struct intel_connector *connector)
2871 {
2872 	struct intel_encoder *encoder;
2873 
2874 	if (connector->mst_port)
2875 		return &dp_to_dig_port(connector->mst_port)->base;
2876 
2877 	encoder = intel_attached_encoder(connector);
2878 	drm_WARN_ON(connector->base.dev, !encoder);
2879 
2880 	return encoder;
2881 }
2882 
2883 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
2884 {
2885 	struct drm_connector_state *new_conn_state;
2886 	struct drm_connector *connector;
2887 	int i;
2888 
2889 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2890 					i) {
2891 		struct intel_connector *intel_connector;
2892 		struct intel_encoder *encoder;
2893 		struct intel_crtc *crtc;
2894 
2895 		if (!intel_connector_needs_modeset(state, connector))
2896 			continue;
2897 
2898 		intel_connector = to_intel_connector(connector);
2899 		encoder = intel_connector_primary_encoder(intel_connector);
2900 		if (!encoder->update_prepare)
2901 			continue;
2902 
2903 		crtc = new_conn_state->crtc ?
2904 			to_intel_crtc(new_conn_state->crtc) : NULL;
2905 		encoder->update_prepare(state, encoder, crtc);
2906 	}
2907 }
2908 
2909 static void intel_encoders_update_complete(struct intel_atomic_state *state)
2910 {
2911 	struct drm_connector_state *new_conn_state;
2912 	struct drm_connector *connector;
2913 	int i;
2914 
2915 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2916 					i) {
2917 		struct intel_connector *intel_connector;
2918 		struct intel_encoder *encoder;
2919 		struct intel_crtc *crtc;
2920 
2921 		if (!intel_connector_needs_modeset(state, connector))
2922 			continue;
2923 
2924 		intel_connector = to_intel_connector(connector);
2925 		encoder = intel_connector_primary_encoder(intel_connector);
2926 		if (!encoder->update_complete)
2927 			continue;
2928 
2929 		crtc = new_conn_state->crtc ?
2930 			to_intel_crtc(new_conn_state->crtc) : NULL;
2931 		encoder->update_complete(state, encoder, crtc);
2932 	}
2933 }
2934 
2935 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
2936 					  struct intel_crtc *crtc)
2937 {
2938 	const struct intel_crtc_state *crtc_state =
2939 		intel_atomic_get_new_crtc_state(state, crtc);
2940 	const struct drm_connector_state *conn_state;
2941 	struct drm_connector *conn;
2942 	int i;
2943 
2944 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2945 		struct intel_encoder *encoder =
2946 			to_intel_encoder(conn_state->best_encoder);
2947 
2948 		if (conn_state->crtc != &crtc->base)
2949 			continue;
2950 
2951 		if (encoder->pre_pll_enable)
2952 			encoder->pre_pll_enable(state, encoder,
2953 						crtc_state, conn_state);
2954 	}
2955 }
2956 
2957 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
2958 				      struct intel_crtc *crtc)
2959 {
2960 	const struct intel_crtc_state *crtc_state =
2961 		intel_atomic_get_new_crtc_state(state, crtc);
2962 	const struct drm_connector_state *conn_state;
2963 	struct drm_connector *conn;
2964 	int i;
2965 
2966 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2967 		struct intel_encoder *encoder =
2968 			to_intel_encoder(conn_state->best_encoder);
2969 
2970 		if (conn_state->crtc != &crtc->base)
2971 			continue;
2972 
2973 		if (encoder->pre_enable)
2974 			encoder->pre_enable(state, encoder,
2975 					    crtc_state, conn_state);
2976 	}
2977 }
2978 
2979 static void intel_encoders_enable(struct intel_atomic_state *state,
2980 				  struct intel_crtc *crtc)
2981 {
2982 	const struct intel_crtc_state *crtc_state =
2983 		intel_atomic_get_new_crtc_state(state, crtc);
2984 	const struct drm_connector_state *conn_state;
2985 	struct drm_connector *conn;
2986 	int i;
2987 
2988 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2989 		struct intel_encoder *encoder =
2990 			to_intel_encoder(conn_state->best_encoder);
2991 
2992 		if (conn_state->crtc != &crtc->base)
2993 			continue;
2994 
2995 		if (encoder->enable)
2996 			encoder->enable(state, encoder,
2997 					crtc_state, conn_state);
2998 		intel_opregion_notify_encoder(encoder, true);
2999 	}
3000 }
3001 
3002 static void intel_encoders_pre_disable(struct intel_atomic_state *state,
3003 				       struct intel_crtc *crtc)
3004 {
3005 	const struct intel_crtc_state *old_crtc_state =
3006 		intel_atomic_get_old_crtc_state(state, crtc);
3007 	const struct drm_connector_state *old_conn_state;
3008 	struct drm_connector *conn;
3009 	int i;
3010 
3011 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3012 		struct intel_encoder *encoder =
3013 			to_intel_encoder(old_conn_state->best_encoder);
3014 
3015 		if (old_conn_state->crtc != &crtc->base)
3016 			continue;
3017 
3018 		if (encoder->pre_disable)
3019 			encoder->pre_disable(state, encoder, old_crtc_state,
3020 					     old_conn_state);
3021 	}
3022 }
3023 
3024 static void intel_encoders_disable(struct intel_atomic_state *state,
3025 				   struct intel_crtc *crtc)
3026 {
3027 	const struct intel_crtc_state *old_crtc_state =
3028 		intel_atomic_get_old_crtc_state(state, crtc);
3029 	const struct drm_connector_state *old_conn_state;
3030 	struct drm_connector *conn;
3031 	int i;
3032 
3033 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3034 		struct intel_encoder *encoder =
3035 			to_intel_encoder(old_conn_state->best_encoder);
3036 
3037 		if (old_conn_state->crtc != &crtc->base)
3038 			continue;
3039 
3040 		intel_opregion_notify_encoder(encoder, false);
3041 		if (encoder->disable)
3042 			encoder->disable(state, encoder,
3043 					 old_crtc_state, old_conn_state);
3044 	}
3045 }
3046 
3047 static void intel_encoders_post_disable(struct intel_atomic_state *state,
3048 					struct intel_crtc *crtc)
3049 {
3050 	const struct intel_crtc_state *old_crtc_state =
3051 		intel_atomic_get_old_crtc_state(state, crtc);
3052 	const struct drm_connector_state *old_conn_state;
3053 	struct drm_connector *conn;
3054 	int i;
3055 
3056 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3057 		struct intel_encoder *encoder =
3058 			to_intel_encoder(old_conn_state->best_encoder);
3059 
3060 		if (old_conn_state->crtc != &crtc->base)
3061 			continue;
3062 
3063 		if (encoder->post_disable)
3064 			encoder->post_disable(state, encoder,
3065 					      old_crtc_state, old_conn_state);
3066 	}
3067 }
3068 
3069 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
3070 					    struct intel_crtc *crtc)
3071 {
3072 	const struct intel_crtc_state *old_crtc_state =
3073 		intel_atomic_get_old_crtc_state(state, crtc);
3074 	const struct drm_connector_state *old_conn_state;
3075 	struct drm_connector *conn;
3076 	int i;
3077 
3078 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3079 		struct intel_encoder *encoder =
3080 			to_intel_encoder(old_conn_state->best_encoder);
3081 
3082 		if (old_conn_state->crtc != &crtc->base)
3083 			continue;
3084 
3085 		if (encoder->post_pll_disable)
3086 			encoder->post_pll_disable(state, encoder,
3087 						  old_crtc_state, old_conn_state);
3088 	}
3089 }
3090 
3091 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
3092 				       struct intel_crtc *crtc)
3093 {
3094 	const struct intel_crtc_state *crtc_state =
3095 		intel_atomic_get_new_crtc_state(state, crtc);
3096 	const struct drm_connector_state *conn_state;
3097 	struct drm_connector *conn;
3098 	int i;
3099 
3100 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3101 		struct intel_encoder *encoder =
3102 			to_intel_encoder(conn_state->best_encoder);
3103 
3104 		if (conn_state->crtc != &crtc->base)
3105 			continue;
3106 
3107 		if (encoder->update_pipe)
3108 			encoder->update_pipe(state, encoder,
3109 					     crtc_state, conn_state);
3110 	}
3111 }
3112 
3113 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
3114 {
3115 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3116 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3117 
3118 	plane->disable_plane(plane, crtc_state);
3119 }
3120 
3121 static void ilk_crtc_enable(struct intel_atomic_state *state,
3122 			    struct intel_crtc *crtc)
3123 {
3124 	const struct intel_crtc_state *new_crtc_state =
3125 		intel_atomic_get_new_crtc_state(state, crtc);
3126 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3127 	enum pipe pipe = crtc->pipe;
3128 
3129 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3130 		return;
3131 
3132 	/*
3133 	 * Sometimes spurious CPU pipe underruns happen during FDI
3134 	 * training, at least with VGA+HDMI cloning. Suppress them.
3135 	 *
3136 	 * On ILK we get occasional spurious CPU pipe underruns
3137 	 * between eDP port A enable and vdd enable. Also PCH port
3138 	 * enable seems to result in the occasional CPU pipe underrun.
3139 	 *
3140 	 * Spurious PCH underruns also occur during PCH enabling.
3141 	 */
3142 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3143 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3144 
3145 	if (new_crtc_state->has_pch_encoder)
3146 		intel_prepare_shared_dpll(new_crtc_state);
3147 
3148 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3149 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3150 
3151 	intel_set_transcoder_timings(new_crtc_state);
3152 	intel_set_pipe_src_size(new_crtc_state);
3153 
3154 	if (new_crtc_state->has_pch_encoder)
3155 		intel_cpu_transcoder_set_m_n(new_crtc_state,
3156 					     &new_crtc_state->fdi_m_n, NULL);
3157 
3158 	ilk_set_pipeconf(new_crtc_state);
3159 
3160 	crtc->active = true;
3161 
3162 	intel_encoders_pre_enable(state, crtc);
3163 
3164 	if (new_crtc_state->has_pch_encoder) {
3165 		/* Note: FDI PLL enabling _must_ be done before we enable the
3166 		 * cpu pipes, hence this is separate from all the other fdi/pch
3167 		 * enabling. */
3168 		ilk_fdi_pll_enable(new_crtc_state);
3169 	} else {
3170 		assert_fdi_tx_disabled(dev_priv, pipe);
3171 		assert_fdi_rx_disabled(dev_priv, pipe);
3172 	}
3173 
3174 	ilk_pfit_enable(new_crtc_state);
3175 
3176 	/*
3177 	 * On ILK+ LUT must be loaded before the pipe is running but with
3178 	 * clocks enabled
3179 	 */
3180 	intel_color_load_luts(new_crtc_state);
3181 	intel_color_commit(new_crtc_state);
3182 	/* update DSPCNTR to configure gamma for pipe bottom color */
3183 	intel_disable_primary_plane(new_crtc_state);
3184 
3185 	if (dev_priv->display.initial_watermarks)
3186 		dev_priv->display.initial_watermarks(state, crtc);
3187 	intel_enable_pipe(new_crtc_state);
3188 
3189 	if (new_crtc_state->has_pch_encoder)
3190 		ilk_pch_enable(state, new_crtc_state);
3191 
3192 	intel_crtc_vblank_on(new_crtc_state);
3193 
3194 	intel_encoders_enable(state, crtc);
3195 
3196 	if (HAS_PCH_CPT(dev_priv))
3197 		cpt_verify_modeset(dev_priv, pipe);
3198 
3199 	/*
3200 	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
3201 	 * And a second vblank wait is needed at least on ILK with
3202 	 * some interlaced HDMI modes. Let's do the double wait always
3203 	 * in case there are more corner cases we don't know about.
3204 	 */
3205 	if (new_crtc_state->has_pch_encoder) {
3206 		intel_wait_for_vblank(dev_priv, pipe);
3207 		intel_wait_for_vblank(dev_priv, pipe);
3208 	}
3209 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3210 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3211 }
3212 
3213 /* IPS only exists on ULT machines and is tied to pipe A. */
3214 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3215 {
3216 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3217 }
3218 
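/*
 * Display WA #1180: set the DPF/DPFR clock-gating disable bits while a
 * pipe scaler is in use; see the psl_clkgate_wa handling in
 * hsw_crtc_enable().
 */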
3219 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3220 					    enum pipe pipe, bool apply)
3221 {
3222 	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3223 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3224 
3225 	if (apply)
3226 		val |= mask;
3227 	else
3228 		val &= ~mask;
3229 
3230 	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3231 }
3232 
3233 static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
3234 {
3235 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3236 	enum pipe pipe = crtc->pipe;
3237 	u32 val;
3238 
3239 	/* Wa_22010947358:adl-p */
3240 	if (IS_ALDERLAKE_P(dev_priv))
3241 		val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
3242 	else
3243 		val = MBUS_DBOX_A_CREDIT(2);
3244 
3245 	if (DISPLAY_VER(dev_priv) >= 12) {
3246 		val |= MBUS_DBOX_BW_CREDIT(2);
3247 		val |= MBUS_DBOX_B_CREDIT(12);
3248 	} else {
3249 		val |= MBUS_DBOX_BW_CREDIT(1);
3250 		val |= MBUS_DBOX_B_CREDIT(8);
3251 	}
3252 
3253 	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3254 }
3255 
3256 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3257 {
3258 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3259 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3260 
3261 	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3262 		       HSW_LINETIME(crtc_state->linetime) |
3263 		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
3264 }
3265 
3266 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3267 {
3268 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3269 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3270 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3271 	u32 val;
3272 
3273 	val = intel_de_read(dev_priv, reg);
3274 	val &= ~HSW_FRAME_START_DELAY_MASK;
3275 	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3276 	intel_de_write(dev_priv, reg, val);
3277 }
3278 
3279 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
3280 					 const struct intel_crtc_state *crtc_state)
3281 {
3282 	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
3283 	struct drm_i915_private *dev_priv = to_i915(master->base.dev);
3284 	struct intel_crtc_state *master_crtc_state;
3285 	struct drm_connector_state *conn_state;
3286 	struct drm_connector *conn;
3287 	struct intel_encoder *encoder = NULL;
3288 	int i;
3289 
3290 	if (crtc_state->bigjoiner_slave)
3291 		master = crtc_state->bigjoiner_linked_crtc;
3292 
3293 	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
3294 
3295 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3296 		if (conn_state->crtc != &master->base)
3297 			continue;
3298 
3299 		encoder = to_intel_encoder(conn_state->best_encoder);
3300 		break;
3301 	}
3302 
3303 	if (!crtc_state->bigjoiner_slave) {
3304 		/* need to enable VDSC, which we skipped in pre-enable */
3305 		intel_dsc_enable(encoder, crtc_state);
3306 	} else {
3307 		/*
3308 		 * Enable sequence steps 1-7 on bigjoiner master
3309 		 */
3310 		intel_encoders_pre_pll_enable(state, master);
3311 		if (master_crtc_state->shared_dpll)
3312 			intel_enable_shared_dpll(master_crtc_state);
3313 		intel_encoders_pre_enable(state, master);
3314 
3315 		/* and DSC on slave */
3316 		intel_dsc_enable(NULL, crtc_state);
3317 	}
3318 
3319 	if (DISPLAY_VER(dev_priv) >= 13)
3320 		intel_uncompressed_joiner_enable(crtc_state);
3321 }
3322 
3323 static void hsw_crtc_enable(struct intel_atomic_state *state,
3324 			    struct intel_crtc *crtc)
3325 {
3326 	const struct intel_crtc_state *new_crtc_state =
3327 		intel_atomic_get_new_crtc_state(state, crtc);
3328 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3329 	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
3330 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3331 	bool psl_clkgate_wa;
3332 
3333 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3334 		return;
3335 
3336 	if (!new_crtc_state->bigjoiner) {
3337 		intel_encoders_pre_pll_enable(state, crtc);
3338 
3339 		if (new_crtc_state->shared_dpll)
3340 			intel_enable_shared_dpll(new_crtc_state);
3341 
3342 		intel_encoders_pre_enable(state, crtc);
3343 	} else {
3344 		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
3345 	}
3346 
3347 	intel_set_pipe_src_size(new_crtc_state);
3348 	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
3349 		bdw_set_pipemisc(new_crtc_state);
3350 
3351 	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
3352 		intel_set_transcoder_timings(new_crtc_state);
3353 
3354 		if (cpu_transcoder != TRANSCODER_EDP)
3355 			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
3356 				       new_crtc_state->pixel_multiplier - 1);
3357 
3358 		if (new_crtc_state->has_pch_encoder)
3359 			intel_cpu_transcoder_set_m_n(new_crtc_state,
3360 						     &new_crtc_state->fdi_m_n, NULL);
3361 
3362 		hsw_set_frame_start_delay(new_crtc_state);
3363 	}
3364 
3365 	if (!transcoder_is_dsi(cpu_transcoder))
3366 		hsw_set_pipeconf(new_crtc_state);
3367 
3368 	crtc->active = true;
3369 
3370 	/* Display WA #1180: WaDisableScalarClockGating: glk */
3371 	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
3372 		new_crtc_state->pch_pfit.enabled;
3373 	if (psl_clkgate_wa)
3374 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
3375 
3376 	if (DISPLAY_VER(dev_priv) >= 9)
3377 		skl_pfit_enable(new_crtc_state);
3378 	else
3379 		ilk_pfit_enable(new_crtc_state);
3380 
3381 	/*
3382 	 * On ILK+ LUT must be loaded before the pipe is running but with
3383 	 * clocks enabled
3384 	 */
3385 	intel_color_load_luts(new_crtc_state);
3386 	intel_color_commit(new_crtc_state);
3387 	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
3388 	if (DISPLAY_VER(dev_priv) < 9)
3389 		intel_disable_primary_plane(new_crtc_state);
3390 
3391 	hsw_set_linetime_wm(new_crtc_state);
3392 
3393 	if (DISPLAY_VER(dev_priv) >= 11)
3394 		icl_set_pipe_chicken(new_crtc_state);
3395 
3396 	if (dev_priv->display.initial_watermarks)
3397 		dev_priv->display.initial_watermarks(state, crtc);
3398 
3399 	if (DISPLAY_VER(dev_priv) >= 11) {
3400 		const struct intel_dbuf_state *dbuf_state =
3401 				intel_atomic_get_new_dbuf_state(state);
3402 
3403 		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
3404 	}
3405 
3406 	if (new_crtc_state->bigjoiner_slave)
3407 		intel_crtc_vblank_on(new_crtc_state);
3408 
3409 	intel_encoders_enable(state, crtc);
3410 
3411 	if (psl_clkgate_wa) {
3412 		intel_wait_for_vblank(dev_priv, pipe);
3413 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
3414 	}
3415 
3416 	/* If we change the relative order between pipe/planes enabling, we need
3417 	 * to change the workaround. */
3418 	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
3419 	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
3420 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3421 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3422 	}
3423 }
3424 
3425 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3426 {
3427 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3428 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3429 	enum pipe pipe = crtc->pipe;
3430 
3431 	/* To avoid upsetting the power well on Haswell, only disable the pfit if
3432 	 * it's in use. The hw state code will make sure we get this right. */
3433 	if (!old_crtc_state->pch_pfit.enabled)
3434 		return;
3435 
3436 	intel_de_write(dev_priv, PF_CTL(pipe), 0);
3437 	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
3438 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
3439 }
3440 
3441 static void ilk_crtc_disable(struct intel_atomic_state *state,
3442 			     struct intel_crtc *crtc)
3443 {
3444 	const struct intel_crtc_state *old_crtc_state =
3445 		intel_atomic_get_old_crtc_state(state, crtc);
3446 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3447 	enum pipe pipe = crtc->pipe;
3448 
3449 	/*
3450 	 * Sometimes spurious CPU pipe underruns happen when the
3451 	 * pipe is already disabled, but FDI RX/TX is still enabled.
3452 	 * Happens at least with VGA+HDMI cloning. Suppress them.
3453 	 */
3454 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3455 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3456 
3457 	intel_encoders_disable(state, crtc);
3458 
3459 	intel_crtc_vblank_off(old_crtc_state);
3460 
3461 	intel_disable_pipe(old_crtc_state);
3462 
3463 	ilk_pfit_disable(old_crtc_state);
3464 
3465 	if (old_crtc_state->has_pch_encoder)
3466 		ilk_fdi_disable(crtc);
3467 
3468 	intel_encoders_post_disable(state, crtc);
3469 
3470 	if (old_crtc_state->has_pch_encoder) {
3471 		ilk_disable_pch_transcoder(dev_priv, pipe);
3472 
3473 		if (HAS_PCH_CPT(dev_priv)) {
3474 			i915_reg_t reg;
3475 			u32 temp;
3476 
3477 			/* disable TRANS_DP_CTL */
3478 			reg = TRANS_DP_CTL(pipe);
3479 			temp = intel_de_read(dev_priv, reg);
3480 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3481 				  TRANS_DP_PORT_SEL_MASK);
3482 			temp |= TRANS_DP_PORT_SEL_NONE;
3483 			intel_de_write(dev_priv, reg, temp);
3484 
3485 			/* disable DPLL_SEL */
3486 			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3487 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3488 			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3489 		}
3490 
3491 		ilk_fdi_pll_disable(crtc);
3492 	}
3493 
3494 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3495 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3496 }
3497 
3498 static void hsw_crtc_disable(struct intel_atomic_state *state,
3499 			     struct intel_crtc *crtc)
3500 {
3501 	/*
3502 	 * FIXME collapse everything to one hook.
3503 	 * Need care with mst->ddi interactions.
3504 	 */
3505 	intel_encoders_disable(state, crtc);
3506 	intel_encoders_post_disable(state, crtc);
3507 }
3508 
3509 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
3510 {
3511 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3512 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3513 
3514 	if (!crtc_state->gmch_pfit.control)
3515 		return;
3516 
3517 	/*
3518 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
3519 	 * according to register description and PRM.
3520 	 */
3521 	drm_WARN_ON(&dev_priv->drm,
3522 		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
3523 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
3524 
3525 	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
3526 		       crtc_state->gmch_pfit.pgm_ratios);
3527 	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
3528 
3529 	/* Border color in case we don't scale up to the full screen. Black by
3530 	 * default, change to something else for debugging. */
3531 	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
3532 }
3533 
3534 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3535 {
3536 	if (phy == PHY_NONE)
3537 		return false;
3538 	else if (IS_DG2(dev_priv))
3539 		/*
3540 		 * DG2 outputs labelled as "combo PHY" in the bspec use
3541 		 * SNPS PHYs with completely different programming,
3542 		 * hence we always return false here.
3543 		 */
3544 		return false;
3545 	else if (IS_ALDERLAKE_S(dev_priv))
3546 		return phy <= PHY_E;
3547 	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3548 		return phy <= PHY_D;
3549 	else if (IS_JSL_EHL(dev_priv))
3550 		return phy <= PHY_C;
3551 	else if (DISPLAY_VER(dev_priv) >= 11)
3552 		return phy <= PHY_B;
3553 	else
3554 		return false;
3555 }
3556 
3557 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3558 {
3559 	if (IS_DG2(dev_priv))
3560 		/* DG2's "TC1" output uses a SNPS PHY */
3561 		return false;
3562 	else if (IS_ALDERLAKE_P(dev_priv))
3563 		return phy >= PHY_F && phy <= PHY_I;
3564 	else if (IS_TIGERLAKE(dev_priv))
3565 		return phy >= PHY_D && phy <= PHY_I;
3566 	else if (IS_ICELAKE(dev_priv))
3567 		return phy >= PHY_C && phy <= PHY_F;
3568 	else
3569 		return false;
3570 }
3571 
3572 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
3573 {
3574 	if (phy == PHY_NONE)
3575 		return false;
3576 	else if (IS_DG2(dev_priv))
3577 		/*
3578 		 * All four "combo" ports and the TC1 port (PHY E) use
3579 		 * Synopsys PHYs.
3580 		 */
3581 		return phy <= PHY_E;
3582 
3583 	return false;
3584 }
3585 
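/*
 * Map a DDI port to the PHY that backs it. Where the TC port range
 * begins varies by platform: e.g. PORT_TC1 lands on PHY_B on ADL-S
 * but on PHY_C on DG1/RKL.
 */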
3586 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
3587 {
3588 	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
3589 		return PHY_D + port - PORT_D_XELPD;
3590 	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
3591 		return PHY_F + port - PORT_TC1;
3592 	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
3593 		return PHY_B + port - PORT_TC1;
3594 	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
3595 		return PHY_C + port - PORT_TC1;
3596 	else if (IS_JSL_EHL(i915) && port == PORT_D)
3597 		return PHY_A;
3598 
3599 	return PHY_A + port - PORT_A;
3600 }
3601 
3602 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3603 {
3604 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3605 		return TC_PORT_NONE;
3606 
3607 	if (DISPLAY_VER(dev_priv) >= 12)
3608 		return TC_PORT_1 + port - PORT_TC1;
3609 	else
3610 		return TC_PORT_1 + port - PORT_C;
3611 }
3612 
3613 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
3614 {
3615 	switch (port) {
3616 	case PORT_A:
3617 		return POWER_DOMAIN_PORT_DDI_A_LANES;
3618 	case PORT_B:
3619 		return POWER_DOMAIN_PORT_DDI_B_LANES;
3620 	case PORT_C:
3621 		return POWER_DOMAIN_PORT_DDI_C_LANES;
3622 	case PORT_D:
3623 		return POWER_DOMAIN_PORT_DDI_D_LANES;
3624 	case PORT_E:
3625 		return POWER_DOMAIN_PORT_DDI_E_LANES;
3626 	case PORT_F:
3627 		return POWER_DOMAIN_PORT_DDI_F_LANES;
3628 	case PORT_G:
3629 		return POWER_DOMAIN_PORT_DDI_G_LANES;
3630 	case PORT_H:
3631 		return POWER_DOMAIN_PORT_DDI_H_LANES;
3632 	case PORT_I:
3633 		return POWER_DOMAIN_PORT_DDI_I_LANES;
3634 	default:
3635 		MISSING_CASE(port);
3636 		return POWER_DOMAIN_PORT_OTHER;
3637 	}
3638 }
3639 
3640 enum intel_display_power_domain
3641 intel_aux_power_domain(struct intel_digital_port *dig_port)
3642 {
3643 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3644 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
3645 
3646 	if (intel_phy_is_tc(dev_priv, phy) &&
3647 	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
3648 		switch (dig_port->aux_ch) {
3649 		case AUX_CH_C:
3650 			return POWER_DOMAIN_AUX_C_TBT;
3651 		case AUX_CH_D:
3652 			return POWER_DOMAIN_AUX_D_TBT;
3653 		case AUX_CH_E:
3654 			return POWER_DOMAIN_AUX_E_TBT;
3655 		case AUX_CH_F:
3656 			return POWER_DOMAIN_AUX_F_TBT;
3657 		case AUX_CH_G:
3658 			return POWER_DOMAIN_AUX_G_TBT;
3659 		case AUX_CH_H:
3660 			return POWER_DOMAIN_AUX_H_TBT;
3661 		case AUX_CH_I:
3662 			return POWER_DOMAIN_AUX_I_TBT;
3663 		default:
3664 			MISSING_CASE(dig_port->aux_ch);
3665 			return POWER_DOMAIN_AUX_C_TBT;
3666 		}
3667 	}
3668 
3669 	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
3670 }
3671 
3672 /*
3673  * Converts aux_ch to power_domain without caring about TBT ports; for those,
3674  * use intel_aux_power_domain().
3675  */
3676 enum intel_display_power_domain
3677 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
3678 {
3679 	switch (aux_ch) {
3680 	case AUX_CH_A:
3681 		return POWER_DOMAIN_AUX_A;
3682 	case AUX_CH_B:
3683 		return POWER_DOMAIN_AUX_B;
3684 	case AUX_CH_C:
3685 		return POWER_DOMAIN_AUX_C;
3686 	case AUX_CH_D:
3687 		return POWER_DOMAIN_AUX_D;
3688 	case AUX_CH_E:
3689 		return POWER_DOMAIN_AUX_E;
3690 	case AUX_CH_F:
3691 		return POWER_DOMAIN_AUX_F;
3692 	case AUX_CH_G:
3693 		return POWER_DOMAIN_AUX_G;
3694 	case AUX_CH_H:
3695 		return POWER_DOMAIN_AUX_H;
3696 	case AUX_CH_I:
3697 		return POWER_DOMAIN_AUX_I;
3698 	default:
3699 		MISSING_CASE(aux_ch);
3700 		return POWER_DOMAIN_AUX_A;
3701 	}
3702 }
3703 
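/*
 * Compute the mask of display power domains this CRTC requires while
 * active: the pipe and transcoder, the panel fitter if in use, every
 * attached encoder's domain, plus audio, the shared DPLL and DSC as
 * applicable. Inactive CRTCs need no domains.
 */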
3704 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3705 {
3706 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3707 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3708 	struct drm_encoder *encoder;
3709 	enum pipe pipe = crtc->pipe;
3710 	u64 mask;
3711 	enum transcoder transcoder = crtc_state->cpu_transcoder;
3712 
3713 	if (!crtc_state->hw.active)
3714 		return 0;
3715 
3716 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
3717 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
3718 	if (crtc_state->pch_pfit.enabled ||
3719 	    crtc_state->pch_pfit.force_thru)
3720 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
3721 
3722 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
3723 				  crtc_state->uapi.encoder_mask) {
3724 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3725 
3726 		mask |= BIT_ULL(intel_encoder->power_domain);
3727 	}
3728 
3729 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
3730 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
3731 
3732 	if (crtc_state->shared_dpll)
3733 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
3734 
3735 	if (crtc_state->dsc.compression_enable)
3736 		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
3737 
3738 	return mask;
3739 }
3740 
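/*
 * Acquire references on the power domains the new CRTC state needs
 * but which we do not hold yet, and return the mask of domains that
 * are no longer needed so the caller can drop them later via
 * modeset_put_crtc_power_domains().
 */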
3741 static u64
3742 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3743 {
3744 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3745 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3746 	enum intel_display_power_domain domain;
3747 	u64 domains, new_domains, old_domains;
3748 
3749 	domains = get_crtc_power_domains(crtc_state);
3750 
3751 	new_domains = domains & ~crtc->enabled_power_domains.mask;
3752 	old_domains = crtc->enabled_power_domains.mask & ~domains;
3753 
3754 	for_each_power_domain(domain, new_domains)
3755 		intel_display_power_get_in_set(dev_priv,
3756 					       &crtc->enabled_power_domains,
3757 					       domain);
3758 
3759 	return old_domains;
3760 }
3761 
3762 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3763 					   u64 domains)
3764 {
3765 	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3766 					    &crtc->enabled_power_domains,
3767 					    domains);
3768 }
3769 
3770 static void valleyview_crtc_enable(struct intel_atomic_state *state,
3771 				   struct intel_crtc *crtc)
3772 {
3773 	const struct intel_crtc_state *new_crtc_state =
3774 		intel_atomic_get_new_crtc_state(state, crtc);
3775 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3776 	enum pipe pipe = crtc->pipe;
3777 
3778 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3779 		return;
3780 
3781 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3782 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3783 
3784 	intel_set_transcoder_timings(new_crtc_state);
3785 	intel_set_pipe_src_size(new_crtc_state);
3786 
3787 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
3788 		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
3789 		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
3790 	}
3791 
3792 	i9xx_set_pipeconf(new_crtc_state);
3793 
3794 	crtc->active = true;
3795 
3796 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3797 
3798 	intel_encoders_pre_pll_enable(state, crtc);
3799 
3800 	if (IS_CHERRYVIEW(dev_priv)) {
3801 		chv_prepare_pll(crtc, new_crtc_state);
3802 		chv_enable_pll(crtc, new_crtc_state);
3803 	} else {
3804 		vlv_prepare_pll(crtc, new_crtc_state);
3805 		vlv_enable_pll(crtc, new_crtc_state);
3806 	}
3807 
3808 	intel_encoders_pre_enable(state, crtc);
3809 
3810 	i9xx_pfit_enable(new_crtc_state);
3811 
3812 	intel_color_load_luts(new_crtc_state);
3813 	intel_color_commit(new_crtc_state);
3814 	/* update DSPCNTR to configure gamma for pipe bottom color */
3815 	intel_disable_primary_plane(new_crtc_state);
3816 
3817 	dev_priv->display.initial_watermarks(state, crtc);
3818 	intel_enable_pipe(new_crtc_state);
3819 
3820 	intel_crtc_vblank_on(new_crtc_state);
3821 
3822 	intel_encoders_enable(state, crtc);
3823 }
3824 
3825 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
3826 {
3827 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3828 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3829 
3830 	intel_de_write(dev_priv, FP0(crtc->pipe),
3831 		       crtc_state->dpll_hw_state.fp0);
3832 	intel_de_write(dev_priv, FP1(crtc->pipe),
3833 		       crtc_state->dpll_hw_state.fp1);
3834 }
3835 
3836 static void i9xx_crtc_enable(struct intel_atomic_state *state,
3837 			     struct intel_crtc *crtc)
3838 {
3839 	const struct intel_crtc_state *new_crtc_state =
3840 		intel_atomic_get_new_crtc_state(state, crtc);
3841 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3842 	enum pipe pipe = crtc->pipe;
3843 
3844 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3845 		return;
3846 
3847 	i9xx_set_pll_dividers(new_crtc_state);
3848 
3849 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3850 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3851 
3852 	intel_set_transcoder_timings(new_crtc_state);
3853 	intel_set_pipe_src_size(new_crtc_state);
3854 
3855 	i9xx_set_pipeconf(new_crtc_state);
3856 
3857 	crtc->active = true;
3858 
3859 	if (DISPLAY_VER(dev_priv) != 2)
3860 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3861 
3862 	intel_encoders_pre_enable(state, crtc);
3863 
3864 	i9xx_enable_pll(crtc, new_crtc_state);
3865 
3866 	i9xx_pfit_enable(new_crtc_state);
3867 
3868 	intel_color_load_luts(new_crtc_state);
3869 	intel_color_commit(new_crtc_state);
3870 	/* update DSPCNTR to configure gamma for pipe bottom color */
3871 	intel_disable_primary_plane(new_crtc_state);
3872 
3873 	if (dev_priv->display.initial_watermarks)
3874 		dev_priv->display.initial_watermarks(state, crtc);
3875 	else
3876 		intel_update_watermarks(crtc);
3877 	intel_enable_pipe(new_crtc_state);
3878 
3879 	intel_crtc_vblank_on(new_crtc_state);
3880 
3881 	intel_encoders_enable(state, crtc);
3882 
3883 	/* prevents spurious underruns */
3884 	if (DISPLAY_VER(dev_priv) == 2)
3885 		intel_wait_for_vblank(dev_priv, pipe);
3886 }
3887 
3888 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3889 {
3890 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3891 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3892 
3893 	if (!old_crtc_state->gmch_pfit.control)
3894 		return;
3895 
3896 	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
3897 
3898 	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
3899 		    intel_de_read(dev_priv, PFIT_CONTROL));
3900 	intel_de_write(dev_priv, PFIT_CONTROL, 0);
3901 }
3902 
3903 static void i9xx_crtc_disable(struct intel_atomic_state *state,
3904 			      struct intel_crtc *crtc)
3905 {
3906 	struct intel_crtc_state *old_crtc_state =
3907 		intel_atomic_get_old_crtc_state(state, crtc);
3908 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3909 	enum pipe pipe = crtc->pipe;
3910 
3911 	/*
3912 	 * On gen2 planes are double buffered but the pipe isn't, so we must
3913 	 * wait for planes to fully turn off before disabling the pipe.
3914 	 */
3915 	if (DISPLAY_VER(dev_priv) == 2)
3916 		intel_wait_for_vblank(dev_priv, pipe);
3917 
3918 	intel_encoders_disable(state, crtc);
3919 
3920 	intel_crtc_vblank_off(old_crtc_state);
3921 
3922 	intel_disable_pipe(old_crtc_state);
3923 
3924 	i9xx_pfit_disable(old_crtc_state);
3925 
3926 	intel_encoders_post_disable(state, crtc);
3927 
3928 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
3929 		if (IS_CHERRYVIEW(dev_priv))
3930 			chv_disable_pll(dev_priv, pipe);
3931 		else if (IS_VALLEYVIEW(dev_priv))
3932 			vlv_disable_pll(dev_priv, pipe);
3933 		else
3934 			i9xx_disable_pll(old_crtc_state);
3935 	}
3936 
3937 	intel_encoders_post_pll_disable(state, crtc);
3938 
3939 	if (DISPLAY_VER(dev_priv) != 2)
3940 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3941 
3942 	if (!dev_priv->display.initial_watermarks)
3943 		intel_update_watermarks(crtc);
3944 
3945 	/* clock the pipe down to 640x480@60 to potentially save power */
3946 	if (IS_I830(dev_priv))
3947 		i830_enable_pipe(dev_priv, pipe);
3948 }
3949 
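/*
 * Force a CRTC off outside of a normal atomic commit, e.g. when
 * sanitizing an inconsistent hardware state, and scrub the driver-side
 * bookkeeping (cdclk, dbuf and bandwidth state) to match.
 */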
3950 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
3951 					struct drm_modeset_acquire_ctx *ctx)
3952 {
3953 	struct intel_encoder *encoder;
3954 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3955 	struct intel_bw_state *bw_state =
3956 		to_intel_bw_state(dev_priv->bw_obj.state);
3957 	struct intel_cdclk_state *cdclk_state =
3958 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
3959 	struct intel_dbuf_state *dbuf_state =
3960 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
3961 	struct intel_crtc_state *crtc_state =
3962 		to_intel_crtc_state(crtc->base.state);
3963 	struct intel_plane *plane;
3964 	struct drm_atomic_state *state;
3965 	struct intel_crtc_state *temp_crtc_state;
3966 	enum pipe pipe = crtc->pipe;
3967 	int ret;
3968 
3969 	if (!crtc_state->hw.active)
3970 		return;
3971 
3972 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
3973 		const struct intel_plane_state *plane_state =
3974 			to_intel_plane_state(plane->base.state);
3975 
3976 		if (plane_state->uapi.visible)
3977 			intel_plane_disable_noatomic(crtc, plane);
3978 	}
3979 
3980 	state = drm_atomic_state_alloc(&dev_priv->drm);
3981 	if (!state) {
3982 		drm_dbg_kms(&dev_priv->drm,
3983 			    "failed to disable [CRTC:%d:%s], out of memory",
3984 			    crtc->base.base.id, crtc->base.name);
3985 		return;
3986 	}
3987 
3988 	state->acquire_ctx = ctx;
3989 
3990 	/* Everything's already locked, -EDEADLK can't happen. */
3991 	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
3992 	ret = drm_atomic_add_affected_connectors(state, &crtc->base);
3993 
3994 	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
3995 
3996 	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
3997 
3998 	drm_atomic_state_put(state);
3999 
4000 	drm_dbg_kms(&dev_priv->drm,
4001 		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
4002 		    crtc->base.base.id, crtc->base.name);
4003 
4004 	crtc->active = false;
4005 	crtc->base.enabled = false;
4006 
4007 	drm_WARN_ON(&dev_priv->drm,
4008 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
4009 	crtc_state->uapi.active = false;
4010 	crtc_state->uapi.connector_mask = 0;
4011 	crtc_state->uapi.encoder_mask = 0;
4012 	intel_crtc_free_hw_state(crtc_state);
4013 	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
4014 
4015 	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
4016 		encoder->base.crtc = NULL;
4017 
4018 	intel_fbc_disable(crtc);
4019 	intel_update_watermarks(crtc);
4020 	intel_disable_shared_dpll(crtc_state);
4021 
4022 	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
4023 
4024 	dev_priv->active_pipes &= ~BIT(pipe);
4025 	cdclk_state->min_cdclk[pipe] = 0;
4026 	cdclk_state->min_voltage_level[pipe] = 0;
4027 	cdclk_state->active_pipes &= ~BIT(pipe);
4028 
4029 	dbuf_state->active_pipes &= ~BIT(pipe);
4030 
4031 	bw_state->data_rate[pipe] = 0;
4032 	bw_state->num_active_planes[pipe] = 0;
4033 }
4034 
4035 /*
4036  * Turn all CRTCs off, but do not adjust state.
4037  * This has to be paired with a call to intel_modeset_setup_hw_state.
4038  */
4039 int intel_display_suspend(struct drm_device *dev)
4040 {
4041 	struct drm_i915_private *dev_priv = to_i915(dev);
4042 	struct drm_atomic_state *state;
4043 	int ret;
4044 
4045 	if (!HAS_DISPLAY(dev_priv))
4046 		return 0;
4047 
4048 	state = drm_atomic_helper_suspend(dev);
4049 	ret = PTR_ERR_OR_ZERO(state);
4050 	if (ret)
4051 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
4052 			ret);
4053 	else
4054 		dev_priv->modeset_restore_state = state;
4055 	return ret;
4056 }
4057 
4058 void intel_encoder_destroy(struct drm_encoder *encoder)
4059 {
4060 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4061 
4062 	drm_encoder_cleanup(encoder);
4063 	kfree(intel_encoder);
4064 }
4065 
4066 /* Cross check the actual hw state with our own modeset state tracking (and its
4067  * internal consistency). */
4068 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
4069 					 struct drm_connector_state *conn_state)
4070 {
4071 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
4072 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
4073 
4074 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
4075 		    connector->base.base.id, connector->base.name);
4076 
4077 	if (connector->get_hw_state(connector)) {
4078 		struct intel_encoder *encoder = intel_attached_encoder(connector);
4079 
4080 		I915_STATE_WARN(!crtc_state,
4081 			 "connector enabled without attached crtc\n");
4082 
4083 		if (!crtc_state)
4084 			return;
4085 
4086 		I915_STATE_WARN(!crtc_state->hw.active,
4087 				"connector is active, but attached crtc isn't\n");
4088 
4089 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
4090 			return;
4091 
4092 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
4093 			"atomic encoder doesn't match attached encoder\n");
4094 
4095 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
4096 			"attached encoder crtc differs from connector crtc\n");
4097 	} else {
4098 		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
4099 				"attached crtc is active, but connector isn't\n");
4100 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
4101 			"best encoder set without crtc!\n");
4102 	}
4103 }
4104 
4105 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
4106 {
4107 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4108 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4109 
4110 	/* IPS only exists on ULT machines and is tied to pipe A. */
4111 	if (!hsw_crtc_supports_ips(crtc))
4112 		return false;
4113 
4114 	if (!dev_priv->params.enable_ips)
4115 		return false;
4116 
4117 	if (crtc_state->pipe_bpp > 24)
4118 		return false;
4119 
4120 	/*
4121 	 * We compare against max which means we must take
4122 	 * the increased cdclk requirement into account when
4123 	 * calculating the new cdclk.
4124 	 *
4125 	 * Should measure whether using a lower cdclk w/o IPS would be better.
4126 	 */
4127 	if (IS_BROADWELL(dev_priv) &&
4128 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
4129 		return false;
4130 
4131 	return true;
4132 }
4133 
4134 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
4135 {
4136 	struct drm_i915_private *dev_priv =
4137 		to_i915(crtc_state->uapi.crtc->dev);
4138 	struct intel_atomic_state *state =
4139 		to_intel_atomic_state(crtc_state->uapi.state);
4140 
4141 	crtc_state->ips_enabled = false;
4142 
4143 	if (!hsw_crtc_state_ips_capable(crtc_state))
4144 		return 0;
4145 
4146 	/*
4147 	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4148 	 * enabled and disabled dynamically based on package C states,
4149 	 * user space can't make reliable use of the CRCs, so let's just
4150 	 * completely disable it.
4151 	 */
4152 	if (crtc_state->crc_enabled)
4153 		return 0;
4154 
4155 	/* IPS should be fine as long as at least one plane is enabled. */
4156 	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
4157 		return 0;
4158 
4159 	if (IS_BROADWELL(dev_priv)) {
4160 		const struct intel_cdclk_state *cdclk_state;
4161 
4162 		cdclk_state = intel_atomic_get_cdclk_state(state);
4163 		if (IS_ERR(cdclk_state))
4164 			return PTR_ERR(cdclk_state);
4165 
4166 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
4167 		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
4168 			return 0;
4169 	}
4170 
4171 	crtc_state->ips_enabled = true;
4172 
4173 	return 0;
4174 }
4175 
4176 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
4177 {
4178 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4179 
4180 	/* GDG double wide on either pipe, otherwise pipe A only */
4181 	return DISPLAY_VER(dev_priv) < 4 &&
4182 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
4183 }
4184 
4185 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
4186 {
4187 	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
4188 	struct drm_rect src;
4189 
4190 	/*
4191 	 * We only use IF-ID interlacing. If we ever use
4192 	 * PF-ID we'll need to adjust the pixel_rate here.
4193 	 */
4194 
4195 	if (!crtc_state->pch_pfit.enabled)
4196 		return pixel_rate;
4197 
4198 	drm_rect_init(&src, 0, 0,
4199 		      crtc_state->pipe_src_w << 16,
4200 		      crtc_state->pipe_src_h << 16);
4201 
4202 	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
4203 				   pixel_rate);
4204 }
4205 
4206 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4207 					 const struct drm_display_mode *timings)
4208 {
4209 	mode->hdisplay = timings->crtc_hdisplay;
4210 	mode->htotal = timings->crtc_htotal;
4211 	mode->hsync_start = timings->crtc_hsync_start;
4212 	mode->hsync_end = timings->crtc_hsync_end;
4213 
4214 	mode->vdisplay = timings->crtc_vdisplay;
4215 	mode->vtotal = timings->crtc_vtotal;
4216 	mode->vsync_start = timings->crtc_vsync_start;
4217 	mode->vsync_end = timings->crtc_vsync_end;
4218 
4219 	mode->flags = timings->flags;
4220 	mode->type = DRM_MODE_TYPE_DRIVER;
4221 
4222 	mode->clock = timings->crtc_clock;
4223 
4224 	drm_mode_set_name(mode);
4225 }
4226 
4227 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4228 {
4229 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4230 
4231 	if (HAS_GMCH(dev_priv))
4232 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
4233 		crtc_state->pixel_rate =
4234 			crtc_state->hw.pipe_mode.crtc_clock;
4235 	else
4236 		crtc_state->pixel_rate =
4237 			ilk_pipe_pixel_rate(crtc_state);
4238 }
4239 
4240 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
4241 {
4242 	struct drm_display_mode *mode = &crtc_state->hw.mode;
4243 	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4244 	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4245 
4246 	drm_mode_copy(pipe_mode, adjusted_mode);
4247 
4248 	if (crtc_state->bigjoiner) {
4249 		/*
4250 		 * transcoder is programmed to the full mode,
4251 		 * but pipe timings are half of the transcoder mode
4252 		 */
4253 		pipe_mode->crtc_hdisplay /= 2;
4254 		pipe_mode->crtc_hblank_start /= 2;
4255 		pipe_mode->crtc_hblank_end /= 2;
4256 		pipe_mode->crtc_hsync_start /= 2;
4257 		pipe_mode->crtc_hsync_end /= 2;
4258 		pipe_mode->crtc_htotal /= 2;
4259 		pipe_mode->crtc_clock /= 2;
4260 	}
4261 
4262 	if (crtc_state->splitter.enable) {
4263 		int n = crtc_state->splitter.link_count;
4264 		int overlap = crtc_state->splitter.pixel_overlap;
4265 
4266 		/*
4267 		 * eDP MSO uses segment timings from EDID for transcoder
4268 		 * timings, but full mode for everything else.
4269 		 *
4270 		 * h_full = (h_segment - pixel_overlap) * link_count
4271 		 */
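		/*
		 * E.g. with two links and no pixel overlap, a 1920 pixel
		 * wide segment becomes a 3840 pixel wide full mode.
		 */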
4272 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4273 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4274 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4275 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4276 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4277 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4278 		pipe_mode->crtc_clock *= n;
4279 
4280 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4281 		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
4282 	} else {
4283 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4284 		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
4285 	}
4286 
4287 	intel_crtc_compute_pixel_rate(crtc_state);
4288 
4289 	drm_mode_copy(mode, adjusted_mode);
4290 	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
4291 	mode->vdisplay = crtc_state->pipe_src_h;
4292 }
4293 
4294 static void intel_encoder_get_config(struct intel_encoder *encoder,
4295 				     struct intel_crtc_state *crtc_state)
4296 {
4297 	encoder->get_config(encoder, crtc_state);
4298 
4299 	intel_crtc_readout_derived_state(crtc_state);
4300 }
4301 
4302 static int intel_crtc_compute_config(struct intel_crtc *crtc,
4303 				     struct intel_crtc_state *pipe_config)
4304 {
4305 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4306 	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
4307 	int clock_limit = dev_priv->max_dotclk_freq;
4308 
4309 	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
4310 
4311 	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
4312 	if (pipe_config->bigjoiner) {
4313 		pipe_mode->crtc_clock /= 2;
4314 		pipe_mode->crtc_hdisplay /= 2;
4315 		pipe_mode->crtc_hblank_start /= 2;
4316 		pipe_mode->crtc_hblank_end /= 2;
4317 		pipe_mode->crtc_hsync_start /= 2;
4318 		pipe_mode->crtc_hsync_end /= 2;
4319 		pipe_mode->crtc_htotal /= 2;
4320 		pipe_config->pipe_src_w /= 2;
4321 	}
4322 
4323 	if (pipe_config->splitter.enable) {
4324 		int n = pipe_config->splitter.link_count;
4325 		int overlap = pipe_config->splitter.pixel_overlap;
4326 
4327 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4328 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4329 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4330 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4331 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4332 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4333 		pipe_mode->crtc_clock *= n;
4334 	}
4335 
4336 	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4337 
4338 	if (DISPLAY_VER(dev_priv) < 4) {
4339 		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
4340 
4341 		/*
4342 		 * Enable double wide mode when the dot clock
4343 		 * is > 90% of the (display) core speed.
4344 		 */
4345 		if (intel_crtc_supports_double_wide(crtc) &&
4346 		    pipe_mode->crtc_clock > clock_limit) {
4347 			clock_limit = dev_priv->max_dotclk_freq;
4348 			pipe_config->double_wide = true;
4349 		}
4350 	}
4351 
4352 	if (pipe_mode->crtc_clock > clock_limit) {
4353 		drm_dbg_kms(&dev_priv->drm,
4354 			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
4355 			    pipe_mode->crtc_clock, clock_limit,
4356 			    yesno(pipe_config->double_wide));
4357 		return -EINVAL;
4358 	}
4359 
4360 	/*
4361 	 * Pipe horizontal size must be even in:
4362 	 * - DVO ganged mode
4363 	 * - LVDS dual channel mode
4364 	 * - Double wide pipe
4365 	 */
4366 	if (pipe_config->pipe_src_w & 1) {
4367 		if (pipe_config->double_wide) {
4368 			drm_dbg_kms(&dev_priv->drm,
4369 				    "Odd pipe source width not supported with double wide pipe\n");
4370 			return -EINVAL;
4371 		}
4372 
4373 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
4374 		    intel_is_dual_link_lvds(dev_priv)) {
4375 			drm_dbg_kms(&dev_priv->drm,
4376 				    "Odd pipe source width not supported with dual link LVDS\n");
4377 			return -EINVAL;
4378 		}
4379 	}
4380 
4381 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
4382 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4383 	 */
4384 	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
4385 	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
4386 		return -EINVAL;
4387 
4388 	intel_crtc_compute_pixel_rate(pipe_config);
4389 
4390 	if (pipe_config->has_pch_encoder)
4391 		return ilk_fdi_compute_config(crtc, pipe_config);
4392 
4393 	return 0;
4394 }
4395 
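/*
 * Halve both values until each fits under DATA_LINK_M_N_MASK; shifting
 * numerator and denominator together keeps the M/N ratio (almost)
 * unchanged.
 */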
4396 static void
4397 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4398 {
4399 	while (*num > DATA_LINK_M_N_MASK ||
4400 	       *den > DATA_LINK_M_N_MASK) {
4401 		*num >>= 1;
4402 		*den >>= 1;
4403 	}
4404 }
4405 
4406 static void compute_m_n(unsigned int m, unsigned int n,
4407 			u32 *ret_m, u32 *ret_n,
4408 			bool constant_n)
4409 {
4410 	/*
4411 	 * Several DP dongles in particular seem to be fussy about
4412 	 * too large link M/N values. Use 0x8000 as the N value, which
4413 	 * these devices should find acceptable. 0x8000 is the
4414 	 * specified fixed N value for asynchronous clock mode,
4415 	 * which the devices also expect in synchronous clock mode.
4416 	 */
4417 	if (constant_n)
4418 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
4419 	else
4420 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4421 
4422 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4423 	intel_reduce_m_n_ratio(ret_m, ret_n);
4424 }
4425 
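/*
 * Worked example (assuming a 1920x1080@60 mode, 148500 kHz pixel clock,
 * 24 bpp, on a 4-lane HBR (270000 kHz) DP link with constant_n):
 *   data M/N: 24 * 148500 / (270000 * 4 * 8) -> 13516 / 0x8000
 *   link M/N: 148500 / 270000                -> 18022 / 0x8000
 */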
4426 void
4427 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
4428 		       int pixel_clock, int link_clock,
4429 		       struct intel_link_m_n *m_n,
4430 		       bool constant_n, bool fec_enable)
4431 {
4432 	u32 data_clock = bits_per_pixel * pixel_clock;
4433 
4434 	if (fec_enable)
4435 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
4436 
4437 	m_n->tu = 64;
4438 	compute_m_n(data_clock,
4439 		    link_clock * nlanes * 8,
4440 		    &m_n->gmch_m, &m_n->gmch_n,
4441 		    constant_n);
4442 
4443 	compute_m_n(pixel_clock, link_clock,
4444 		    &m_n->link_m, &m_n->link_n,
4445 		    constant_n);
4446 }
4447 
4448 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
4449 {
4450 	/*
4451 	 * There may be no VBT; if the BIOS enabled SSC we can just keep
4452 	 * using it to avoid unnecessary flicker. But if the BIOS isn't
4453 	 * using it, don't assume it will work even if the VBT indicates
4454 	 * as much.
4455 	 */
4456 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
4457 		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
4458 						       PCH_DREF_CONTROL) &
4459 			DREF_SSC1_ENABLE;
4460 
4461 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
4462 			drm_dbg_kms(&dev_priv->drm,
4463 				    "SSC %s by BIOS, overriding VBT which says %s\n",
4464 				    enableddisabled(bios_lvds_use_ssc),
4465 				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
4466 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
4467 		}
4468 	}
4469 }
4470 
4471 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4472 					 const struct intel_link_m_n *m_n)
4473 {
4474 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4475 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4476 	enum pipe pipe = crtc->pipe;
4477 
4478 	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
4479 		       TU_SIZE(m_n->tu) | m_n->gmch_m);
4480 	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4481 	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4482 	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4483 }
4484 
4485 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4486 				 enum transcoder transcoder)
4487 {
4488 	if (IS_HASWELL(dev_priv))
4489 		return transcoder == TRANSCODER_EDP;
4490 
4491 	/*
4492 	 * Strictly speaking some registers are available before
4493 	 * gen7, but we only support DRRS on gen7+
4494 	 */
4495 	return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
4496 }
4497 
4498 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4499 					 const struct intel_link_m_n *m_n,
4500 					 const struct intel_link_m_n *m2_n2)
4501 {
4502 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4503 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4504 	enum pipe pipe = crtc->pipe;
4505 	enum transcoder transcoder = crtc_state->cpu_transcoder;
4506 
4507 	if (DISPLAY_VER(dev_priv) >= 5) {
4508 		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
4509 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4510 		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
4511 			       m_n->gmch_n);
4512 		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
4513 			       m_n->link_m);
4514 		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
4515 			       m_n->link_n);
4516 		/*
4517 		 * M2_N2 registers are set only if DRRS is supported
4518 		 * (to make sure the registers are not unnecessarily accessed).
4519 		 */
4520 		if (m2_n2 && crtc_state->has_drrs &&
4521 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
4522 			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
4523 				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
4524 			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
4525 				       m2_n2->gmch_n);
4526 			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
4527 				       m2_n2->link_m);
4528 			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
4529 				       m2_n2->link_n);
4530 		}
4531 	} else {
4532 		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
4533 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4534 		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4535 		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
4536 		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
4537 	}
4538 }
4539 
4540 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
4541 {
4542 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
4543 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4544 
4545 	if (m_n == M1_N1) {
4546 		dp_m_n = &crtc_state->dp_m_n;
4547 		dp_m2_n2 = &crtc_state->dp_m2_n2;
4548 	} else if (m_n == M2_N2) {
4549 
4550 		/*
4551 		 * M2_N2 registers are not supported. Hence m2_n2 divider value
4552 		 * needs to be programmed into M1_N1.
4553 		 */
4554 		dp_m_n = &crtc_state->dp_m2_n2;
4555 	} else {
4556 		drm_err(&i915->drm, "Unsupported divider value\n");
4557 		return;
4558 	}
4559 
4560 	if (crtc_state->has_pch_encoder)
4561 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
4562 	else
4563 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
4564 }
4565 
4566 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
4567 {
4568 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4569 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4570 	enum pipe pipe = crtc->pipe;
4571 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4572 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4573 	u32 crtc_vtotal, crtc_vblank_end;
4574 	int vsyncshift = 0;
4575 
4576 	/* We need to be careful not to change the adjusted mode, for otherwise
4577 	 * the hw state checker will get angry at the mismatch. */
4578 	crtc_vtotal = adjusted_mode->crtc_vtotal;
4579 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
4580 
4581 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4582 		/* the chip adds 2 halflines automatically */
4583 		crtc_vtotal -= 1;
4584 		crtc_vblank_end -= 1;
4585 
4586 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4587 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
4588 		else
4589 			vsyncshift = adjusted_mode->crtc_hsync_start -
4590 				adjusted_mode->crtc_htotal / 2;
4591 		if (vsyncshift < 0)
4592 			vsyncshift += adjusted_mode->crtc_htotal;
4593 	}
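	/*
	 * Worked example with hypothetical 1080i timings: crtc_htotal = 2200
	 * and crtc_hsync_start = 2008 give vsyncshift = 2008 - 2200 / 2 = 908.
	 */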
4594 
4595 	if (DISPLAY_VER(dev_priv) > 3)
4596 		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
4597 		               vsyncshift);
4598 
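	/*
	 * The timing registers hold minus-one values, e.g. HTOTAL packs
	 * (crtc_htotal - 1) << 16 | (crtc_hdisplay - 1); a mode with 1920
	 * active and 2200 total pixels programs (2199 << 16) | 1919 = 0x0897077f.
	 */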
4599 	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
4600 		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
4601 	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
4602 		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
4603 	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
4604 		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
4605 
4606 	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
4607 		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
4608 	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
4609 		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
4610 	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
4611 		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
4612 
4613 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4614 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4615 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4616 	 * bits. */
4617 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
4618 	    (pipe == PIPE_B || pipe == PIPE_C))
4619 		intel_de_write(dev_priv, VTOTAL(pipe),
4620 		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
4622 }
4623 
4624 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
4625 {
4626 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4627 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4628 	enum pipe pipe = crtc->pipe;
4629 
4630 	/* pipesrc controls the size that is scaled from, which should
4631 	 * always be the user's requested size.
4632 	 */
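	/* e.g. a 1920x1080 source programs (1919 << 16) | 1079 = 0x077f0437 */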
4633 	intel_de_write(dev_priv, PIPESRC(pipe),
4634 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
4635 }
4636 
4637 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4638 {
4639 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4640 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4641 
4642 	if (DISPLAY_VER(dev_priv) == 2)
4643 		return false;
4644 
4645 	if (DISPLAY_VER(dev_priv) >= 9 ||
4646 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4647 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4648 	else
4649 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4650 }
4651 
4652 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
4653 					 struct intel_crtc_state *pipe_config)
4654 {
4655 	struct drm_device *dev = crtc->base.dev;
4656 	struct drm_i915_private *dev_priv = to_i915(dev);
4657 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
4658 	u32 tmp;
4659 
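	/* The hardware stores all timing fields minus one, hence the +1s. */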
4660 	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
4661 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
4662 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
4663 
4664 	if (!transcoder_is_dsi(cpu_transcoder)) {
4665 		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
4666 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
4667 							(tmp & 0xffff) + 1;
4668 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
4669 						((tmp >> 16) & 0xffff) + 1;
4670 	}
4671 	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
4672 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
4673 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
4674 
4675 	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
4676 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
4677 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
4678 
4679 	if (!transcoder_is_dsi(cpu_transcoder)) {
4680 		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
4681 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
4682 							(tmp & 0xffff) + 1;
4683 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
4684 						((tmp >> 16) & 0xffff) + 1;
4685 	}
4686 	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
4687 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
4688 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
4689 
4690 	if (intel_pipe_is_interlaced(pipe_config)) {
4691 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
4692 		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
4693 		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
4694 	}
4695 }
4696 
4697 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
4698 				    struct intel_crtc_state *pipe_config)
4699 {
4700 	struct drm_device *dev = crtc->base.dev;
4701 	struct drm_i915_private *dev_priv = to_i915(dev);
4702 	u32 tmp;
4703 
4704 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
4705 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4706 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4707 }
4708 
4709 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
4710 {
4711 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4712 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4713 	u32 pipeconf;
4714 
4715 	pipeconf = 0;
4716 
4717 	/* we keep both pipes enabled on 830 */
4718 	if (IS_I830(dev_priv))
4719 		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
4720 
4721 	if (crtc_state->double_wide)
4722 		pipeconf |= PIPECONF_DOUBLE_WIDE;
4723 
4724 	/* only g4x and later have fancy bpc/dither controls */
4725 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4726 	    IS_CHERRYVIEW(dev_priv)) {
4727 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
4728 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
4729 			pipeconf |= PIPECONF_DITHER_EN |
4730 				    PIPECONF_DITHER_TYPE_SP;
4731 
4732 		switch (crtc_state->pipe_bpp) {
4733 		case 18:
4734 			pipeconf |= PIPECONF_6BPC;
4735 			break;
4736 		case 24:
4737 			pipeconf |= PIPECONF_8BPC;
4738 			break;
4739 		case 30:
4740 			pipeconf |= PIPECONF_10BPC;
4741 			break;
4742 		default:
4743 			/* Case prevented by intel_choose_pipe_bpp_dither. */
4744 			BUG();
4745 		}
4746 	}
4747 
4748 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
4749 		if (DISPLAY_VER(dev_priv) < 4 ||
4750 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4751 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4752 		else
4753 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
4754 	} else {
4755 		pipeconf |= PIPECONF_PROGRESSIVE;
4756 	}
4757 
4758 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4759 	     crtc_state->limited_color_range)
4760 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
4761 
4762 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
4763 
4764 	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
4765 
4766 	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
4767 	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
4768 }
4769 
4770 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4771 {
4772 	if (IS_I830(dev_priv))
4773 		return false;
4774 
4775 	return DISPLAY_VER(dev_priv) >= 4 ||
4776 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4777 }
4778 
4779 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
4780 {
4781 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4782 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4783 	u32 tmp;
4784 
4785 	if (!i9xx_has_pfit(dev_priv))
4786 		return;
4787 
4788 	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
4789 	if (!(tmp & PFIT_ENABLE))
4790 		return;
4791 
4792 	/* Check whether the pfit is attached to our pipe. */
4793 	if (DISPLAY_VER(dev_priv) < 4) {
4794 		if (crtc->pipe != PIPE_B)
4795 			return;
4796 	} else {
4797 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
4798 			return;
4799 	}
4800 
4801 	crtc_state->gmch_pfit.control = tmp;
4802 	crtc_state->gmch_pfit.pgm_ratios =
4803 		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
4804 }
4805 
4806 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
4807 			       struct intel_crtc_state *pipe_config)
4808 {
4809 	struct drm_device *dev = crtc->base.dev;
4810 	struct drm_i915_private *dev_priv = to_i915(dev);
4811 	enum pipe pipe = crtc->pipe;
4812 	struct dpll clock;
4813 	u32 mdiv;
4814 	int refclk = 100000;
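	/* VLV/CHV display PLLs run off a 100 MHz reference (refclk in kHz). */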
4815 
4816 	/* In case of DSI, DPLL will not be used */
4817 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4818 		return;
4819 
4820 	vlv_dpio_get(dev_priv);
4821 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4822 	vlv_dpio_put(dev_priv);
4823 
4824 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
4825 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
4826 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
4827 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
4828 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
4829 
4830 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
4831 }
4832 
4833 static void chv_crtc_clock_get(struct intel_crtc *crtc,
4834 			       struct intel_crtc_state *pipe_config)
4835 {
4836 	struct drm_device *dev = crtc->base.dev;
4837 	struct drm_i915_private *dev_priv = to_i915(dev);
4838 	enum pipe pipe = crtc->pipe;
4839 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
4840 	struct dpll clock;
4841 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
4842 	int refclk = 100000;
4843 
4844 	/* In case of DSI, DPLL will not be used */
4845 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4846 		return;
4847 
4848 	vlv_dpio_get(dev_priv);
4849 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
4850 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
4851 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
4852 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
4853 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
4854 	vlv_dpio_put(dev_priv);
4855 
4856 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
4857 	clock.m2 = (pll_dw0 & 0xff) << 22;
4858 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
4859 		clock.m2 |= pll_dw2 & 0x3fffff;
4860 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
4861 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
4862 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
4863 
4864 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
4865 }
4866 
4867 static enum intel_output_format
4868 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
4869 {
4870 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4871 	u32 tmp;
4872 
4873 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
4874 
4875 	if (tmp & PIPEMISC_YUV420_ENABLE) {
4876 		/* We support 4:2:0 in full blend mode only */
4877 		drm_WARN_ON(&dev_priv->drm,
4878 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
4879 
4880 		return INTEL_OUTPUT_FORMAT_YCBCR420;
4881 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
4882 		return INTEL_OUTPUT_FORMAT_YCBCR444;
4883 	} else {
4884 		return INTEL_OUTPUT_FORMAT_RGB;
4885 	}
4886 }
4887 
4888 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
4889 {
4890 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4891 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4892 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4893 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4894 	u32 tmp;
4895 
4896 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4897 
4898 	if (tmp & DISPPLANE_GAMMA_ENABLE)
4899 		crtc_state->gamma_enable = true;
4900 
4901 	if (!HAS_GMCH(dev_priv) &&
4902 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
4903 		crtc_state->csc_enable = true;
4904 }
4905 
4906 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4907 				 struct intel_crtc_state *pipe_config)
4908 {
4909 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4910 	enum intel_display_power_domain power_domain;
4911 	intel_wakeref_t wakeref;
4912 	u32 tmp;
4913 	bool ret;
4914 
4915 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
4916 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4917 	if (!wakeref)
4918 		return false;
4919 
4920 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4921 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4922 	pipe_config->shared_dpll = NULL;
4923 
4924 	ret = false;
4925 
4926 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
4927 	if (!(tmp & PIPECONF_ENABLE))
4928 		goto out;
4929 
4930 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4931 	    IS_CHERRYVIEW(dev_priv)) {
4932 		switch (tmp & PIPECONF_BPC_MASK) {
4933 		case PIPECONF_6BPC:
4934 			pipe_config->pipe_bpp = 18;
4935 			break;
4936 		case PIPECONF_8BPC:
4937 			pipe_config->pipe_bpp = 24;
4938 			break;
4939 		case PIPECONF_10BPC:
4940 			pipe_config->pipe_bpp = 30;
4941 			break;
4942 		default:
4943 			break;
4944 		}
4945 	}
4946 
4947 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4948 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
4949 		pipe_config->limited_color_range = true;
4950 
4951 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
4952 		PIPECONF_GAMMA_MODE_SHIFT;
4953 
4954 	if (IS_CHERRYVIEW(dev_priv))
4955 		pipe_config->cgm_mode = intel_de_read(dev_priv,
4956 						      CGM_PIPE_MODE(crtc->pipe));
4957 
4958 	i9xx_get_pipe_color_config(pipe_config);
4959 	intel_color_get_config(pipe_config);
4960 
4961 	if (DISPLAY_VER(dev_priv) < 4)
4962 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
4963 
4964 	intel_get_transcoder_timings(crtc, pipe_config);
4965 	intel_get_pipe_src_size(crtc, pipe_config);
4966 
4967 	i9xx_get_pfit_config(pipe_config);
4968 
4969 	if (DISPLAY_VER(dev_priv) >= 4) {
4970 		/* No way to read it out on pipes B and C */
4971 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
4972 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
4973 		else
4974 			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
4975 		pipe_config->pixel_multiplier =
4976 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
4977 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
4978 		pipe_config->dpll_hw_state.dpll_md = tmp;
4979 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
4980 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
4981 		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
4982 		pipe_config->pixel_multiplier =
4983 			((tmp & SDVO_MULTIPLIER_MASK)
4984 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
4985 	} else {
4986 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
4987 		 * port and will be fixed up in the encoder->get_config
4988 		 * function. */
4989 		pipe_config->pixel_multiplier = 1;
4990 	}
4991 	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
4992 							DPLL(crtc->pipe));
4993 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
4994 		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
4995 							       FP0(crtc->pipe));
4996 		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
4997 							       FP1(crtc->pipe));
4998 	} else {
4999 		/* Mask out read-only status bits. */
5000 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5001 						     DPLL_PORTC_READY_MASK |
5002 						     DPLL_PORTB_READY_MASK);
5003 	}
5004 
5005 	if (IS_CHERRYVIEW(dev_priv))
5006 		chv_crtc_clock_get(crtc, pipe_config);
5007 	else if (IS_VALLEYVIEW(dev_priv))
5008 		vlv_crtc_clock_get(crtc, pipe_config);
5009 	else
5010 		i9xx_crtc_clock_get(crtc, pipe_config);
5011 
5012 	/*
5013 	 * Normally the dotclock is filled in by the encoder .get_config()
5014 	 * but in case the pipe is enabled w/o any ports we need a sane
5015 	 * default.
5016 	 */
5017 	pipe_config->hw.adjusted_mode.crtc_clock =
5018 		pipe_config->port_clock / pipe_config->pixel_multiplier;
5019 
5020 	ret = true;
5021 
5022 out:
5023 	intel_display_power_put(dev_priv, power_domain, wakeref);
5024 
5025 	return ret;
5026 }
5027 
5028 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
5029 {
5030 	struct intel_encoder *encoder;
5031 	int i;
5032 	u32 val, final;
5033 	bool has_lvds = false;
5034 	bool has_cpu_edp = false;
5035 	bool has_panel = false;
5036 	bool has_ck505 = false;
5037 	bool can_ssc = false;
5038 	bool using_ssc_source = false;
5039 
5040 	/* We need to take the global config into account */
5041 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5042 		switch (encoder->type) {
5043 		case INTEL_OUTPUT_LVDS:
5044 			has_panel = true;
5045 			has_lvds = true;
5046 			break;
5047 		case INTEL_OUTPUT_EDP:
5048 			has_panel = true;
5049 			if (encoder->port == PORT_A)
5050 				has_cpu_edp = true;
5051 			break;
5052 		default:
5053 			break;
5054 		}
5055 	}
5056 
5057 	if (HAS_PCH_IBX(dev_priv)) {
5058 		has_ck505 = dev_priv->vbt.display_clock_mode;
5059 		can_ssc = has_ck505;
5060 	} else {
5061 		has_ck505 = false;
5062 		can_ssc = true;
5063 	}
5064 
5065 	/* Check if any DPLLs are using the SSC source */
5066 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
5067 		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
5068 
5069 		if (!(temp & DPLL_VCO_ENABLE))
5070 			continue;
5071 
5072 		if ((temp & PLL_REF_INPUT_MASK) ==
5073 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5074 			using_ssc_source = true;
5075 			break;
5076 		}
5077 	}
5078 
5079 	drm_dbg_kms(&dev_priv->drm,
5080 		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
5081 		    has_panel, has_lvds, has_ck505, using_ssc_source);
5082 
5083 	/* Ironlake: try to set up the display reference clock before
5084 	 * enabling the DPLLs. This is only under the driver's control
5085 	 * after the PCH B stepping; earlier steppings ignore this
5086 	 * setting.
5087 	 */
5088 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
5089 
5090 	/* As we must carefully and slowly disable/enable each source in turn,
5091 	 * compute the final state we want first and check if we need to
5092 	 * make any changes at all.
5093 	 */
5094 	final = val;
5095 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
5096 	if (has_ck505)
5097 		final |= DREF_NONSPREAD_CK505_ENABLE;
5098 	else
5099 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
5100 
5101 	final &= ~DREF_SSC_SOURCE_MASK;
5102 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5103 	final &= ~DREF_SSC1_ENABLE;
5104 
5105 	if (has_panel) {
5106 		final |= DREF_SSC_SOURCE_ENABLE;
5107 
5108 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
5109 			final |= DREF_SSC1_ENABLE;
5110 
5111 		if (has_cpu_edp) {
5112 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
5113 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5114 			else
5115 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5116 		} else
5117 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5118 	} else if (using_ssc_source) {
5119 		final |= DREF_SSC_SOURCE_ENABLE;
5120 		final |= DREF_SSC1_ENABLE;
5121 	}
5122 
5123 	if (final == val)
5124 		return;
5125 
5126 	/* Always enable nonspread source */
5127 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
5128 
5129 	if (has_ck505)
5130 		val |= DREF_NONSPREAD_CK505_ENABLE;
5131 	else
5132 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
5133 
5134 	if (has_panel) {
5135 		val &= ~DREF_SSC_SOURCE_MASK;
5136 		val |= DREF_SSC_SOURCE_ENABLE;
5137 
5138 		/* SSC must be turned on before enabling the CPU output */
5139 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5140 			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
5141 			val |= DREF_SSC1_ENABLE;
5142 		} else
5143 			val &= ~DREF_SSC1_ENABLE;
5144 
5145 		/* Get SSC going before enabling the outputs */
5146 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5147 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5148 		udelay(200);
5149 
5150 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5151 
5152 		/* Enable CPU source on CPU attached eDP */
5153 		if (has_cpu_edp) {
5154 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5155 				drm_dbg_kms(&dev_priv->drm,
5156 					    "Using SSC on eDP\n");
5157 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5158 			} else
5159 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5160 		} else
5161 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5162 
5163 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5164 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5165 		udelay(200);
5166 	} else {
5167 		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
5168 
5169 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5170 
5171 		/* Turn off CPU output */
5172 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5173 
5174 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5175 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5176 		udelay(200);
5177 
5178 		if (!using_ssc_source) {
5179 			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
5180 
5181 			/* Turn off the SSC source */
5182 			val &= ~DREF_SSC_SOURCE_MASK;
5183 			val |= DREF_SSC_SOURCE_DISABLE;
5184 
5185 			/* Turn off SSC1 */
5186 			val &= ~DREF_SSC1_ENABLE;
5187 
5188 			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5189 			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5190 			udelay(200);
5191 		}
5192 	}
5193 
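	/* Whatever path we took, we must have converged on the target state. */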
5194 	BUG_ON(val != final);
5195 }
5196 
5197 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5198 {
5199 	u32 tmp;
5200 
5201 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5202 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5203 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5204 
5205 	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5206 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5207 		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5208 
5209 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5210 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5211 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5212 
5213 	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5214 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5215 		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5216 }
5217 
5218 /* WaMPhyProgramming:hsw */
5219 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5220 {
5221 	u32 tmp;
5222 
5223 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5224 	tmp &= ~(0xFF << 24);
5225 	tmp |= (0x12 << 24);
5226 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5227 
5228 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5229 	tmp |= (1 << 11);
5230 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5231 
5232 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5233 	tmp |= (1 << 11);
5234 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5235 
5236 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5237 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5238 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5239 
5240 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5241 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5242 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5243 
5244 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5245 	tmp &= ~(7 << 13);
5246 	tmp |= (5 << 13);
5247 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5248 
5249 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5250 	tmp &= ~(7 << 13);
5251 	tmp |= (5 << 13);
5252 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5253 
5254 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5255 	tmp &= ~0xFF;
5256 	tmp |= 0x1C;
5257 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5258 
5259 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5260 	tmp &= ~0xFF;
5261 	tmp |= 0x1C;
5262 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5263 
5264 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5265 	tmp &= ~(0xFF << 16);
5266 	tmp |= (0x1C << 16);
5267 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5268 
5269 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5270 	tmp &= ~(0xFF << 16);
5271 	tmp |= (0x1C << 16);
5272 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5273 
5274 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5275 	tmp |= (1 << 27);
5276 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5277 
5278 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5279 	tmp |= (1 << 27);
5280 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5281 
5282 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5283 	tmp &= ~(0xF << 28);
5284 	tmp |= (4 << 28);
5285 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5286 
5287 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5288 	tmp &= ~(0xF << 28);
5289 	tmp |= (4 << 28);
5290 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5291 }
5292 
5293 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5294  * Programming" based on the parameters passed:
5295  * - Sequence to enable CLKOUT_DP
5296  * - Sequence to enable CLKOUT_DP without spread
5297  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5298  */
5299 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
5300 				 bool with_spread, bool with_fdi)
5301 {
5302 	u32 reg, tmp;
5303 
5304 	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
5305 		     "FDI requires downspread\n"))
5306 		with_spread = true;
5307 	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
5308 		     with_fdi, "LP PCH doesn't have FDI\n"))
5309 		with_fdi = false;
5310 
5311 	mutex_lock(&dev_priv->sb_lock);
5312 
5313 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5314 	tmp &= ~SBI_SSCCTL_DISABLE;
5315 	tmp |= SBI_SSCCTL_PATHALT;
5316 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5317 
5318 	udelay(24);
5319 
5320 	if (with_spread) {
5321 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5322 		tmp &= ~SBI_SSCCTL_PATHALT;
5323 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5324 
5325 		if (with_fdi) {
5326 			lpt_reset_fdi_mphy(dev_priv);
5327 			lpt_program_fdi_mphy(dev_priv);
5328 		}
5329 	}
5330 
5331 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5332 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5333 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5334 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5335 
5336 	mutex_unlock(&dev_priv->sb_lock);
5337 }
5338 
5339 /* Sequence to disable CLKOUT_DP */
5340 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
5341 {
5342 	u32 reg, tmp;
5343 
5344 	mutex_lock(&dev_priv->sb_lock);
5345 
5346 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5347 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5348 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5349 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5350 
5351 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5352 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
5353 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
5354 			tmp |= SBI_SSCCTL_PATHALT;
5355 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5356 			udelay(32);
5357 		}
5358 		tmp |= SBI_SSCCTL_DISABLE;
5359 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5360 	}
5361 
5362 	mutex_unlock(&dev_priv->sb_lock);
5363 }
5364 
5365 #define BEND_IDX(steps) ((50 + (steps)) / 5)
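/*
 * BEND_IDX() maps steps in [-50, 50] (multiples of 5) onto table indices
 * 0..20, e.g. BEND_IDX(-50) = 0, BEND_IDX(0) = 10 and BEND_IDX(50) = 20.
 */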
5366 
5367 static const u16 sscdivintphase[] = {
5368 	[BEND_IDX( 50)] = 0x3B23,
5369 	[BEND_IDX( 45)] = 0x3B23,
5370 	[BEND_IDX( 40)] = 0x3C23,
5371 	[BEND_IDX( 35)] = 0x3C23,
5372 	[BEND_IDX( 30)] = 0x3D23,
5373 	[BEND_IDX( 25)] = 0x3D23,
5374 	[BEND_IDX( 20)] = 0x3E23,
5375 	[BEND_IDX( 15)] = 0x3E23,
5376 	[BEND_IDX( 10)] = 0x3F23,
5377 	[BEND_IDX(  5)] = 0x3F23,
5378 	[BEND_IDX(  0)] = 0x0025,
5379 	[BEND_IDX( -5)] = 0x0025,
5380 	[BEND_IDX(-10)] = 0x0125,
5381 	[BEND_IDX(-15)] = 0x0125,
5382 	[BEND_IDX(-20)] = 0x0225,
5383 	[BEND_IDX(-25)] = 0x0225,
5384 	[BEND_IDX(-30)] = 0x0325,
5385 	[BEND_IDX(-35)] = 0x0325,
5386 	[BEND_IDX(-40)] = 0x0425,
5387 	[BEND_IDX(-45)] = 0x0425,
5388 	[BEND_IDX(-50)] = 0x0525,
5389 };
5390 
5391 /*
5392  * Bend CLKOUT_DP
5393  * steps -50 to 50 inclusive, in steps of 5
5394  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5395  * change in clock period = -(steps / 10) * 5.787 ps
5396  */
5397 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5398 {
5399 	u32 tmp;
5400 	int idx = BEND_IDX(steps);
5401 
5402 	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5403 		return;
5404 
5405 	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5406 		return;
5407 
5408 	mutex_lock(&dev_priv->sb_lock);
5409 
5410 	if (steps % 10 != 0)
5411 		tmp = 0xAAAAAAAB;
5412 	else
5413 		tmp = 0x00000000;
5414 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5415 
5416 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5417 	tmp &= 0xffff0000;
5418 	tmp |= sscdivintphase[idx];
5419 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5420 
5421 	mutex_unlock(&dev_priv->sb_lock);
5422 }
5423 
5424 #undef BEND_IDX
5425 
5426 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5427 {
5428 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5429 	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5430 
5431 	if ((ctl & SPLL_PLL_ENABLE) == 0)
5432 		return false;
5433 
5434 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5435 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5436 		return true;
5437 
5438 	if (IS_BROADWELL(dev_priv) &&
5439 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5440 		return true;
5441 
5442 	return false;
5443 }
5444 
5445 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5446 			       enum intel_dpll_id id)
5447 {
5448 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5449 	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5450 
5451 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
5452 		return false;
5453 
5454 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5455 		return true;
5456 
5457 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5458 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5459 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5460 		return true;
5461 
5462 	return false;
5463 }
5464 
5465 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
5466 {
5467 	struct intel_encoder *encoder;
5468 	bool has_fdi = false;
5469 
5470 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5471 		switch (encoder->type) {
5472 		case INTEL_OUTPUT_ANALOG:
5473 			has_fdi = true;
5474 			break;
5475 		default:
5476 			break;
5477 		}
5478 	}
5479 
5480 	/*
5481 	 * The BIOS may have decided to use the PCH SSC
5482 	 * reference so we must not disable it until the
5483 	 * relevant PLLs have stopped relying on it. We'll
5484 	 * just leave the PCH SSC reference enabled in case
5485 	 * any active PLL is using it. It will get disabled
5486 	 * after runtime suspend if we don't have FDI.
5487 	 *
5488 	 * TODO: Move the whole reference clock handling
5489 	 * to the modeset sequence proper so that we can
5490 	 * actually enable/disable/reconfigure these things
5491 	 * safely. To do that we need to introduce a real
5492 	 * clock hierarchy. That would also allow us to do
5493 	 * clock bending finally.
5494 	 */
5495 	dev_priv->pch_ssc_use = 0;
5496 
5497 	if (spll_uses_pch_ssc(dev_priv)) {
5498 		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
5499 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
5500 	}
5501 
5502 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
5503 		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
5504 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
5505 	}
5506 
5507 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
5508 		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
5509 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
5510 	}
5511 
5512 	if (dev_priv->pch_ssc_use)
5513 		return;
5514 
5515 	if (has_fdi) {
5516 		lpt_bend_clkout_dp(dev_priv, 0);
5517 		lpt_enable_clkout_dp(dev_priv, true, true);
5518 	} else {
5519 		lpt_disable_clkout_dp(dev_priv);
5520 	}
5521 }
5522 
5523 /*
5524  * Initialize reference clocks when the driver loads
5525  */
5526 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
5527 {
5528 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
5529 		ilk_init_pch_refclk(dev_priv);
5530 	else if (HAS_PCH_LPT(dev_priv))
5531 		lpt_init_pch_refclk(dev_priv);
5532 }
5533 
5534 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
5535 {
5536 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5537 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5538 	enum pipe pipe = crtc->pipe;
5539 	u32 val;
5540 
5541 	val = 0;
5542 
5543 	switch (crtc_state->pipe_bpp) {
5544 	case 18:
5545 		val |= PIPECONF_6BPC;
5546 		break;
5547 	case 24:
5548 		val |= PIPECONF_8BPC;
5549 		break;
5550 	case 30:
5551 		val |= PIPECONF_10BPC;
5552 		break;
5553 	case 36:
5554 		val |= PIPECONF_12BPC;
5555 		break;
5556 	default:
5557 		/* Case prevented by intel_choose_pipe_bpp_dither. */
5558 		BUG();
5559 	}
5560 
5561 	if (crtc_state->dither)
5562 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5563 
5564 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5565 		val |= PIPECONF_INTERLACED_ILK;
5566 	else
5567 		val |= PIPECONF_PROGRESSIVE;
5568 
5569 	/*
5570 	 * This would end up with an odd purple hue over
5571 	 * the entire display. Make sure we don't do it.
5572 	 */
5573 	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
5574 		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
5575 
5576 	if (crtc_state->limited_color_range &&
5577 	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5578 		val |= PIPECONF_COLOR_RANGE_SELECT;
5579 
5580 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5581 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
5582 
5583 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5584 
5585 	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5586 
5587 	intel_de_write(dev_priv, PIPECONF(pipe), val);
5588 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
5589 }
5590 
5591 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
5592 {
5593 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5594 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5595 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5596 	u32 val = 0;
5597 
5598 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
5599 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5600 
5601 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5602 		val |= PIPECONF_INTERLACED_ILK;
5603 	else
5604 		val |= PIPECONF_PROGRESSIVE;
5605 
5606 	if (IS_HASWELL(dev_priv) &&
5607 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5608 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5609 
5610 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5611 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5612 }
5613 
5614 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
5615 {
5616 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5617 	const struct intel_crtc_scaler_state *scaler_state =
5618 		&crtc_state->scaler_state;
5619 
5620 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5621 	u32 val = 0;
5622 	int i;
5623 
5624 	switch (crtc_state->pipe_bpp) {
5625 	case 18:
5626 		val |= PIPEMISC_6_BPC;
5627 		break;
5628 	case 24:
5629 		val |= PIPEMISC_8_BPC;
5630 		break;
5631 	case 30:
5632 		val |= PIPEMISC_10_BPC;
5633 		break;
5634 	case 36:
5635 		/* Port output 12BPC defined for ADLP+ */
5636 		if (DISPLAY_VER(dev_priv) > 12)
5637 			val |= PIPEMISC_12_BPC_ADLP;
5638 		break;
5639 	default:
5640 		MISSING_CASE(crtc_state->pipe_bpp);
5641 		break;
5642 	}
5643 
5644 	if (crtc_state->dither)
5645 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
5646 
5647 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
5648 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
5649 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
5650 
5651 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5652 		val |= PIPEMISC_YUV420_ENABLE |
5653 			PIPEMISC_YUV420_MODE_FULL_BLEND;
5654 
5655 	if (DISPLAY_VER(dev_priv) >= 11 &&
5656 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
5657 					   BIT(PLANE_CURSOR))) == 0)
5658 		val |= PIPEMISC_HDR_MODE_PRECISION;
5659 
5660 	if (DISPLAY_VER(dev_priv) >= 12)
5661 		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
5662 
5663 	if (IS_ALDERLAKE_P(dev_priv)) {
5664 		bool scaler_in_use = false;
5665 
5666 		for (i = 0; i < crtc->num_scalers; i++) {
5667 			if (!scaler_state->scalers[i].in_use)
5668 				continue;
5669 
5670 			scaler_in_use = true;
5671 			break;
5672 		}
5673 
5674 		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
5675 			     PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
5676 			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
5677 			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
5678 	}
5679 
5680 	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
5681 }
5682 
5683 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
5684 {
5685 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5686 	u32 tmp;
5687 
5688 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5689 
5690 	switch (tmp & PIPEMISC_BPC_MASK) {
5691 	case PIPEMISC_6_BPC:
5692 		return 18;
5693 	case PIPEMISC_8_BPC:
5694 		return 24;
5695 	case PIPEMISC_10_BPC:
5696 		return 30;
5697 	/*
5698 	 * PORT OUTPUT 12 BPC defined for ADLP+.
5699 	 *
5700 	 * TODO:
5701 	 * For previous platforms with DSI interface, bits 5:7
5702 	 * are used for storing pipe_bpp irrespective of dithering.
5703 	 * Since the value of 12 BPC is not defined for these bits
5704 	 * on older platforms, need to find a workaround for 12 BPC
5705 	 * MIPI DSI HW readout.
5706 	 */
5707 	case PIPEMISC_12_BPC_ADLP:
5708 		if (DISPLAY_VER(dev_priv) > 12)
5709 			return 36;
5710 		fallthrough;
5711 	default:
5712 		MISSING_CASE(tmp);
5713 		return 0;
5714 	}
5715 }
5716 
5717 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
5718 {
5719 	/*
5720 	 * Account for spread spectrum to avoid
5721 	 * oversubscribing the link. Max center spread
5722 	 * is 2.5%; use 5% for safety's sake.
5723 	 */
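	/*
	 * Worked example: target_clock = 148500, bpp = 24 and link_bw = 270000
	 * give bps = 148500 * 24 * 21 / 20 = 3742200, and
	 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
	 */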
5724 	u32 bps = target_clock * bpp * 21 / 20;
5725 	return DIV_ROUND_UP(bps, link_bw * 8);
5726 }
5727 
5728 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5729 					 struct intel_link_m_n *m_n)
5730 {
5731 	struct drm_device *dev = crtc->base.dev;
5732 	struct drm_i915_private *dev_priv = to_i915(dev);
5733 	enum pipe pipe = crtc->pipe;
5734 
5735 	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
5736 	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
5737 	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5738 		& ~TU_SIZE_MASK;
5739 	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
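	/* The TU size field is stored minus one, hence the +1 below. */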
5740 	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5741 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5742 }
5743 
5744 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
5745 					 enum transcoder transcoder,
5746 					 struct intel_link_m_n *m_n,
5747 					 struct intel_link_m_n *m2_n2)
5748 {
5749 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5750 	enum pipe pipe = crtc->pipe;
5751 
5752 	if (DISPLAY_VER(dev_priv) >= 5) {
5753 		m_n->link_m = intel_de_read(dev_priv,
5754 					    PIPE_LINK_M1(transcoder));
5755 		m_n->link_n = intel_de_read(dev_priv,
5756 					    PIPE_LINK_N1(transcoder));
5757 		m_n->gmch_m = intel_de_read(dev_priv,
5758 					    PIPE_DATA_M1(transcoder))
5759 			& ~TU_SIZE_MASK;
5760 		m_n->gmch_n = intel_de_read(dev_priv,
5761 					    PIPE_DATA_N1(transcoder));
5762 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
5763 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5764 
5765 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
5766 			m2_n2->link_m = intel_de_read(dev_priv,
5767 						      PIPE_LINK_M2(transcoder));
5768 			m2_n2->link_n = intel_de_read(dev_priv,
5769 						      PIPE_LINK_N2(transcoder));
5770 			m2_n2->gmch_m = intel_de_read(dev_priv,
5771 						      PIPE_DATA_M2(transcoder))
5772 					& ~TU_SIZE_MASK;
5773 			m2_n2->gmch_n = intel_de_read(dev_priv,
5774 						      PIPE_DATA_N2(transcoder))
5775 			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
5776 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5777 		}
5778 	} else {
5779 		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
5780 		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
5781 		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5782 			& ~TU_SIZE_MASK;
5783 		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
5784 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5785 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5786 	}
5787 }
5788 
5789 void intel_dp_get_m_n(struct intel_crtc *crtc,
5790 		      struct intel_crtc_state *pipe_config)
5791 {
5792 	if (pipe_config->has_pch_encoder)
5793 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5794 	else
5795 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5796 					     &pipe_config->dp_m_n,
5797 					     &pipe_config->dp_m2_n2);
5798 }
5799 
5800 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
5801 				   struct intel_crtc_state *pipe_config)
5802 {
5803 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5804 				     &pipe_config->fdi_m_n, NULL);
5805 }
5806 
5807 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
5808 				  u32 pos, u32 size)
5809 {
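	/* pos/size pack x/width in bits 31:16 and y/height in bits 15:0. */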
5810 	drm_rect_init(&crtc_state->pch_pfit.dst,
5811 		      pos >> 16, pos & 0xffff,
5812 		      size >> 16, size & 0xffff);
5813 }
5814 
5815 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
5816 {
5817 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5818 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5819 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
5820 	int id = -1;
5821 	int i;
5822 
5823 	/* find scaler attached to this pipe */
5824 	for (i = 0; i < crtc->num_scalers; i++) {
5825 		u32 ctl, pos, size;
5826 
5827 		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
5828 		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
5829 			continue;
5830 
5831 		id = i;
5832 		crtc_state->pch_pfit.enabled = true;
5833 
5834 		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
5835 		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
5836 
5837 		ilk_get_pfit_pos_size(crtc_state, pos, size);
5838 
5839 		scaler_state->scalers[i].in_use = true;
5840 		break;
5841 	}
5842 
5843 	scaler_state->scaler_id = id;
5844 	if (id >= 0)
5845 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
5846 	else
5847 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
5848 }
5849 
5850 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
5851 {
5852 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5853 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5854 	u32 ctl, pos, size;
5855 
5856 	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
5857 	if ((ctl & PF_ENABLE) == 0)
5858 		return;
5859 
5860 	crtc_state->pch_pfit.enabled = true;
5861 
5862 	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
5863 	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
5864 
5865 	ilk_get_pfit_pos_size(crtc_state, pos, size);
5866 
5867 	/*
5868 	 * We currently do not free assignments of panel fitters on
5869 	 * ivb/hsw (since we don't use the higher upscaling modes which
5870 	 * differentiate them) so just WARN about this case for now.
5871 	 */
5872 	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
5873 		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
5874 }
5875 
5876 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
5877 				struct intel_crtc_state *pipe_config)
5878 {
5879 	struct drm_device *dev = crtc->base.dev;
5880 	struct drm_i915_private *dev_priv = to_i915(dev);
5881 	enum intel_display_power_domain power_domain;
5882 	intel_wakeref_t wakeref;
5883 	u32 tmp;
5884 	bool ret;
5885 
5886 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5887 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5888 	if (!wakeref)
5889 		return false;
5890 
5891 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5892 	pipe_config->shared_dpll = NULL;
5893 
5894 	ret = false;
5895 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5896 	if (!(tmp & PIPECONF_ENABLE))
5897 		goto out;
5898 
5899 	switch (tmp & PIPECONF_BPC_MASK) {
5900 	case PIPECONF_6BPC:
5901 		pipe_config->pipe_bpp = 18;
5902 		break;
5903 	case PIPECONF_8BPC:
5904 		pipe_config->pipe_bpp = 24;
5905 		break;
5906 	case PIPECONF_10BPC:
5907 		pipe_config->pipe_bpp = 30;
5908 		break;
5909 	case PIPECONF_12BPC:
5910 		pipe_config->pipe_bpp = 36;
5911 		break;
5912 	default:
5913 		break;
5914 	}
5915 
5916 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
5917 		pipe_config->limited_color_range = true;
5918 
5919 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
5920 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
5921 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
5922 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5923 		break;
5924 	default:
5925 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5926 		break;
5927 	}
5928 
5929 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
5930 		PIPECONF_GAMMA_MODE_SHIFT;
5931 
5932 	pipe_config->csc_mode = intel_de_read(dev_priv,
5933 					      PIPE_CSC_MODE(crtc->pipe));
5934 
5935 	i9xx_get_pipe_color_config(pipe_config);
5936 	intel_color_get_config(pipe_config);
5937 
5938 	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5939 		struct intel_shared_dpll *pll;
5940 		enum intel_dpll_id pll_id;
5941 		bool pll_active;
5942 
5943 		pipe_config->has_pch_encoder = true;
5944 
5945 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
5946 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5947 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
5948 
5949 		ilk_get_fdi_m_n_config(crtc, pipe_config);
5950 
5951 		if (HAS_PCH_IBX(dev_priv)) {
5952 			/*
5953 			 * The pipe->pch transcoder and pch transcoder->pll
5954 			 * mappings are fixed.
5955 			 */
5956 			pll_id = (enum intel_dpll_id) crtc->pipe;
5957 		} else {
5958 			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5959 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
5960 				pll_id = DPLL_ID_PCH_PLL_B;
5961 			else
5962 				pll_id = DPLL_ID_PCH_PLL_A;
5963 		}
5964 
5965 		pipe_config->shared_dpll =
5966 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
5967 		pll = pipe_config->shared_dpll;
5968 
5969 		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
5970 						     &pipe_config->dpll_hw_state);
5971 		drm_WARN_ON(dev, !pll_active);
5972 
5973 		tmp = pipe_config->dpll_hw_state.dpll;
5974 		pipe_config->pixel_multiplier =
5975 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5976 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5977 
5978 		ilk_pch_clock_get(crtc, pipe_config);
5979 	} else {
5980 		pipe_config->pixel_multiplier = 1;
5981 	}
5982 
5983 	intel_get_transcoder_timings(crtc, pipe_config);
5984 	intel_get_pipe_src_size(crtc, pipe_config);
5985 
5986 	ilk_get_pfit_config(pipe_config);
5987 
5988 	ret = true;
5989 
5990 out:
5991 	intel_display_power_put(dev_priv, power_domain, wakeref);
5992 
5993 	return ret;
5994 }
5995 
5996 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
5997 				     struct intel_crtc_state *pipe_config,
5998 				     struct intel_display_power_domain_set *power_domain_set)
5999 {
6000 	struct drm_device *dev = crtc->base.dev;
6001 	struct drm_i915_private *dev_priv = to_i915(dev);
6002 	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
6003 	unsigned long enabled_panel_transcoders = 0;
6004 	enum transcoder panel_transcoder;
6005 	u32 tmp;
6006 
6007 	if (DISPLAY_VER(dev_priv) >= 11)
6008 		panel_transcoder_mask |=
6009 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
6010 
6011 	/*
6012 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
6013 	 * and DSI transcoders handled below.
6014 	 */
6015 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6016 
6017 	/*
6018 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
6019 	 * consistency and less surprising code; it's in an always-on power well).
6020 	 */
6021 	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
6022 				       panel_transcoder_mask) {
6023 		bool force_thru = false;
6024 		enum pipe trans_pipe;
6025 
6026 		tmp = intel_de_read(dev_priv,
6027 				    TRANS_DDI_FUNC_CTL(panel_transcoder));
6028 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6029 			continue;
6030 
6031 		/*
6032 		 * Log all enabled ones, only use the first one.
6033 		 *
6034 		 * FIXME: This won't work for two separate DSI displays.
6035 		 */
6036 		enabled_panel_transcoders |= BIT(panel_transcoder);
6037 		if (enabled_panel_transcoders != BIT(panel_transcoder))
6038 			continue;
6039 
6040 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6041 		default:
6042 			drm_WARN(dev, 1,
6043 				 "unknown pipe linked to transcoder %s\n",
6044 				 transcoder_name(panel_transcoder));
6045 			fallthrough;
6046 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
6047 			force_thru = true;
6048 			fallthrough;
6049 		case TRANS_DDI_EDP_INPUT_A_ON:
6050 			trans_pipe = PIPE_A;
6051 			break;
6052 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
6053 			trans_pipe = PIPE_B;
6054 			break;
6055 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
6056 			trans_pipe = PIPE_C;
6057 			break;
6058 		case TRANS_DDI_EDP_INPUT_D_ONOFF:
6059 			trans_pipe = PIPE_D;
6060 			break;
6061 		}
6062 
6063 		if (trans_pipe == crtc->pipe) {
6064 			pipe_config->cpu_transcoder = panel_transcoder;
6065 			pipe_config->pch_pfit.force_thru = force_thru;
6066 		}
6067 	}
6068 
6069 	/*
6070 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
6071 	 */
6072 	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
6073 		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));
6074 
6075 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6076 						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6077 		return false;
6078 
6079 	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
6080 
6081 	return tmp & PIPECONF_ENABLE;
6082 }
6083 
6084 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
6085 					 struct intel_crtc_state *pipe_config,
6086 					 struct intel_display_power_domain_set *power_domain_set)
6087 {
6088 	struct drm_device *dev = crtc->base.dev;
6089 	struct drm_i915_private *dev_priv = to_i915(dev);
6090 	enum transcoder cpu_transcoder;
6091 	enum port port;
6092 	u32 tmp;
6093 
6094 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
6095 		if (port == PORT_A)
6096 			cpu_transcoder = TRANSCODER_DSI_A;
6097 		else
6098 			cpu_transcoder = TRANSCODER_DSI_C;
6099 
6100 		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6101 							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
6102 			continue;
6103 
6104 		/*
6105 		 * The PLL needs to be enabled with a valid divider
6106 		 * configuration, otherwise accessing DSI registers will hang
6107 		 * the machine. See BSpec North Display Engine
6108 		 * registers/MIPI[BXT]. We can break out here early, since we
6109 		 * need the same DSI PLL to be enabled for both DSI ports.
6110 		 */
6111 		if (!bxt_dsi_pll_is_enabled(dev_priv))
6112 			break;
6113 
6114 		/* XXX: this works for video mode only */
6115 		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
6116 		if (!(tmp & DPI_ENABLE))
6117 			continue;
6118 
6119 		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
6120 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
6121 			continue;
6122 
6123 		pipe_config->cpu_transcoder = cpu_transcoder;
6124 		break;
6125 	}
6126 
6127 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
6128 }
6129 
6130 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
6131 				   struct intel_crtc_state *pipe_config)
6132 {
6133 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6134 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6135 	enum port port;
6136 	u32 tmp;
6137 
6138 	if (transcoder_is_dsi(cpu_transcoder)) {
6139 		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
6140 						PORT_A : PORT_B;
6141 	} else {
6142 		tmp = intel_de_read(dev_priv,
6143 				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
6144 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6145 			return;
6146 		if (DISPLAY_VER(dev_priv) >= 12)
6147 			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6148 		else
6149 			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6150 	}
6151 
6152 	/*
6153 	 * Haswell has only one FDI/PCH transcoder, transcoder A, which is connected to
6154 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
6155 	 * the PCH transcoder is on.
6156 	 */
6157 	if (DISPLAY_VER(dev_priv) < 9 &&
6158 	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
6159 		pipe_config->has_pch_encoder = true;
6160 
6161 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
6162 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6163 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6164 
6165 		ilk_get_fdi_m_n_config(crtc, pipe_config);
6166 	}
6167 }
6168 
6169 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
6170 				struct intel_crtc_state *pipe_config)
6171 {
6172 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6173 	struct intel_display_power_domain_set power_domain_set = { };
6174 	bool active;
6175 	u32 tmp;
6176 
6177 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6178 						       POWER_DOMAIN_PIPE(crtc->pipe)))
6179 		return false;
6180 
6181 	pipe_config->shared_dpll = NULL;
6182 
6183 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
6184 
6185 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
6186 	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
6187 		drm_WARN_ON(&dev_priv->drm, active);
6188 		active = true;
6189 	}
6190 
6191 	intel_dsc_get_config(pipe_config);
6192 	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
6193 		intel_uncompressed_joiner_get_config(pipe_config);
6194 
6195 	if (!active) {
6196 		/* bigjoiner slave doesn't enable transcoder */
6197 		if (!pipe_config->bigjoiner_slave)
6198 			goto out;
6199 
6200 		active = true;
6201 		pipe_config->pixel_multiplier = 1;
6202 
6203 	/* we cannot read out most state, so don't bother... */
6204 		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
6205 	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
6206 	    DISPLAY_VER(dev_priv) >= 11) {
6207 		hsw_get_ddi_port_state(crtc, pipe_config);
6208 		intel_get_transcoder_timings(crtc, pipe_config);
6209 	}
6210 
6211 	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
6212 		intel_vrr_get_config(crtc, pipe_config);
6213 
6214 	intel_get_pipe_src_size(crtc, pipe_config);
6215 
6216 	if (IS_HASWELL(dev_priv)) {
6217 		u32 tmp = intel_de_read(dev_priv,
6218 					PIPECONF(pipe_config->cpu_transcoder));
6219 
6220 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
6221 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6222 		else
6223 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6224 	} else {
6225 		pipe_config->output_format =
6226 			bdw_get_pipemisc_output_format(crtc);
6227 	}
6228 
6229 	pipe_config->gamma_mode = intel_de_read(dev_priv,
6230 						GAMMA_MODE(crtc->pipe));
6231 
6232 	pipe_config->csc_mode = intel_de_read(dev_priv,
6233 					      PIPE_CSC_MODE(crtc->pipe));
6234 
6235 	if (DISPLAY_VER(dev_priv) >= 9) {
6236 		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
6237 
6238 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
6239 			pipe_config->gamma_enable = true;
6240 
6241 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
6242 			pipe_config->csc_enable = true;
6243 	} else {
6244 		i9xx_get_pipe_color_config(pipe_config);
6245 	}
6246 
6247 	intel_color_get_config(pipe_config);
6248 
6249 	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
6250 	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
6251 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6252 		pipe_config->ips_linetime =
6253 			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
6254 
6255 	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6256 						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
6257 		if (DISPLAY_VER(dev_priv) >= 9)
6258 			skl_get_pfit_config(pipe_config);
6259 		else
6260 			ilk_get_pfit_config(pipe_config);
6261 	}
6262 
6263 	if (hsw_crtc_supports_ips(crtc)) {
6264 		if (IS_HASWELL(dev_priv))
6265 			pipe_config->ips_enabled = intel_de_read(dev_priv,
6266 								 IPS_CTL) & IPS_ENABLE;
6267 		else {
6268 			/*
6269 			 * We cannot read out the IPS state on Broadwell, so
6270 			 * set it to true here in order to force it to a
6271 			 * defined state on the first commit.
6272 			 */
6273 			pipe_config->ips_enabled = true;
6274 		}
6275 	}
6276 
6277 	if (pipe_config->bigjoiner_slave) {
6278 		/* Cannot be read out as a slave, set to 0. */
6279 		pipe_config->pixel_multiplier = 0;
6280 	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
6281 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
6282 		pipe_config->pixel_multiplier =
6283 			intel_de_read(dev_priv,
6284 				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
6285 	} else {
6286 		pipe_config->pixel_multiplier = 1;
6287 	}
6288 
6289 out:
6290 	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
6291 
6292 	return active;
6293 }
6294 
6295 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6296 {
6297 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6298 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6299 
6300 	if (!i915->display.get_pipe_config(crtc, crtc_state))
6301 		return false;
6302 
6303 	crtc_state->hw.active = true;
6304 
6305 	intel_crtc_readout_derived_state(crtc_state);
6306 
6307 	return true;
6308 }
6309 
6310 /* VESA 640x480x72Hz mode to set on the pipe */
6311 static const struct drm_display_mode load_detect_mode = {
6312 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6313 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6314 };
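
/*
 * Quick sanity check on the numbers above: 31500 kHz / (832 * 520)
 * ~= 72.8 Hz, which matches the advertised 72 Hz VESA refresh rate.
 */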
6315 
6316 struct drm_framebuffer *
6317 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6318 			 struct drm_mode_fb_cmd2 *mode_cmd)
6319 {
6320 	struct intel_framebuffer *intel_fb;
6321 	int ret;
6322 
6323 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6324 	if (!intel_fb)
6325 		return ERR_PTR(-ENOMEM);
6326 
6327 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6328 	if (ret)
6329 		goto err;
6330 
6331 	return &intel_fb->base;
6332 
6333 err:
6334 	kfree(intel_fb);
6335 	return ERR_PTR(ret);
6336 }
6337 
6338 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6339 					struct drm_crtc *crtc)
6340 {
6341 	struct drm_plane *plane;
6342 	struct drm_plane_state *plane_state;
6343 	int ret, i;
6344 
6345 	ret = drm_atomic_add_affected_planes(state, crtc);
6346 	if (ret)
6347 		return ret;
6348 
6349 	for_each_new_plane_in_state(state, plane, plane_state, i) {
6350 		if (plane_state->crtc != crtc)
6351 			continue;
6352 
6353 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6354 		if (ret)
6355 			return ret;
6356 
6357 		drm_atomic_set_fb_for_plane(plane_state, NULL);
6358 	}
6359 
6360 	return 0;
6361 }
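
/*
 * Helper for the load detect path below: detaches every plane from the
 * borrowed crtc so that load detection scans out a blank screen rather
 * than stale framebuffer contents.
 */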
6362 
6363 int intel_get_load_detect_pipe(struct drm_connector *connector,
6364 			       struct intel_load_detect_pipe *old,
6365 			       struct drm_modeset_acquire_ctx *ctx)
6366 {
6367 	struct intel_encoder *encoder =
6368 		intel_attached_encoder(to_intel_connector(connector));
6369 	struct intel_crtc *possible_crtc;
6370 	struct intel_crtc *crtc = NULL;
6371 	struct drm_device *dev = encoder->base.dev;
6372 	struct drm_i915_private *dev_priv = to_i915(dev);
6373 	struct drm_mode_config *config = &dev->mode_config;
6374 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
6375 	struct drm_connector_state *connector_state;
6376 	struct intel_crtc_state *crtc_state;
6377 	int ret;
6378 
6379 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6380 		    connector->base.id, connector->name,
6381 		    encoder->base.base.id, encoder->base.name);
6382 
6383 	old->restore_state = NULL;
6384 
6385 	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
6386 
6387 	/*
6388 	 * Algorithm gets a little messy:
6389 	 *
6390 	 *   - if the connector already has an assigned crtc, use it (but make
6391 	 *     sure it's on first)
6392 	 *
6393 	 *   - try to find the first unused crtc that can drive this connector,
6394 	 *     and use that if we find one
6395 	 */
6396 
6397 	/* See if we already have a CRTC for this connector */
6398 	if (connector->state->crtc) {
6399 		crtc = to_intel_crtc(connector->state->crtc);
6400 
6401 		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
6402 		if (ret)
6403 			goto fail;
6404 
6405 		/* Make sure the crtc and connector are running */
6406 		goto found;
6407 	}
6408 
6409 	/* Find an unused one (if possible) */
6410 	for_each_intel_crtc(dev, possible_crtc) {
6411 		if (!(encoder->base.possible_crtcs &
6412 		      drm_crtc_mask(&possible_crtc->base)))
6413 			continue;
6414 
6415 		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
6416 		if (ret)
6417 			goto fail;
6418 
6419 		if (possible_crtc->base.state->enable) {
6420 			drm_modeset_unlock(&possible_crtc->base.mutex);
6421 			continue;
6422 		}
6423 
6424 		crtc = possible_crtc;
6425 		break;
6426 	}
6427 
6428 	/*
6429 	 * If we didn't find an unused CRTC, don't use any.
6430 	 */
6431 	if (!crtc) {
6432 		drm_dbg_kms(&dev_priv->drm,
6433 			    "no pipe available for load-detect\n");
6434 		ret = -ENODEV;
6435 		goto fail;
6436 	}
6437 
6438 found:
6439 	state = drm_atomic_state_alloc(dev);
6440 	restore_state = drm_atomic_state_alloc(dev);
6441 	if (!state || !restore_state) {
6442 		ret = -ENOMEM;
6443 		goto fail;
6444 	}
6445 
6446 	state->acquire_ctx = ctx;
6447 	restore_state->acquire_ctx = ctx;
6448 
6449 	connector_state = drm_atomic_get_connector_state(state, connector);
6450 	if (IS_ERR(connector_state)) {
6451 		ret = PTR_ERR(connector_state);
6452 		goto fail;
6453 	}
6454 
6455 	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
6456 	if (ret)
6457 		goto fail;
6458 
6459 	crtc_state = intel_atomic_get_crtc_state(state, crtc);
6460 	if (IS_ERR(crtc_state)) {
6461 		ret = PTR_ERR(crtc_state);
6462 		goto fail;
6463 	}
6464 
6465 	crtc_state->uapi.active = true;
6466 
6467 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
6468 					   &load_detect_mode);
6469 	if (ret)
6470 		goto fail;
6471 
6472 	ret = intel_modeset_disable_planes(state, &crtc->base);
6473 	if (ret)
6474 		goto fail;
6475 
6476 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
6477 	if (!ret)
6478 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
6479 	if (!ret)
6480 		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
6481 	if (ret) {
6482 		drm_dbg_kms(&dev_priv->drm,
6483 			    "Failed to create a copy of old state to restore: %i\n",
6484 			    ret);
6485 		goto fail;
6486 	}
6487 
6488 	ret = drm_atomic_commit(state);
6489 	if (ret) {
6490 		drm_dbg_kms(&dev_priv->drm,
6491 			    "failed to set mode on load-detect pipe\n");
6492 		goto fail;
6493 	}
6494 
6495 	old->restore_state = restore_state;
6496 	drm_atomic_state_put(state);
6497 
6498 	/* let the connector get through one full cycle before testing */
6499 	intel_wait_for_vblank(dev_priv, crtc->pipe);
6500 	return true;
6501 
6502 fail:
6503 	if (state) {
6504 		drm_atomic_state_put(state);
6505 		state = NULL;
6506 	}
6507 	if (restore_state) {
6508 		drm_atomic_state_put(restore_state);
6509 		restore_state = NULL;
6510 	}
6511 
6512 	if (ret == -EDEADLK)
6513 		return ret;
6514 
6515 	return false;
6516 }
6517 
6518 void intel_release_load_detect_pipe(struct drm_connector *connector,
6519 				    struct intel_load_detect_pipe *old,
6520 				    struct drm_modeset_acquire_ctx *ctx)
6521 {
6522 	struct intel_encoder *intel_encoder =
6523 		intel_attached_encoder(to_intel_connector(connector));
6524 	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6525 	struct drm_encoder *encoder = &intel_encoder->base;
6526 	struct drm_atomic_state *state = old->restore_state;
6527 	int ret;
6528 
6529 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6530 		    connector->base.id, connector->name,
6531 		    encoder->base.id, encoder->name);
6532 
6533 	if (!state)
6534 		return;
6535 
6536 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6537 	if (ret)
6538 		drm_dbg_kms(&i915->drm,
6539 			    "Couldn't release load detect pipe: %i\n", ret);
6540 	drm_atomic_state_put(state);
6541 }
6542 
6543 static int i9xx_pll_refclk(struct drm_device *dev,
6544 			   const struct intel_crtc_state *pipe_config)
6545 {
6546 	struct drm_i915_private *dev_priv = to_i915(dev);
6547 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6548 
6549 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6550 		return dev_priv->vbt.lvds_ssc_freq;
6551 	else if (HAS_PCH_SPLIT(dev_priv))
6552 		return 120000;
6553 	else if (DISPLAY_VER(dev_priv) != 2)
6554 		return 96000;
6555 	else
6556 		return 48000;
6557 }
6558 
6559 /* Returns the clock of the currently programmed mode of the given pipe. */
6560 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
6561 				struct intel_crtc_state *pipe_config)
6562 {
6563 	struct drm_device *dev = crtc->base.dev;
6564 	struct drm_i915_private *dev_priv = to_i915(dev);
6565 	enum pipe pipe = crtc->pipe;
6566 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6567 	u32 fp;
6568 	struct dpll clock;
6569 	int port_clock;
6570 	int refclk = i9xx_pll_refclk(dev, pipe_config);
6571 
6572 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6573 		fp = pipe_config->dpll_hw_state.fp0;
6574 	else
6575 		fp = pipe_config->dpll_hw_state.fp1;
6576 
6577 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6578 	if (IS_PINEVIEW(dev_priv)) {
6579 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6580 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6581 	} else {
6582 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6583 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6584 	}
6585 
6586 	if (DISPLAY_VER(dev_priv) != 2) {
6587 		if (IS_PINEVIEW(dev_priv))
6588 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6589 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6590 		else
6591 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6592 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
6593 
6594 		switch (dpll & DPLL_MODE_MASK) {
6595 		case DPLLB_MODE_DAC_SERIAL:
6596 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6597 				5 : 10;
6598 			break;
6599 		case DPLLB_MODE_LVDS:
6600 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6601 				7 : 14;
6602 			break;
6603 		default:
6604 			drm_dbg_kms(&dev_priv->drm,
6605 				    "Unknown DPLL mode %08x in programmed "
6606 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
6607 			return;
6608 		}
6609 
6610 		if (IS_PINEVIEW(dev_priv))
6611 			port_clock = pnv_calc_dpll_params(refclk, &clock);
6612 		else
6613 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
6614 	} else {
6615 		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
6616 								 LVDS);
6617 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
6618 
6619 		if (is_lvds) {
6620 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6621 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
6622 
6623 			if (lvds & LVDS_CLKB_POWER_UP)
6624 				clock.p2 = 7;
6625 			else
6626 				clock.p2 = 14;
6627 		} else {
6628 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
6629 				clock.p1 = 2;
6630 			else {
6631 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6632 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6633 			}
6634 			if (dpll & PLL_P2_DIVIDE_BY_4)
6635 				clock.p2 = 4;
6636 			else
6637 				clock.p2 = 2;
6638 		}
6639 
6640 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
6641 	}
6642 
6643 	/*
6644 	 * This value includes pixel_multiplier. We will use
6645 	 * port_clock to compute adjusted_mode.crtc_clock in the
6646 	 * encoder's get_config() function.
6647 	 */
6648 	pipe_config->port_clock = port_clock;
6649 }
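
/*
 * Roughly speaking (see {i9xx,pnv}_calc_dpll_params() for the exact
 * m/n encodings and rounding), the dividers decoded above combine as:
 *
 *   vco = refclk * m / n,  port_clock = vco / (p1 * p2)
 *
 * e.g. with a 96 MHz refclk, m = 100, n = 5, p1 = 2, p2 = 10 this
 * would give a 1920 MHz VCO and a 96 MHz port clock (example values
 * only, not taken from any particular platform).
 */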
6650 
6651 int intel_dotclock_calculate(int link_freq,
6652 			     const struct intel_link_m_n *m_n)
6653 {
6654 	/*
6655 	 * The calculation for the data clock is:
6656 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6657 	 * But we want to avoid losing precision if possible, so:
6658 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6659 	 *
6660 	 * and the dot clock derived from the link M/N values is simpler:
6661 	 * pixel_clock = (link_m * link_clock) / link_n
6662 	 */
6663 
6664 	if (!m_n->link_n)
6665 		return 0;
6666 
6667 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6668 }
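
/*
 * Worked example (illustrative values only): link_m = 11, link_n = 20
 * over a 270000 kHz link gives 270000 * 11 / 20 = 148500 kHz, i.e. the
 * 148.5 MHz dot clock used by 1080p60.
 */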
6669 
6670 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6671 			      struct intel_crtc_state *pipe_config)
6672 {
6673 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6674 
6675 	/* read out port_clock from the DPLL */
6676 	i9xx_crtc_clock_get(crtc, pipe_config);
6677 
6678 	/*
6679 	 * In case there is an active pipe without active ports,
6680 	 * we still need a reasonable estimate of the dotclock.
6681 	 * Calculate one based on the FDI configuration.
6682 	 */
6683 	pipe_config->hw.adjusted_mode.crtc_clock =
6684 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6685 					 &pipe_config->fdi_m_n);
6686 }
6687 
6688 /* Returns the currently programmed mode of the given encoder. */
6689 struct drm_display_mode *
6690 intel_encoder_current_mode(struct intel_encoder *encoder)
6691 {
6692 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6693 	struct intel_crtc_state *crtc_state;
6694 	struct drm_display_mode *mode;
6695 	struct intel_crtc *crtc;
6696 	enum pipe pipe;
6697 
6698 	if (!encoder->get_hw_state(encoder, &pipe))
6699 		return NULL;
6700 
6701 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6702 
6703 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6704 	if (!mode)
6705 		return NULL;
6706 
6707 	crtc_state = intel_crtc_state_alloc(crtc);
6708 	if (!crtc_state) {
6709 		kfree(mode);
6710 		return NULL;
6711 	}
6712 
6713 	if (!intel_crtc_get_pipe_config(crtc_state)) {
6714 		kfree(crtc_state);
6715 		kfree(mode);
6716 		return NULL;
6717 	}
6718 
6719 	intel_encoder_get_config(encoder, crtc_state);
6720 
6721 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6722 
6723 	kfree(crtc_state);
6724 
6725 	return mode;
6726 }
6727 
6728 /**
6729  * intel_wm_need_update - Check whether watermarks need updating
6730  * @cur: current plane state
6731  * @new: new plane state
6732  *
6733  * Check current plane state versus the new one to determine whether
6734  * watermarks need to be recalculated.
6735  *
6736  * Returns true if the watermarks need to be recalculated, false otherwise.
6737  */
6738 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6739 				 struct intel_plane_state *new)
6740 {
6741 	/* Update watermarks on tiling or size changes. */
6742 	if (new->uapi.visible != cur->uapi.visible)
6743 		return true;
6744 
6745 	if (!cur->hw.fb || !new->hw.fb)
6746 		return false;
6747 
6748 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6749 	    cur->hw.rotation != new->hw.rotation ||
6750 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6751 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6752 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6753 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6754 		return true;
6755 
6756 	return false;
6757 }
6758 
6759 static bool needs_scaling(const struct intel_plane_state *state)
6760 {
6761 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
6762 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
6763 	int dst_w = drm_rect_width(&state->uapi.dst);
6764 	int dst_h = drm_rect_height(&state->uapi.dst);
6765 
6766 	return (src_w != dst_w || src_h != dst_h);
6767 }
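
/*
 * Note that the uapi src rectangle is in 16.16 fixed point while dst is
 * in integer pixels, hence the >> 16 above. E.g. a 1920 pixel wide
 * source is stored as 1920 << 16 = 125829120 and shifts back to 1920.
 */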
6768 
6769 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
6770 				    struct intel_crtc_state *crtc_state,
6771 				    const struct intel_plane_state *old_plane_state,
6772 				    struct intel_plane_state *plane_state)
6773 {
6774 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6775 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
6776 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6777 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6778 	bool was_crtc_enabled = old_crtc_state->hw.active;
6779 	bool is_crtc_enabled = crtc_state->hw.active;
6780 	bool turn_off, turn_on, visible, was_visible;
6781 	int ret;
6782 
6783 	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
6784 		ret = skl_update_scaler_plane(crtc_state, plane_state);
6785 		if (ret)
6786 			return ret;
6787 	}
6788 
6789 	was_visible = old_plane_state->uapi.visible;
6790 	visible = plane_state->uapi.visible;
6791 
6792 	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
6793 		was_visible = false;
6794 
6795 	/*
6796 	 * Visibility is calculated as if the crtc was on, but
6797 	 * after scaler setup everything depends on it being off
6798 	 * when the crtc isn't active.
6799 	 *
6800 	 * FIXME this is wrong for watermarks. Watermarks should also
6801 	 * be computed as if the pipe would be active. Perhaps move
6802 	 * per-plane wm computation to the .check_plane() hook, and
6803 	 * only combine the results from all planes in the current place?
6804 	 */
6805 	if (!is_crtc_enabled) {
6806 		intel_plane_set_invisible(crtc_state, plane_state);
6807 		visible = false;
6808 	}
6809 
6810 	if (!was_visible && !visible)
6811 		return 0;
6812 
6813 	turn_off = was_visible && (!visible || mode_changed);
6814 	turn_on = visible && (!was_visible || mode_changed);
6815 
6816 	drm_dbg_atomic(&dev_priv->drm,
6817 		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
6818 		       crtc->base.base.id, crtc->base.name,
6819 		       plane->base.base.id, plane->base.name,
6820 		       was_visible, visible,
6821 		       turn_off, turn_on, mode_changed);
6822 
6823 	if (turn_on) {
6824 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6825 			crtc_state->update_wm_pre = true;
6826 
6827 		/* must disable cxsr around plane enable/disable */
6828 		if (plane->id != PLANE_CURSOR)
6829 			crtc_state->disable_cxsr = true;
6830 	} else if (turn_off) {
6831 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6832 			crtc_state->update_wm_post = true;
6833 
6834 		/* must disable cxsr around plane enable/disable */
6835 		if (plane->id != PLANE_CURSOR)
6836 			crtc_state->disable_cxsr = true;
6837 	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
6838 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
6839 			/* FIXME bollocks */
6840 			crtc_state->update_wm_pre = true;
6841 			crtc_state->update_wm_post = true;
6842 		}
6843 	}
6844 
6845 	if (visible || was_visible)
6846 		crtc_state->fb_bits |= plane->frontbuffer_bit;
6847 
6848 	/*
6849 	 * ILK/SNB DVSACNTR/Sprite Enable
6850 	 * IVB SPR_CTL/Sprite Enable
6851 	 * "When in Self Refresh Big FIFO mode, a write to enable the
6852 	 *  plane will be internally buffered and delayed while Big FIFO
6853 	 *  mode is exiting."
6854 	 *
6855 	 * Which means that enabling the sprite can take an extra frame
6856 	 * when we start in big FIFO mode (LP1+). Thus we need to drop
6857 	 * down to LP0 and wait for vblank in order to make sure the
6858 	 * sprite gets enabled on the next vblank after the register write.
6859 	 * Doing otherwise would risk enabling the sprite one frame after
6860 	 * we've already signalled flip completion. We can resume LP1+
6861 	 * once the sprite has been enabled.
6862 	 *
6863 	 *
6864 	 * WaCxSRDisabledForSpriteScaling:ivb
6865 	 * IVB SPR_SCALE/Scaling Enable
6866 	 * "Low Power watermarks must be disabled for at least one
6867 	 *  frame before enabling sprite scaling, and kept disabled
6868 	 *  until sprite scaling is disabled."
6869 	 *
6870 	 * ILK/SNB DVSASCALE/Scaling Enable
6871 	 * "When in Self Refresh Big FIFO mode, scaling enable will be
6872 	 *  masked off while Big FIFO mode is exiting."
6873 	 *
6874 	 * Despite the w/a only being listed for IVB we assume that
6875 	 * the ILK/SNB note has similar ramifications, hence we apply
6876 	 * the w/a on all three platforms.
6877 	 *
6878 	 * Experimental results suggest this is needed for the primary
6879 	 * plane as well, not only the sprite plane.
6880 	 */
6881 	if (plane->id != PLANE_CURSOR &&
6882 	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
6883 	     IS_IVYBRIDGE(dev_priv)) &&
6884 	    (turn_on || (!needs_scaling(old_plane_state) &&
6885 			 needs_scaling(plane_state))))
6886 		crtc_state->disable_lp_wm = true;
6887 
6888 	return 0;
6889 }
6890 
6891 static bool encoders_cloneable(const struct intel_encoder *a,
6892 			       const struct intel_encoder *b)
6893 {
6894 	/* masks could be asymmetric, so check both ways */
6895 	return a == b || (a->cloneable & (1 << b->type) &&
6896 			  b->cloneable & (1 << a->type));
6897 }
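
/*
 * Example: if a->cloneable contains BIT(b->type) but b->cloneable lacks
 * BIT(a->type), the pair is not cloneable; both directions must agree
 * before the two encoders may share a crtc.
 */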
6898 
6899 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
6900 					 struct intel_crtc *crtc,
6901 					 struct intel_encoder *encoder)
6902 {
6903 	struct intel_encoder *source_encoder;
6904 	struct drm_connector *connector;
6905 	struct drm_connector_state *connector_state;
6906 	int i;
6907 
6908 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6909 		if (connector_state->crtc != &crtc->base)
6910 			continue;
6911 
6912 		source_encoder =
6913 			to_intel_encoder(connector_state->best_encoder);
6914 		if (!encoders_cloneable(encoder, source_encoder))
6915 			return false;
6916 	}
6917 
6918 	return true;
6919 }
6920 
6921 static int icl_add_linked_planes(struct intel_atomic_state *state)
6922 {
6923 	struct intel_plane *plane, *linked;
6924 	struct intel_plane_state *plane_state, *linked_plane_state;
6925 	int i;
6926 
6927 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6928 		linked = plane_state->planar_linked_plane;
6929 
6930 		if (!linked)
6931 			continue;
6932 
6933 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
6934 		if (IS_ERR(linked_plane_state))
6935 			return PTR_ERR(linked_plane_state);
6936 
6937 		drm_WARN_ON(state->base.dev,
6938 			    linked_plane_state->planar_linked_plane != plane);
6939 		drm_WARN_ON(state->base.dev,
6940 			    linked_plane_state->planar_slave == plane_state->planar_slave);
6941 	}
6942 
6943 	return 0;
6944 }
6945 
6946 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
6947 {
6948 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6949 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6950 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
6951 	struct intel_plane *plane, *linked;
6952 	struct intel_plane_state *plane_state;
6953 	int i;
6954 
6955 	if (DISPLAY_VER(dev_priv) < 11)
6956 		return 0;
6957 
6958 	/*
6959 	 * Destroy all old plane links and make the slave plane invisible
6960 	 * in the crtc_state->active_planes mask.
6961 	 */
6962 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6963 		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
6964 			continue;
6965 
6966 		plane_state->planar_linked_plane = NULL;
6967 		if (plane_state->planar_slave && !plane_state->uapi.visible) {
6968 			crtc_state->enabled_planes &= ~BIT(plane->id);
6969 			crtc_state->active_planes &= ~BIT(plane->id);
6970 			crtc_state->update_planes |= BIT(plane->id);
6971 		}
6972 
6973 		plane_state->planar_slave = false;
6974 	}
6975 
6976 	if (!crtc_state->nv12_planes)
6977 		return 0;
6978 
6979 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6980 		struct intel_plane_state *linked_state = NULL;
6981 
6982 		if (plane->pipe != crtc->pipe ||
6983 		    !(crtc_state->nv12_planes & BIT(plane->id)))
6984 			continue;
6985 
6986 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
6987 			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
6988 				continue;
6989 
6990 			if (crtc_state->active_planes & BIT(linked->id))
6991 				continue;
6992 
6993 			linked_state = intel_atomic_get_plane_state(state, linked);
6994 			if (IS_ERR(linked_state))
6995 				return PTR_ERR(linked_state);
6996 
6997 			break;
6998 		}
6999 
7000 		if (!linked_state) {
7001 			drm_dbg_kms(&dev_priv->drm,
7002 				    "Need %d free Y planes for planar YUV\n",
7003 				    hweight8(crtc_state->nv12_planes));
7004 
7005 			return -EINVAL;
7006 		}
7007 
7008 		plane_state->planar_linked_plane = linked;
7009 
7010 		linked_state->planar_slave = true;
7011 		linked_state->planar_linked_plane = plane;
7012 		crtc_state->enabled_planes |= BIT(linked->id);
7013 		crtc_state->active_planes |= BIT(linked->id);
7014 		crtc_state->update_planes |= BIT(linked->id);
7015 		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
7016 			    linked->base.name, plane->base.name);
7017 
7018 		/* Copy parameters to slave plane */
7019 		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
7020 		linked_state->color_ctl = plane_state->color_ctl;
7021 		linked_state->view = plane_state->view;
7022 
7023 		intel_plane_copy_hw_state(linked_state, plane_state);
7024 		linked_state->uapi.src = plane_state->uapi.src;
7025 		linked_state->uapi.dst = plane_state->uapi.dst;
7026 
7027 		if (icl_is_hdr_plane(dev_priv, plane->id)) {
7028 			if (linked->id == PLANE_SPRITE5)
7029 				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
7030 			else if (linked->id == PLANE_SPRITE4)
7031 				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
7032 			else if (linked->id == PLANE_SPRITE3)
7033 				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
7034 			else if (linked->id == PLANE_SPRITE2)
7035 				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
7036 			else
7037 				MISSING_CASE(linked->id);
7038 		}
7039 	}
7040 
7041 	return 0;
7042 }
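
/*
 * To summarize the linking scheme above: each plane scanning out the UV
 * data of a planar (NV12) format gets paired with an otherwise idle
 * Y-capable plane, which is marked as the planar slave and inherits the
 * relevant hardware state from its master.
 */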
7043 
7044 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
7045 {
7046 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7047 	struct intel_atomic_state *state =
7048 		to_intel_atomic_state(new_crtc_state->uapi.state);
7049 	const struct intel_crtc_state *old_crtc_state =
7050 		intel_atomic_get_old_crtc_state(state, crtc);
7051 
7052 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
7053 }
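
/*
 * The double negation above normalizes the c8_planes masks to booleans,
 * so this fires only when C8 usage toggles between none and some, not
 * when the set of C8 planes merely changes.
 */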
7054 
7055 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
7056 {
7057 	const struct drm_display_mode *pipe_mode =
7058 		&crtc_state->hw.pipe_mode;
7059 	int linetime_wm;
7060 
7061 	if (!crtc_state->hw.enable)
7062 		return 0;
7063 
7064 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7065 					pipe_mode->crtc_clock);
7066 
7067 	return min(linetime_wm, 0x1ff);
7068 }
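
/*
 * The * 8 above comes from the register expecting the line time in
 * 1/8 us units (U10.3 fixed point), which is also why the result is
 * clamped to 0x1ff. Example with illustrative numbers: htotal = 2200
 * and a 148500 kHz pipe clock give DIV_ROUND_CLOSEST(2200 * 1000 * 8,
 * 148500) = 119, i.e. a ~14.8 us line time.
 */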
7069 
7070 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
7071 			       const struct intel_cdclk_state *cdclk_state)
7072 {
7073 	const struct drm_display_mode *pipe_mode =
7074 		&crtc_state->hw.pipe_mode;
7075 	int linetime_wm;
7076 
7077 	if (!crtc_state->hw.enable)
7078 		return 0;
7079 
7080 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7081 					cdclk_state->logical.cdclk);
7082 
7083 	return min(linetime_wm, 0x1ff);
7084 }
7085 
7086 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
7087 {
7088 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7089 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7090 	const struct drm_display_mode *pipe_mode =
7091 		&crtc_state->hw.pipe_mode;
7092 	int linetime_wm;
7093 
7094 	if (!crtc_state->hw.enable)
7095 		return 0;
7096 
7097 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
7098 				   crtc_state->pixel_rate);
7099 
7100 	/* Display WA #1135: BXT:ALL GLK:ALL */
7101 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
7102 	    dev_priv->ipc_enabled)
7103 		linetime_wm /= 2;
7104 
7105 	return min(linetime_wm, 0x1ff);
7106 }
7107 
7108 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
7109 				   struct intel_crtc *crtc)
7110 {
7111 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7112 	struct intel_crtc_state *crtc_state =
7113 		intel_atomic_get_new_crtc_state(state, crtc);
7114 	const struct intel_cdclk_state *cdclk_state;
7115 
7116 	if (DISPLAY_VER(dev_priv) >= 9)
7117 		crtc_state->linetime = skl_linetime_wm(crtc_state);
7118 	else
7119 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
7120 
7121 	if (!hsw_crtc_supports_ips(crtc))
7122 		return 0;
7123 
7124 	cdclk_state = intel_atomic_get_cdclk_state(state);
7125 	if (IS_ERR(cdclk_state))
7126 		return PTR_ERR(cdclk_state);
7127 
7128 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
7129 						       cdclk_state);
7130 
7131 	return 0;
7132 }
7133 
7134 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
7135 				   struct intel_crtc *crtc)
7136 {
7137 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7138 	struct intel_crtc_state *crtc_state =
7139 		intel_atomic_get_new_crtc_state(state, crtc);
7140 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7141 	int ret;
7142 
7143 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
7144 	    mode_changed && !crtc_state->hw.active)
7145 		crtc_state->update_wm_post = true;
7146 
7147 	if (mode_changed && crtc_state->hw.enable &&
7148 	    dev_priv->display.crtc_compute_clock &&
7149 	    !crtc_state->bigjoiner_slave &&
7150 	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
7151 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
7152 		if (ret)
7153 			return ret;
7154 	}
7155 
7156 	/*
7157 	 * May need to update pipe gamma enable bits
7158 	 * when C8 planes are getting enabled/disabled.
7159 	 */
7160 	if (c8_planes_changed(crtc_state))
7161 		crtc_state->uapi.color_mgmt_changed = true;
7162 
7163 	if (mode_changed || crtc_state->update_pipe ||
7164 	    crtc_state->uapi.color_mgmt_changed) {
7165 		ret = intel_color_check(crtc_state);
7166 		if (ret)
7167 			return ret;
7168 	}
7169 
7170 	if (dev_priv->display.compute_pipe_wm) {
7171 		ret = dev_priv->display.compute_pipe_wm(state, crtc);
7172 		if (ret) {
7173 			drm_dbg_kms(&dev_priv->drm,
7174 				    "Target pipe watermarks are invalid\n");
7175 			return ret;
7176 		}
7177 
7178 	}
7179 
7180 	if (dev_priv->display.compute_intermediate_wm) {
7181 		if (drm_WARN_ON(&dev_priv->drm,
7182 				!dev_priv->display.compute_pipe_wm))
7183 			return 0;
7184 
7185 		/*
7186 		 * Calculate 'intermediate' watermarks that satisfy both the
7187 		 * old state and the new state.  We can program these
7188 		 * immediately.
7189 		 */
7190 		ret = dev_priv->display.compute_intermediate_wm(state, crtc);
7191 		if (ret) {
7192 			drm_dbg_kms(&dev_priv->drm,
7193 				    "No valid intermediate pipe watermarks are possible\n");
7194 			return ret;
7195 		}
7196 	}
7197 
7198 	if (DISPLAY_VER(dev_priv) >= 9) {
7199 		if (mode_changed || crtc_state->update_pipe) {
7200 			ret = skl_update_scaler_crtc(crtc_state);
7201 			if (ret)
7202 				return ret;
7203 		}
7204 
7205 		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
7206 		if (ret)
7207 			return ret;
7208 	}
7209 
7210 	if (HAS_IPS(dev_priv)) {
7211 		ret = hsw_compute_ips_config(crtc_state);
7212 		if (ret)
7213 			return ret;
7214 	}
7215 
7216 	if (DISPLAY_VER(dev_priv) >= 9 ||
7217 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
7218 		ret = hsw_compute_linetime_wm(state, crtc);
7219 		if (ret)
7220 			return ret;
7221 
7222 	}
7223 
7224 	if (!mode_changed) {
7225 		ret = intel_psr2_sel_fetch_update(state, crtc);
7226 		if (ret)
7227 			return ret;
7228 	}
7229 
7230 	return 0;
7231 }
7232 
7233 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
7234 {
7235 	struct intel_connector *connector;
7236 	struct drm_connector_list_iter conn_iter;
7237 
7238 	drm_connector_list_iter_begin(dev, &conn_iter);
7239 	for_each_intel_connector_iter(connector, &conn_iter) {
7240 		struct drm_connector_state *conn_state = connector->base.state;
7241 		struct intel_encoder *encoder =
7242 			to_intel_encoder(connector->base.encoder);
7243 
7244 		if (conn_state->crtc)
7245 			drm_connector_put(&connector->base);
7246 
7247 		if (encoder) {
7248 			struct intel_crtc *crtc =
7249 				to_intel_crtc(encoder->base.crtc);
7250 			const struct intel_crtc_state *crtc_state =
7251 				to_intel_crtc_state(crtc->base.state);
7252 
7253 			conn_state->best_encoder = &encoder->base;
7254 			conn_state->crtc = &crtc->base;
7255 			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
7256 
7257 			drm_connector_get(&connector->base);
7258 		} else {
7259 			conn_state->best_encoder = NULL;
7260 			conn_state->crtc = NULL;
7261 		}
7262 	}
7263 	drm_connector_list_iter_end(&conn_iter);
7264 }
7265 
7266 static int
7267 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7268 		      struct intel_crtc_state *pipe_config)
7269 {
7270 	struct drm_connector *connector = conn_state->connector;
7271 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7272 	const struct drm_display_info *info = &connector->display_info;
7273 	int bpp;
7274 
7275 	switch (conn_state->max_bpc) {
7276 	case 6 ... 7:
7277 		bpp = 6 * 3;
7278 		break;
7279 	case 8 ... 9:
7280 		bpp = 8 * 3;
7281 		break;
7282 	case 10 ... 11:
7283 		bpp = 10 * 3;
7284 		break;
7285 	case 12 ... 16:
7286 		bpp = 12 * 3;
7287 		break;
7288 	default:
7289 		MISSING_CASE(conn_state->max_bpc);
7290 		return -EINVAL;
7291 	}
7292 
7293 	if (bpp < pipe_config->pipe_bpp) {
7294 		drm_dbg_kms(&i915->drm,
7295 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7296 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7297 			    connector->base.id, connector->name,
7298 			    bpp, 3 * info->bpc,
7299 			    3 * conn_state->max_requested_bpc,
7300 			    pipe_config->pipe_bpp);
7301 
7302 		pipe_config->pipe_bpp = bpp;
7303 	}
7304 
7305 	return 0;
7306 }
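
/*
 * The max_bpc ranges above round odd sink capabilities down to the pipe
 * depths the hardware can produce: e.g. a connector reporting max_bpc
 * 11 is still driven at 10 bpc (30 bpp), and anything from 12 to 16 is
 * clamped to 12 bpc (36 bpp).
 */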
7307 
7308 static int
7309 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7310 			  struct intel_crtc_state *pipe_config)
7311 {
7312 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7313 	struct drm_atomic_state *state = pipe_config->uapi.state;
7314 	struct drm_connector *connector;
7315 	struct drm_connector_state *connector_state;
7316 	int bpp, i;
7317 
7318 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7319 	    IS_CHERRYVIEW(dev_priv)))
7320 		bpp = 10*3;
7321 	else if (DISPLAY_VER(dev_priv) >= 5)
7322 		bpp = 12*3;
7323 	else
7324 		bpp = 8*3;
7325 
7326 	pipe_config->pipe_bpp = bpp;
7327 
7328 	/* Clamp display bpp to connector max bpp */
7329 	for_each_new_connector_in_state(state, connector, connector_state, i) {
7330 		int ret;
7331 
7332 		if (connector_state->crtc != &crtc->base)
7333 			continue;
7334 
7335 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7336 		if (ret)
7337 			return ret;
7338 	}
7339 
7340 	return 0;
7341 }
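
/*
 * E.g. an ILK+ (DISPLAY_VER >= 5) platform starts from the 36 bpp
 * platform maximum; a connector whose state allows at most 8 bpc then
 * clamps the pipe to 24 bpp via compute_sink_pipe_bpp().
 */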
7342 
7343 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
7344 				    const struct drm_display_mode *mode)
7345 {
7346 	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
7347 		    "type: 0x%x flags: 0x%x\n",
7348 		    mode->crtc_clock,
7349 		    mode->crtc_hdisplay, mode->crtc_hsync_start,
7350 		    mode->crtc_hsync_end, mode->crtc_htotal,
7351 		    mode->crtc_vdisplay, mode->crtc_vsync_start,
7352 		    mode->crtc_vsync_end, mode->crtc_vtotal,
7353 		    mode->type, mode->flags);
7354 }
7355 
7356 static void
7357 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
7358 		      const char *id, unsigned int lane_count,
7359 		      const struct intel_link_m_n *m_n)
7360 {
7361 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7362 
7363 	drm_dbg_kms(&i915->drm,
7364 		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7365 		    id, lane_count,
7366 		    m_n->gmch_m, m_n->gmch_n,
7367 		    m_n->link_m, m_n->link_n, m_n->tu);
7368 }
7369 
7370 static void
7371 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7372 		     const union hdmi_infoframe *frame)
7373 {
7374 	if (!drm_debug_enabled(DRM_UT_KMS))
7375 		return;
7376 
7377 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7378 }
7379 
7380 static void
7381 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7382 		      const struct drm_dp_vsc_sdp *vsc)
7383 {
7384 	if (!drm_debug_enabled(DRM_UT_KMS))
7385 		return;
7386 
7387 	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7388 }
7389 
7390 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
7391 
7392 static const char * const output_type_str[] = {
7393 	OUTPUT_TYPE(UNUSED),
7394 	OUTPUT_TYPE(ANALOG),
7395 	OUTPUT_TYPE(DVO),
7396 	OUTPUT_TYPE(SDVO),
7397 	OUTPUT_TYPE(LVDS),
7398 	OUTPUT_TYPE(TVOUT),
7399 	OUTPUT_TYPE(HDMI),
7400 	OUTPUT_TYPE(DP),
7401 	OUTPUT_TYPE(EDP),
7402 	OUTPUT_TYPE(DSI),
7403 	OUTPUT_TYPE(DDI),
7404 	OUTPUT_TYPE(DP_MST),
7405 };
7406 
7407 #undef OUTPUT_TYPE
7408 
7409 static void snprintf_output_types(char *buf, size_t len,
7410 				  unsigned int output_types)
7411 {
7412 	char *str = buf;
7413 	int i;
7414 
7415 	str[0] = '\0';
7416 
7417 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7418 		int r;
7419 
7420 		if ((output_types & BIT(i)) == 0)
7421 			continue;
7422 
7423 		r = snprintf(str, len, "%s%s",
7424 			     str != buf ? "," : "", output_type_str[i]);
7425 		if (r >= len)
7426 			break;
7427 		str += r;
7428 		len -= r;
7429 
7430 		output_types &= ~BIT(i);
7431 	}
7432 
7433 	WARN_ON_ONCE(output_types != 0);
7434 }
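
/*
 * E.g. output_types with the HDMI and DP bits set yields "HDMI,DP"
 * (entries are emitted in output_type_str[] index order, separated by
 * commas, and truncated if the buffer runs out).
 */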
7435 
7436 static const char * const output_format_str[] = {
7437 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
7438 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
7439 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
7440 };
7441 
7442 static const char *output_formats(enum intel_output_format format)
7443 {
7444 	if (format >= ARRAY_SIZE(output_format_str))
7445 		return "invalid";
7446 	return output_format_str[format];
7447 }
7448 
7449 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
7450 {
7451 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7452 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
7453 	const struct drm_framebuffer *fb = plane_state->hw.fb;
7454 
7455 	if (!fb) {
7456 		drm_dbg_kms(&i915->drm,
7457 			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
7458 			    plane->base.base.id, plane->base.name,
7459 			    yesno(plane_state->uapi.visible));
7460 		return;
7461 	}
7462 
7463 	drm_dbg_kms(&i915->drm,
7464 		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
7465 		    plane->base.base.id, plane->base.name,
7466 		    fb->base.id, fb->width, fb->height, &fb->format->format,
7467 		    fb->modifier, yesno(plane_state->uapi.visible));
7468 	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
7469 		    plane_state->hw.rotation, plane_state->scaler_id);
7470 	if (plane_state->uapi.visible)
7471 		drm_dbg_kms(&i915->drm,
7472 			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
7473 			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
7474 			    DRM_RECT_ARG(&plane_state->uapi.dst));
7475 }
7476 
7477 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
7478 				   struct intel_atomic_state *state,
7479 				   const char *context)
7480 {
7481 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7482 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7483 	const struct intel_plane_state *plane_state;
7484 	struct intel_plane *plane;
7485 	char buf[64];
7486 	int i;
7487 
7488 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
7489 		    crtc->base.base.id, crtc->base.name,
7490 		    yesno(pipe_config->hw.enable), context);
7491 
7492 	if (!pipe_config->hw.enable)
7493 		goto dump_planes;
7494 
7495 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
7496 	drm_dbg_kms(&dev_priv->drm,
7497 		    "active: %s, output_types: %s (0x%x), output format: %s\n",
7498 		    yesno(pipe_config->hw.active),
7499 		    buf, pipe_config->output_types,
7500 		    output_formats(pipe_config->output_format));
7501 
7502 	drm_dbg_kms(&dev_priv->drm,
7503 		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
7504 		    transcoder_name(pipe_config->cpu_transcoder),
7505 		    pipe_config->pipe_bpp, pipe_config->dither);
7506 
7507 	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
7508 		    transcoder_name(pipe_config->mst_master_transcoder));
7509 
7510 	drm_dbg_kms(&dev_priv->drm,
7511 		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
7512 		    transcoder_name(pipe_config->master_transcoder),
7513 		    pipe_config->sync_mode_slaves_mask);
7514 
7515 	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
7516 		    pipe_config->bigjoiner_slave ? "slave" :
7517 		    pipe_config->bigjoiner ? "master" : "no");
7518 
7519 	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
7520 		    enableddisabled(pipe_config->splitter.enable),
7521 		    pipe_config->splitter.link_count,
7522 		    pipe_config->splitter.pixel_overlap);
7523 
7524 	if (pipe_config->has_pch_encoder)
7525 		intel_dump_m_n_config(pipe_config, "fdi",
7526 				      pipe_config->fdi_lanes,
7527 				      &pipe_config->fdi_m_n);
7528 
7529 	if (intel_crtc_has_dp_encoder(pipe_config)) {
7530 		intel_dump_m_n_config(pipe_config, "dp m_n",
7531 				pipe_config->lane_count, &pipe_config->dp_m_n);
7532 		if (pipe_config->has_drrs)
7533 			intel_dump_m_n_config(pipe_config, "dp m2_n2",
7534 					      pipe_config->lane_count,
7535 					      &pipe_config->dp_m2_n2);
7536 	}
7537 
7538 	drm_dbg_kms(&dev_priv->drm,
7539 		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
7540 		    pipe_config->has_audio, pipe_config->has_infoframe,
7541 		    pipe_config->infoframes.enable);
7542 
7543 	if (pipe_config->infoframes.enable &
7544 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
7545 		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
7546 			    pipe_config->infoframes.gcp);
7547 	if (pipe_config->infoframes.enable &
7548 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
7549 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
7550 	if (pipe_config->infoframes.enable &
7551 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
7552 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
7553 	if (pipe_config->infoframes.enable &
7554 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
7555 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
7556 	if (pipe_config->infoframes.enable &
7557 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
7558 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7559 	if (pipe_config->infoframes.enable &
7560 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
7561 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7562 	if (pipe_config->infoframes.enable &
7563 	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
7564 		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
7565 
7566 	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
7567 		    yesno(pipe_config->vrr.enable),
7568 		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
7569 		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
7570 		    pipe_config->vrr.flipline,
7571 		    intel_vrr_vmin_vblank_start(pipe_config),
7572 		    intel_vrr_vmax_vblank_start(pipe_config));
7573 
7574 	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
7575 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
7576 	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
7577 	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
7578 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
7579 	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
7580 	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
7581 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
7582 	drm_dbg_kms(&dev_priv->drm,
7583 		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
7584 		    pipe_config->port_clock,
7585 		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
7586 		    pipe_config->pixel_rate);
7587 
7588 	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
7589 		    pipe_config->linetime, pipe_config->ips_linetime);
7590 
7591 	if (DISPLAY_VER(dev_priv) >= 9)
7592 		drm_dbg_kms(&dev_priv->drm,
7593 			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
7594 			    crtc->num_scalers,
7595 			    pipe_config->scaler_state.scaler_users,
7596 			    pipe_config->scaler_state.scaler_id);
7597 
7598 	if (HAS_GMCH(dev_priv))
7599 		drm_dbg_kms(&dev_priv->drm,
7600 			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
7601 			    pipe_config->gmch_pfit.control,
7602 			    pipe_config->gmch_pfit.pgm_ratios,
7603 			    pipe_config->gmch_pfit.lvds_border_bits);
7604 	else
7605 		drm_dbg_kms(&dev_priv->drm,
7606 			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
7607 			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
7608 			    enableddisabled(pipe_config->pch_pfit.enabled),
7609 			    yesno(pipe_config->pch_pfit.force_thru));
7610 
7611 	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
7612 		    pipe_config->ips_enabled, pipe_config->double_wide);
7613 
7614 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
7615 
7616 	if (IS_CHERRYVIEW(dev_priv))
7617 		drm_dbg_kms(&dev_priv->drm,
7618 			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7619 			    pipe_config->cgm_mode, pipe_config->gamma_mode,
7620 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7621 	else
7622 		drm_dbg_kms(&dev_priv->drm,
7623 			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7624 			    pipe_config->csc_mode, pipe_config->gamma_mode,
7625 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7626 
7627 	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
7628 		    pipe_config->hw.degamma_lut ?
7629 		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
7630 		    pipe_config->hw.gamma_lut ?
7631 		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
7632 
7633 dump_planes:
7634 	if (!state)
7635 		return;
7636 
7637 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7638 		if (plane->pipe == crtc->pipe)
7639 			intel_dump_plane_state(plane_state);
7640 	}
7641 }
7642 
7643 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7644 {
7645 	struct drm_device *dev = state->base.dev;
7646 	struct drm_connector *connector;
7647 	struct drm_connector_list_iter conn_iter;
7648 	unsigned int used_ports = 0;
7649 	unsigned int used_mst_ports = 0;
7650 	bool ret = true;
7651 
7652 	/*
7653 	 * We're going to peek into connector->state,
7654 	 * hence connection_mutex must be held.
7655 	 */
7656 	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7657 
7658 	/*
7659 	 * Walk the connector list instead of the encoder
7660 	 * list to detect the problem on ddi platforms
7661 	 * where there's just one encoder per digital port.
7662 	 */
7663 	drm_connector_list_iter_begin(dev, &conn_iter);
7664 	drm_for_each_connector_iter(connector, &conn_iter) {
7665 		struct drm_connector_state *connector_state;
7666 		struct intel_encoder *encoder;
7667 
7668 		connector_state =
7669 			drm_atomic_get_new_connector_state(&state->base,
7670 							   connector);
7671 		if (!connector_state)
7672 			connector_state = connector->state;
7673 
7674 		if (!connector_state->best_encoder)
7675 			continue;
7676 
7677 		encoder = to_intel_encoder(connector_state->best_encoder);
7678 
7679 		drm_WARN_ON(dev, !connector_state->crtc);
7680 
7681 		switch (encoder->type) {
7682 		case INTEL_OUTPUT_DDI:
7683 			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7684 				break;
7685 			fallthrough;
7686 		case INTEL_OUTPUT_DP:
7687 		case INTEL_OUTPUT_HDMI:
7688 		case INTEL_OUTPUT_EDP:
7689 			/* the same port mustn't appear more than once */
7690 			if (used_ports & BIT(encoder->port))
7691 				ret = false;
7692 
7693 			used_ports |= BIT(encoder->port);
7694 			break;
7695 		case INTEL_OUTPUT_DP_MST:
7696 			used_mst_ports |=
7697 				1 << encoder->port;
7698 			break;
7699 		default:
7700 			break;
7701 		}
7702 	}
7703 	drm_connector_list_iter_end(&conn_iter);
7704 
7705 	/* can't mix MST and SST/HDMI on the same port */
7706 	if (used_ports & used_mst_ports)
7707 		return false;
7708 
7709 	return ret;
7710 }
7711 
7712 static void
7713 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7714 					   struct intel_crtc_state *crtc_state)
7715 {
7716 	const struct intel_crtc_state *from_crtc_state = crtc_state;
7717 
7718 	if (crtc_state->bigjoiner_slave) {
7719 		from_crtc_state = intel_atomic_get_new_crtc_state(state,
7720 								  crtc_state->bigjoiner_linked_crtc);
7721 
7722 		/* No need to copy state if the master state is unchanged */
7723 		if (!from_crtc_state)
7724 			return;
7725 	}
7726 
7727 	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7728 }
7729 
7730 static void
7731 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
7732 				 struct intel_crtc_state *crtc_state)
7733 {
7734 	crtc_state->hw.enable = crtc_state->uapi.enable;
7735 	crtc_state->hw.active = crtc_state->uapi.active;
7736 	crtc_state->hw.mode = crtc_state->uapi.mode;
7737 	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
7738 	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
7739 
7740 	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
7741 }
7742 
7743 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
7744 {
7745 	if (crtc_state->bigjoiner_slave)
7746 		return;
7747 
7748 	crtc_state->uapi.enable = crtc_state->hw.enable;
7749 	crtc_state->uapi.active = crtc_state->hw.active;
7750 	drm_WARN_ON(crtc_state->uapi.crtc->dev,
7751 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
7752 
7753 	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
7754 	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
7755 
7756 	/* copy color blobs to uapi */
7757 	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
7758 				  crtc_state->hw.degamma_lut);
7759 	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
7760 				  crtc_state->hw.gamma_lut);
7761 	drm_property_replace_blob(&crtc_state->uapi.ctm,
7762 				  crtc_state->hw.ctm);
7763 }
7764 
7765 static int
7766 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
7767 			  const struct intel_crtc_state *from_crtc_state)
7768 {
7769 	struct intel_crtc_state *saved_state;
7770 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7771 
7772 	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
7773 	if (!saved_state)
7774 		return -ENOMEM;
7775 
7776 	saved_state->uapi = crtc_state->uapi;
7777 	saved_state->scaler_state = crtc_state->scaler_state;
7778 	saved_state->shared_dpll = crtc_state->shared_dpll;
7779 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7780 	saved_state->crc_enabled = crtc_state->crc_enabled;
7781 
7782 	intel_crtc_free_hw_state(crtc_state);
7783 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7784 	kfree(saved_state);
7785 
7786 	/* Re-init hw state */
7787 	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
7788 	crtc_state->hw.enable = from_crtc_state->hw.enable;
7789 	crtc_state->hw.active = from_crtc_state->hw.active;
7790 	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
7791 	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
7792 
7793 	/* Some fixups */
7794 	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
7795 	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
7796 	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
7797 	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
7798 	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
7799 	crtc_state->bigjoiner_slave = true;
7800 	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
7801 	crtc_state->has_audio = false;
7802 
7803 	return 0;
7804 }
7805 
7806 static int
7807 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
7808 				 struct intel_crtc_state *crtc_state)
7809 {
7810 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7811 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7812 	struct intel_crtc_state *saved_state;
7813 
7814 	saved_state = intel_crtc_state_alloc(crtc);
7815 	if (!saved_state)
7816 		return -ENOMEM;
7817 
7818 	/* free the old crtc_state->hw members */
7819 	intel_crtc_free_hw_state(crtc_state);
7820 
7821 	/* FIXME: before the switch to atomic started, a new pipe_config was
7822 	 * kzalloc'd. Code that depends on any field being zero should be
7823 	 * fixed, so that the crtc_state can be safely duplicated. For now,
7824 	 * only fields that are known not to cause problems are preserved. */
7825 
7826 	saved_state->uapi = crtc_state->uapi;
7827 	saved_state->inherited = crtc_state->inherited;
7828 	saved_state->scaler_state = crtc_state->scaler_state;
7829 	saved_state->shared_dpll = crtc_state->shared_dpll;
7830 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7831 	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
7832 	       sizeof(saved_state->icl_port_dplls));
7833 	saved_state->crc_enabled = crtc_state->crc_enabled;
7834 	if (IS_G4X(dev_priv) ||
7835 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7836 		saved_state->wm = crtc_state->wm;
7837 
7838 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7839 	kfree(saved_state);
7840 
7841 	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
7842 
7843 	return 0;
7844 }
7845 
7846 static int
7847 intel_modeset_pipe_config(struct intel_atomic_state *state,
7848 			  struct intel_crtc_state *pipe_config)
7849 {
7850 	struct drm_crtc *crtc = pipe_config->uapi.crtc;
7851 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7852 	struct drm_connector *connector;
7853 	struct drm_connector_state *connector_state;
7854 	int base_bpp, ret, i;
7855 	bool retry = true;
7856 
7857 	pipe_config->cpu_transcoder =
7858 		(enum transcoder) to_intel_crtc(crtc)->pipe;
7859 
7860 	/*
7861 	 * Sanitize sync polarity flags based on requested ones. If neither
7862 	 * positive nor negative polarity is requested, treat this as meaning
7863 	 * negative polarity.
7864 	 */
7865 	if (!(pipe_config->hw.adjusted_mode.flags &
7866 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
7867 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
7868 
7869 	if (!(pipe_config->hw.adjusted_mode.flags &
7870 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
7871 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
7872 
7873 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
7874 					pipe_config);
7875 	if (ret)
7876 		return ret;
7877 
7878 	base_bpp = pipe_config->pipe_bpp;
7879 
7880 	/*
7881 	 * Determine the real pipe dimensions. Note that stereo modes can
7882 	 * increase the actual pipe size due to the frame doubling and
7883 	 * insertion of additional space for blanks between the frames. This
7884 	 * is stored in the crtc timings. We use the requested mode to do this
7885 	 * computation to clearly distinguish it from the adjusted mode, which
7886 	 * can be changed by the connectors in the below retry loop.
7887 	 */
7888 	drm_mode_get_hv_timing(&pipe_config->hw.mode,
7889 			       &pipe_config->pipe_src_w,
7890 			       &pipe_config->pipe_src_h);
7891 
7892 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7893 		struct intel_encoder *encoder =
7894 			to_intel_encoder(connector_state->best_encoder);
7895 
7896 		if (connector_state->crtc != crtc)
7897 			continue;
7898 
7899 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
7900 			drm_dbg_kms(&i915->drm,
7901 				    "rejecting invalid cloning configuration\n");
7902 			return -EINVAL;
7903 		}
7904 
7905 		/*
7906 		 * Determine output_types before calling the .compute_config()
7907 		 * hooks so that the hooks can use this information safely.
7908 		 */
7909 		if (encoder->compute_output_type)
7910 			pipe_config->output_types |=
7911 				BIT(encoder->compute_output_type(encoder, pipe_config,
7912 								 connector_state));
7913 		else
7914 			pipe_config->output_types |= BIT(encoder->type);
7915 	}
7916 
7917 encoder_retry:
7918 	/* Ensure the port clock defaults are reset when retrying. */
7919 	pipe_config->port_clock = 0;
7920 	pipe_config->pixel_multiplier = 1;
7921 
7922 	/* Fill in default crtc timings, allow encoders to overwrite them. */
7923 	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
7924 			      CRTC_STEREO_DOUBLE);
7925 
7926 	/* Pass our mode to the connectors and the CRTC to give them a chance to
7927 	 * adjust it according to limitations or connector properties, and also
7928 	 * a chance to reject the mode entirely.
7929 	 */
7930 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7931 		struct intel_encoder *encoder =
7932 			to_intel_encoder(connector_state->best_encoder);
7933 
7934 		if (connector_state->crtc != crtc)
7935 			continue;
7936 
7937 		ret = encoder->compute_config(encoder, pipe_config,
7938 					      connector_state);
7939 		if (ret < 0) {
7940 			if (ret != -EDEADLK)
7941 				drm_dbg_kms(&i915->drm,
7942 					    "Encoder config failure: %d\n",
7943 					    ret);
7944 			return ret;
7945 		}
7946 	}
7947 
7948 	/* Set default port clock if not overwritten by the encoder. Needs to be
7949 	 * done afterwards in case the encoder adjusts the mode. */
7950 	if (!pipe_config->port_clock)
7951 		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
7952 			* pipe_config->pixel_multiplier;
7953 
7954 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
7955 	if (ret == -EDEADLK)
7956 		return ret;
7957 	if (ret < 0) {
7958 		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
7959 		return ret;
7960 	}
7961 
7962 	if (ret == I915_DISPLAY_CONFIG_RETRY) {
7963 		if (drm_WARN(&i915->drm, !retry,
7964 			     "loop in pipe configuration computation\n"))
7965 			return -EINVAL;
7966 
7967 		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
7968 		retry = false;
7969 		goto encoder_retry;
7970 	}
7971 
7972 	/* Dithering seems not to pass bits through correctly when it should, so
7973 	 * only enable it on 6bpc panels and when it's not a compliance
7974 	 * test requesting a 6bpc video pattern.
7975 	 */
7976 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
7977 		!pipe_config->dither_force_disable;
7978 	drm_dbg_kms(&i915->drm,
7979 		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
7980 		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
7981 
7982 	return 0;
7983 }
7984 
7985 static int
7986 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
7987 {
7988 	struct intel_atomic_state *state =
7989 		to_intel_atomic_state(crtc_state->uapi.state);
7990 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7991 	struct drm_connector_state *conn_state;
7992 	struct drm_connector *connector;
7993 	int i;
7994 
7995 	for_each_new_connector_in_state(&state->base, connector,
7996 					conn_state, i) {
7997 		struct intel_encoder *encoder =
7998 			to_intel_encoder(conn_state->best_encoder);
7999 		int ret;
8000 
8001 		if (conn_state->crtc != &crtc->base ||
8002 		    !encoder->compute_config_late)
8003 			continue;
8004 
8005 		ret = encoder->compute_config_late(encoder, crtc_state,
8006 						   conn_state);
8007 		if (ret)
8008 			return ret;
8009 	}
8010 
8011 	return 0;
8012 }
8013 
8014 bool intel_fuzzy_clock_check(int clock1, int clock2)
8015 {
8016 	int diff;
8017 
8018 	if (clock1 == clock2)
8019 		return true;
8020 
8021 	if (!clock1 || !clock2)
8022 		return false;
8023 
8024 	diff = abs(clock1 - clock2);
8025 
8026 	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
8027 		return true;
8028 
8029 	return false;
8030 }
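/*
 * Worked example for the tolerance above: the clocks are accepted when
 * diff < 5% of (clock1 + clock2), i.e. roughly within 10% of their mean.
 * E.g. clock1 = 100000, clock2 = 104000: diff = 4000 and
 * (4000 + 204000) * 100 / 204000 = 101 < 105, so they match; with
 * clock2 = 115000 the same expression yields 106 >= 105 and the pair
 * is rejected.
 */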
8031 
8032 static bool
8033 intel_compare_m_n(unsigned int m, unsigned int n,
8034 		  unsigned int m2, unsigned int n2,
8035 		  bool exact)
8036 {
8037 	if (m == m2 && n == n2)
8038 		return true;
8039 
8040 	if (exact || !m || !n || !m2 || !n2)
8041 		return false;
8042 
8043 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
8044 
8045 	if (n > n2) {
8046 		while (n > n2) {
8047 			m2 <<= 1;
8048 			n2 <<= 1;
8049 		}
8050 	} else if (n < n2) {
8051 		while (n < n2) {
8052 			m <<= 1;
8053 			n <<= 1;
8054 		}
8055 	}
8056 
8057 	if (n != n2)
8058 		return false;
8059 
8060 	return intel_fuzzy_clock_check(m, m2);
8061 }
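/*
 * Worked example for the shifting above: comparing m/n = 63/16384 against
 * m2/n2 = 125/32768, n < n2, so m/n is doubled once to 126/32768. The
 * denominators now match and the call reduces to
 * intel_fuzzy_clock_check(126, 125), which accepts the pair since the
 * numerators are well inside the fuzzy tolerance.
 */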
8062 
8063 static bool
8064 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
8065 		       const struct intel_link_m_n *m2_n2,
8066 		       bool exact)
8067 {
8068 	return m_n->tu == m2_n2->tu &&
8069 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
8070 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
8071 		intel_compare_m_n(m_n->link_m, m_n->link_n,
8072 				  m2_n2->link_m, m2_n2->link_n, exact);
8073 }
8074 
8075 static bool
8076 intel_compare_infoframe(const union hdmi_infoframe *a,
8077 			const union hdmi_infoframe *b)
8078 {
8079 	return memcmp(a, b, sizeof(*a)) == 0;
8080 }
8081 
8082 static bool
8083 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
8084 			 const struct drm_dp_vsc_sdp *b)
8085 {
8086 	return memcmp(a, b, sizeof(*a)) == 0;
8087 }
8088 
8089 static void
8090 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
8091 			       bool fastset, const char *name,
8092 			       const union hdmi_infoframe *a,
8093 			       const union hdmi_infoframe *b)
8094 {
8095 	if (fastset) {
8096 		if (!drm_debug_enabled(DRM_UT_KMS))
8097 			return;
8098 
8099 		drm_dbg_kms(&dev_priv->drm,
8100 			    "fastset mismatch in %s infoframe\n", name);
8101 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8102 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
8103 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8104 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
8105 	} else {
8106 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
8107 		drm_err(&dev_priv->drm, "expected:\n");
8108 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
8109 		drm_err(&dev_priv->drm, "found:\n");
8110 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
8111 	}
8112 }
8113 
8114 static void
8115 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
8116 				bool fastset, const char *name,
8117 				const struct drm_dp_vsc_sdp *a,
8118 				const struct drm_dp_vsc_sdp *b)
8119 {
8120 	if (fastset) {
8121 		if (!drm_debug_enabled(DRM_UT_KMS))
8122 			return;
8123 
8124 		drm_dbg_kms(&dev_priv->drm,
8125 			    "fastset mismatch in %s dp sdp\n", name);
8126 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8127 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
8128 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8129 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
8130 	} else {
8131 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
8132 		drm_err(&dev_priv->drm, "expected:\n");
8133 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
8134 		drm_err(&dev_priv->drm, "found:\n");
8135 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
8136 	}
8137 }
8138 
8139 static void __printf(4, 5)
8140 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
8141 		     const char *name, const char *format, ...)
8142 {
8143 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8144 	struct va_format vaf;
8145 	va_list args;
8146 
8147 	va_start(args, format);
8148 	vaf.fmt = format;
8149 	vaf.va = &args;
8150 
8151 	if (fastset)
8152 		drm_dbg_kms(&i915->drm,
8153 			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
8154 			    crtc->base.base.id, crtc->base.name, name, &vaf);
8155 	else
8156 		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
8157 			crtc->base.base.id, crtc->base.name, name, &vaf);
8158 
8159 	va_end(args);
8160 }
8161 
8162 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
8163 {
8164 	if (dev_priv->params.fastboot != -1)
8165 		return dev_priv->params.fastboot;
8166 
8167 	/* Enable fastboot by default on Skylake and newer */
8168 	if (DISPLAY_VER(dev_priv) >= 9)
8169 		return true;
8170 
8171 	/* Enable fastboot by default on VLV and CHV */
8172 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8173 		return true;
8174 
8175 	/* Disabled by default on all others */
8176 	return false;
8177 }
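/*
 * Note: dev_priv->params.fastboot mirrors the i915.fastboot module
 * parameter (default -1, i.e. per-platform auto), so e.g. booting with
 * i915.fastboot=0 forces the full initial modeset even on the platforms
 * whitelisted above.
 */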
8178 
8179 static bool
8180 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
8181 			  const struct intel_crtc_state *pipe_config,
8182 			  bool fastset)
8183 {
8184 	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
8185 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8186 	bool ret = true;
8187 	u32 bp_gamma = 0;
8188 	bool fixup_inherited = fastset &&
8189 		current_config->inherited && !pipe_config->inherited;
8190 
8191 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
8192 		drm_dbg_kms(&dev_priv->drm,
8193 			    "initial modeset and fastboot not set\n");
8194 		ret = false;
8195 	}
8196 
8197 #define PIPE_CONF_CHECK_X(name) do { \
8198 	if (current_config->name != pipe_config->name) { \
8199 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8200 				     "(expected 0x%08x, found 0x%08x)", \
8201 				     current_config->name, \
8202 				     pipe_config->name); \
8203 		ret = false; \
8204 	} \
8205 } while (0)
8206 
8207 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
8208 	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
8209 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8210 				     "(expected 0x%08x, found 0x%08x)", \
8211 				     current_config->name & (mask), \
8212 				     pipe_config->name & (mask)); \
8213 		ret = false; \
8214 	} \
8215 } while (0)
8216 
8217 #define PIPE_CONF_CHECK_I(name) do { \
8218 	if (current_config->name != pipe_config->name) { \
8219 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8220 				     "(expected %i, found %i)", \
8221 				     current_config->name, \
8222 				     pipe_config->name); \
8223 		ret = false; \
8224 	} \
8225 } while (0)
8226 
8227 #define PIPE_CONF_CHECK_BOOL(name) do { \
8228 	if (current_config->name != pipe_config->name) { \
8229 		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
8230 				     "(expected %s, found %s)", \
8231 				     yesno(current_config->name), \
8232 				     yesno(pipe_config->name)); \
8233 		ret = false; \
8234 	} \
8235 } while (0)
8236 
8237 /*
8238  * Checks state where we only read out the enabling, but not the entire
8239  * state itself (like full infoframes or ELD for audio). These states
8240  * require a full modeset on bootup to fix up.
8241  */
8242 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8243 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8244 		PIPE_CONF_CHECK_BOOL(name); \
8245 	} else { \
8246 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8247 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8248 				     yesno(current_config->name), \
8249 				     yesno(pipe_config->name)); \
8250 		ret = false; \
8251 	} \
8252 } while (0)
8253 
8254 #define PIPE_CONF_CHECK_P(name) do { \
8255 	if (current_config->name != pipe_config->name) { \
8256 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8257 				     "(expected %p, found %p)", \
8258 				     current_config->name, \
8259 				     pipe_config->name); \
8260 		ret = false; \
8261 	} \
8262 } while (0)
8263 
8264 #define PIPE_CONF_CHECK_M_N(name) do { \
8265 	if (!intel_compare_link_m_n(&current_config->name, \
8266 				    &pipe_config->name,\
8267 				    !fastset)) { \
8268 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8269 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8270 				     "found tu %i, gmch %i/%i link %i/%i)", \
8271 				     current_config->name.tu, \
8272 				     current_config->name.gmch_m, \
8273 				     current_config->name.gmch_n, \
8274 				     current_config->name.link_m, \
8275 				     current_config->name.link_n, \
8276 				     pipe_config->name.tu, \
8277 				     pipe_config->name.gmch_m, \
8278 				     pipe_config->name.gmch_n, \
8279 				     pipe_config->name.link_m, \
8280 				     pipe_config->name.link_n); \
8281 		ret = false; \
8282 	} \
8283 } while (0)
8284 
8285 /* This is required for BDW+ where there is only one set of registers for
8286  * switching between high and low RR.
8287  * This macro can be used whenever a comparison has to be made between one
8288  * hw state and multiple sw state variables.
8289  */
8290 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8291 	if (!intel_compare_link_m_n(&current_config->name, \
8292 				    &pipe_config->name, !fastset) && \
8293 	    !intel_compare_link_m_n(&current_config->alt_name, \
8294 				    &pipe_config->name, !fastset)) { \
8295 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8296 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8297 				     "or tu %i gmch %i/%i link %i/%i, " \
8298 				     "found tu %i, gmch %i/%i link %i/%i)", \
8299 				     current_config->name.tu, \
8300 				     current_config->name.gmch_m, \
8301 				     current_config->name.gmch_n, \
8302 				     current_config->name.link_m, \
8303 				     current_config->name.link_n, \
8304 				     current_config->alt_name.tu, \
8305 				     current_config->alt_name.gmch_m, \
8306 				     current_config->alt_name.gmch_n, \
8307 				     current_config->alt_name.link_m, \
8308 				     current_config->alt_name.link_n, \
8309 				     pipe_config->name.tu, \
8310 				     pipe_config->name.gmch_m, \
8311 				     pipe_config->name.gmch_n, \
8312 				     pipe_config->name.link_m, \
8313 				     pipe_config->name.link_n); \
8314 		ret = false; \
8315 	} \
8316 } while (0)
8317 
8318 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8319 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
8320 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8321 				     "(%x) (expected %i, found %i)", \
8322 				     (mask), \
8323 				     current_config->name & (mask), \
8324 				     pipe_config->name & (mask)); \
8325 		ret = false; \
8326 	} \
8327 } while (0)
8328 
8329 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8330 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8331 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8332 				     "(expected %i, found %i)", \
8333 				     current_config->name, \
8334 				     pipe_config->name); \
8335 		ret = false; \
8336 	} \
8337 } while (0)
8338 
8339 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8340 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
8341 				     &pipe_config->infoframes.name)) { \
8342 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8343 					       &current_config->infoframes.name, \
8344 					       &pipe_config->infoframes.name); \
8345 		ret = false; \
8346 	} \
8347 } while (0)
8348 
8349 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8350 	if (!current_config->has_psr && !pipe_config->has_psr && \
8351 	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8352 				      &pipe_config->infoframes.name)) { \
8353 		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8354 						&current_config->infoframes.name, \
8355 						&pipe_config->infoframes.name); \
8356 		ret = false; \
8357 	} \
8358 } while (0)
8359 
8360 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8361 	if (current_config->name1 != pipe_config->name1) { \
8362 		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8363 				"(expected %i, found %i, won't compare lut values)", \
8364 				current_config->name1, \
8365 				pipe_config->name1); \
8366 		ret = false;\
8367 	} else { \
8368 		if (!intel_color_lut_equal(current_config->name2, \
8369 					pipe_config->name2, pipe_config->name1, \
8370 					bit_precision)) { \
8371 			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8372 					"hw_state doesn't match sw_state"); \
8373 			ret = false; \
8374 		} \
8375 	} \
8376 } while (0)
8377 
8378 #define PIPE_CONF_QUIRK(quirk) \
8379 	((current_config->quirks | pipe_config->quirks) & (quirk))
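/*
 * For reference, one hand-expanded instance of the checks that follow:
 * modulo the do { } while (0) wrapper, PIPE_CONF_CHECK_I(cpu_transcoder)
 * becomes
 *
 *	if (current_config->cpu_transcoder != pipe_config->cpu_transcoder) {
 *		pipe_config_mismatch(fastset, crtc, "cpu_transcoder",
 *				     "(expected %i, found %i)",
 *				     current_config->cpu_transcoder,
 *				     pipe_config->cpu_transcoder);
 *		ret = false;
 *	}
 */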
8380 
8381 	PIPE_CONF_CHECK_I(cpu_transcoder);
8382 
8383 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8384 	PIPE_CONF_CHECK_I(fdi_lanes);
8385 	PIPE_CONF_CHECK_M_N(fdi_m_n);
8386 
8387 	PIPE_CONF_CHECK_I(lane_count);
8388 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8389 
8390 	if (DISPLAY_VER(dev_priv) < 8) {
8391 		PIPE_CONF_CHECK_M_N(dp_m_n);
8392 
8393 		if (current_config->has_drrs)
8394 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
8395 	} else
8396 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8397 
8398 	PIPE_CONF_CHECK_X(output_types);
8399 
8400 	/* FIXME do the readout properly and get rid of this quirk */
8401 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8402 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8403 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8404 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8405 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8406 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8407 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8408 
8409 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8410 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8411 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8412 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8413 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8414 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8415 
8416 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8417 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8418 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8419 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8420 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8421 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8422 
8423 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8424 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8425 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8426 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8427 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8428 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8429 
8430 		PIPE_CONF_CHECK_I(pixel_multiplier);
8431 
8432 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8433 				      DRM_MODE_FLAG_INTERLACE);
8434 
8435 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8436 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8437 					      DRM_MODE_FLAG_PHSYNC);
8438 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8439 					      DRM_MODE_FLAG_NHSYNC);
8440 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8441 					      DRM_MODE_FLAG_PVSYNC);
8442 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8443 					      DRM_MODE_FLAG_NVSYNC);
8444 		}
8445 	}
8446 
8447 	PIPE_CONF_CHECK_I(output_format);
8448 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8449 	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8450 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8451 		PIPE_CONF_CHECK_BOOL(limited_color_range);
8452 
8453 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8454 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8455 	PIPE_CONF_CHECK_BOOL(has_infoframe);
8456 	/* FIXME do the readout properly and get rid of this quirk */
8457 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8458 		PIPE_CONF_CHECK_BOOL(fec_enable);
8459 
8460 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8461 
8462 	PIPE_CONF_CHECK_X(gmch_pfit.control);
8463 	/* pfit ratios are autocomputed by the hw on gen4+ */
8464 	if (DISPLAY_VER(dev_priv) < 4)
8465 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8466 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8467 
8468 	/*
8469 	 * Changing the EDP transcoder input mux
8470 	 * (A_ONOFF vs. A_ON) requires a full modeset.
8471 	 */
8472 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8473 
8474 	if (!fastset) {
8475 		PIPE_CONF_CHECK_I(pipe_src_w);
8476 		PIPE_CONF_CHECK_I(pipe_src_h);
8477 
8478 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8479 		if (current_config->pch_pfit.enabled) {
8480 			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8481 			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8482 			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8483 			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8484 		}
8485 
8486 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8487 		/* FIXME do the readout properly and get rid of this quirk */
8488 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8489 			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8490 
8491 		PIPE_CONF_CHECK_X(gamma_mode);
8492 		if (IS_CHERRYVIEW(dev_priv))
8493 			PIPE_CONF_CHECK_X(cgm_mode);
8494 		else
8495 			PIPE_CONF_CHECK_X(csc_mode);
8496 		PIPE_CONF_CHECK_BOOL(gamma_enable);
8497 		PIPE_CONF_CHECK_BOOL(csc_enable);
8498 
8499 		PIPE_CONF_CHECK_I(linetime);
8500 		PIPE_CONF_CHECK_I(ips_linetime);
8501 
8502 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8503 		if (bp_gamma)
8504 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8505 
8506 		PIPE_CONF_CHECK_BOOL(has_psr);
8507 		PIPE_CONF_CHECK_BOOL(has_psr2);
8508 		PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
8509 		PIPE_CONF_CHECK_I(dc3co_exitline);
8510 	}
8511 
8512 	PIPE_CONF_CHECK_BOOL(double_wide);
8513 
8514 	if (dev_priv->dpll.mgr)
8515 		PIPE_CONF_CHECK_P(shared_dpll);
8516 
8517 	/* FIXME do the readout properly and get rid of this quirk */
8518 	if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8519 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8520 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8521 		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8522 		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8523 		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8524 		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8525 		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8526 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8527 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8528 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8529 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8530 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8531 		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8532 		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8533 		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8534 		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8535 		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8536 		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8537 		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8538 		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8539 		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8540 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8541 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8542 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8543 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8544 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8545 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8546 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8547 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8548 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8549 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8550 	}
8551 
8552 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8553 		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8554 		PIPE_CONF_CHECK_X(dsi_pll.div);
8555 
8556 		if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8557 			PIPE_CONF_CHECK_I(pipe_bpp);
8558 
8559 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8560 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8561 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8562 
8563 		PIPE_CONF_CHECK_I(min_voltage_level);
8564 	}
8565 
8566 	if (fastset && (current_config->has_psr || pipe_config->has_psr))
8567 		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
8568 					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
8569 	else
8570 		PIPE_CONF_CHECK_X(infoframes.enable);
8571 
8572 	PIPE_CONF_CHECK_X(infoframes.gcp);
8573 	PIPE_CONF_CHECK_INFOFRAME(avi);
8574 	PIPE_CONF_CHECK_INFOFRAME(spd);
8575 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
8576 	PIPE_CONF_CHECK_INFOFRAME(drm);
8577 	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8578 
8579 	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8580 	PIPE_CONF_CHECK_I(master_transcoder);
8581 	PIPE_CONF_CHECK_BOOL(bigjoiner);
8582 	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8583 	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8584 
8585 	PIPE_CONF_CHECK_I(dsc.compression_enable);
8586 	PIPE_CONF_CHECK_I(dsc.dsc_split);
8587 	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8588 
8589 	PIPE_CONF_CHECK_BOOL(splitter.enable);
8590 	PIPE_CONF_CHECK_I(splitter.link_count);
8591 	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8592 
8593 	PIPE_CONF_CHECK_I(mst_master_transcoder);
8594 
8595 	PIPE_CONF_CHECK_BOOL(vrr.enable);
8596 	PIPE_CONF_CHECK_I(vrr.vmin);
8597 	PIPE_CONF_CHECK_I(vrr.vmax);
8598 	PIPE_CONF_CHECK_I(vrr.flipline);
8599 	PIPE_CONF_CHECK_I(vrr.pipeline_full);
8600 	PIPE_CONF_CHECK_I(vrr.guardband);
8601 
8602 #undef PIPE_CONF_CHECK_X
8603 #undef PIPE_CONF_CHECK_I
8604 #undef PIPE_CONF_CHECK_BOOL
8605 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8606 #undef PIPE_CONF_CHECK_P
8607 #undef PIPE_CONF_CHECK_FLAGS
8608 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8609 #undef PIPE_CONF_CHECK_COLOR_LUT
8610 #undef PIPE_CONF_QUIRK
8611 
8612 	return ret;
8613 }
8614 
8615 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8616 					   const struct intel_crtc_state *pipe_config)
8617 {
8618 	if (pipe_config->has_pch_encoder) {
8619 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8620 							    &pipe_config->fdi_m_n);
8621 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8622 
8623 		/*
8624 		 * FDI already provided one idea for the dotclock.
8625 		 * Yell if the encoder disagrees.
8626 		 */
8627 		drm_WARN(&dev_priv->drm,
8628 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8629 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8630 			 fdi_dotclock, dotclock);
8631 	}
8632 }
8633 
8634 static void verify_wm_state(struct intel_crtc *crtc,
8635 			    struct intel_crtc_state *new_crtc_state)
8636 {
8637 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8638 	struct skl_hw_state {
8639 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
8640 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
8641 		struct skl_pipe_wm wm;
8642 	} *hw;
8643 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
8644 	int level, max_level = ilk_wm_max_level(dev_priv);
8645 	struct intel_plane *plane;
8646 	u8 hw_enabled_slices;
8647 
8648 	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
8649 		return;
8650 
8651 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
8652 	if (!hw)
8653 		return;
8654 
8655 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
8656 
8657 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
8658 
8659 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
8660 
8661 	if (DISPLAY_VER(dev_priv) >= 11 &&
8662 	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
8663 		drm_err(&dev_priv->drm,
8664 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
8665 			dev_priv->dbuf.enabled_slices,
8666 			hw_enabled_slices);
8667 
8668 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8669 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
8670 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
8671 
8672 		/* Watermarks */
8673 		for (level = 0; level <= max_level; level++) {
8674 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
8675 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
8676 
8677 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
8678 				continue;
8679 
8680 			drm_err(&dev_priv->drm,
8681 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8682 				plane->base.base.id, plane->base.name, level,
8683 				sw_wm_level->enable,
8684 				sw_wm_level->blocks,
8685 				sw_wm_level->lines,
8686 				hw_wm_level->enable,
8687 				hw_wm_level->blocks,
8688 				hw_wm_level->lines);
8689 		}
8690 
8691 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
8692 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
8693 
8694 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8695 			drm_err(&dev_priv->drm,
8696 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8697 				plane->base.base.id, plane->base.name,
8698 				sw_wm_level->enable,
8699 				sw_wm_level->blocks,
8700 				sw_wm_level->lines,
8701 				hw_wm_level->enable,
8702 				hw_wm_level->blocks,
8703 				hw_wm_level->lines);
8704 		}
8705 
8706 		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
8707 		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
8708 
8709 		if (HAS_HW_SAGV_WM(dev_priv) &&
8710 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8711 			drm_err(&dev_priv->drm,
8712 				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8713 				plane->base.base.id, plane->base.name,
8714 				sw_wm_level->enable,
8715 				sw_wm_level->blocks,
8716 				sw_wm_level->lines,
8717 				hw_wm_level->enable,
8718 				hw_wm_level->blocks,
8719 				hw_wm_level->lines);
8720 		}
8721 
8722 		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
8723 		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
8724 
8725 		if (HAS_HW_SAGV_WM(dev_priv) &&
8726 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8727 			drm_err(&dev_priv->drm,
8728 				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8729 				plane->base.base.id, plane->base.name,
8730 				sw_wm_level->enable,
8731 				sw_wm_level->blocks,
8732 				sw_wm_level->lines,
8733 				hw_wm_level->enable,
8734 				hw_wm_level->blocks,
8735 				hw_wm_level->lines);
8736 		}
8737 
8738 		/* DDB */
8739 		hw_ddb_entry = &hw->ddb_y[plane->id];
8740 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
8741 
8742 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
8743 			drm_err(&dev_priv->drm,
8744 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
8745 				plane->base.base.id, plane->base.name,
8746 				sw_ddb_entry->start, sw_ddb_entry->end,
8747 				hw_ddb_entry->start, hw_ddb_entry->end);
8748 		}
8749 	}
8750 
8751 	kfree(hw);
8752 }
8753 
8754 static void
8755 verify_connector_state(struct intel_atomic_state *state,
8756 		       struct intel_crtc *crtc)
8757 {
8758 	struct drm_connector *connector;
8759 	struct drm_connector_state *new_conn_state;
8760 	int i;
8761 
8762 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
8763 		struct drm_encoder *encoder = connector->encoder;
8764 		struct intel_crtc_state *crtc_state = NULL;
8765 
8766 		if (new_conn_state->crtc != &crtc->base)
8767 			continue;
8768 
8769 		if (crtc)
8770 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
8771 
8772 		intel_connector_verify_state(crtc_state, new_conn_state);
8773 
8774 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
8775 		     "connector's atomic encoder doesn't match legacy encoder\n");
8776 	}
8777 }
8778 
8779 static void
8780 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
8781 {
8782 	struct intel_encoder *encoder;
8783 	struct drm_connector *connector;
8784 	struct drm_connector_state *old_conn_state, *new_conn_state;
8785 	int i;
8786 
8787 	for_each_intel_encoder(&dev_priv->drm, encoder) {
8788 		bool enabled = false, found = false;
8789 		enum pipe pipe;
8790 
8791 		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
8792 			    encoder->base.base.id,
8793 			    encoder->base.name);
8794 
8795 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
8796 						   new_conn_state, i) {
8797 			if (old_conn_state->best_encoder == &encoder->base)
8798 				found = true;
8799 
8800 			if (new_conn_state->best_encoder != &encoder->base)
8801 				continue;
8802 			found = enabled = true;
8803 
8804 			I915_STATE_WARN(new_conn_state->crtc !=
8805 					encoder->base.crtc,
8806 			     "connector's crtc doesn't match encoder crtc\n");
8807 		}
8808 
8809 		if (!found)
8810 			continue;
8811 
8812 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
8813 		     "encoder's enabled state mismatch "
8814 		     "(expected %i, found %i)\n",
8815 		     !!encoder->base.crtc, enabled);
8816 
8817 		if (!encoder->base.crtc) {
8818 			bool active;
8819 
8820 			active = encoder->get_hw_state(encoder, &pipe);
8821 			I915_STATE_WARN(active,
8822 			     "encoder detached but still enabled on pipe %c.\n",
8823 			     pipe_name(pipe));
8824 		}
8825 	}
8826 }
8827 
8828 static void
8829 verify_crtc_state(struct intel_crtc *crtc,
8830 		  struct intel_crtc_state *old_crtc_state,
8831 		  struct intel_crtc_state *new_crtc_state)
8832 {
8833 	struct drm_device *dev = crtc->base.dev;
8834 	struct drm_i915_private *dev_priv = to_i915(dev);
8835 	struct intel_encoder *encoder;
8836 	struct intel_crtc_state *pipe_config = old_crtc_state;
8837 	struct drm_atomic_state *state = old_crtc_state->uapi.state;
8838 	struct intel_crtc *master = crtc;
8839 
8840 	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
8841 	intel_crtc_free_hw_state(old_crtc_state);
8842 	intel_crtc_state_reset(old_crtc_state, crtc);
8843 	old_crtc_state->uapi.state = state;
8844 
8845 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
8846 		    crtc->base.name);
8847 
8848 	pipe_config->hw.enable = new_crtc_state->hw.enable;
8849 
8850 	intel_crtc_get_pipe_config(pipe_config);
8851 
8852 	/* we keep both pipes enabled on 830 */
8853 	if (IS_I830(dev_priv) && pipe_config->hw.active)
8854 		pipe_config->hw.active = new_crtc_state->hw.active;
8855 
8856 	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
8857 			"crtc active state doesn't match with hw state "
8858 			"(expected %i, found %i)\n",
8859 			new_crtc_state->hw.active, pipe_config->hw.active);
8860 
8861 	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
8862 			"transitional active state does not match atomic hw state "
8863 			"(expected %i, found %i)\n",
8864 			new_crtc_state->hw.active, crtc->active);
8865 
8866 	if (new_crtc_state->bigjoiner_slave)
8867 		master = new_crtc_state->bigjoiner_linked_crtc;
8868 
8869 	for_each_encoder_on_crtc(dev, &master->base, encoder) {
8870 		enum pipe pipe;
8871 		bool active;
8872 
8873 		active = encoder->get_hw_state(encoder, &pipe);
8874 		I915_STATE_WARN(active != new_crtc_state->hw.active,
8875 				"[ENCODER:%i] active %i with crtc active %i\n",
8876 				encoder->base.base.id, active,
8877 				new_crtc_state->hw.active);
8878 
8879 		I915_STATE_WARN(active && master->pipe != pipe,
8880 				"Encoder connected to wrong pipe %c\n",
8881 				pipe_name(pipe));
8882 
8883 		if (active)
8884 			intel_encoder_get_config(encoder, pipe_config);
8885 	}
8886 
8887 	if (!new_crtc_state->hw.active)
8888 		return;
8889 
8890 	if (new_crtc_state->bigjoiner_slave)
8891 		/* No PLLs set for slave */
8892 		pipe_config->shared_dpll = NULL;
8893 
8894 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
8895 
8896 	if (!intel_pipe_config_compare(new_crtc_state,
8897 				       pipe_config, false)) {
8898 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
8899 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
8900 		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
8901 	}
8902 }
8903 
8904 static void
8905 intel_verify_planes(struct intel_atomic_state *state)
8906 {
8907 	struct intel_plane *plane;
8908 	const struct intel_plane_state *plane_state;
8909 	int i;
8910 
8911 	for_each_new_intel_plane_in_state(state, plane,
8912 					  plane_state, i)
8913 		assert_plane(plane, plane_state->planar_slave ||
8914 			     plane_state->uapi.visible);
8915 }
8916 
8917 static void
8918 verify_single_dpll_state(struct drm_i915_private *dev_priv,
8919 			 struct intel_shared_dpll *pll,
8920 			 struct intel_crtc *crtc,
8921 			 struct intel_crtc_state *new_crtc_state)
8922 {
8923 	struct intel_dpll_hw_state dpll_hw_state;
8924 	u8 pipe_mask;
8925 	bool active;
8926 
8927 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
8928 
8929 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
8930 
8931 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
8932 
8933 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
8934 		I915_STATE_WARN(!pll->on && pll->active_mask,
8935 		     "pll in active use but not on in sw tracking\n");
8936 		I915_STATE_WARN(pll->on && !pll->active_mask,
8937 		     "pll is on but not used by any active pipe\n");
8938 		I915_STATE_WARN(pll->on != active,
8939 		     "pll on state mismatch (expected %i, found %i)\n",
8940 		     pll->on, active);
8941 	}
8942 
8943 	if (!crtc) {
8944 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
8945 				"more active pll users than references: 0x%x vs 0x%x\n",
8946 				pll->active_mask, pll->state.pipe_mask);
8947 
8948 		return;
8949 	}
8950 
8951 	pipe_mask = BIT(crtc->pipe);
8952 
8953 	if (new_crtc_state->hw.active)
8954 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
8955 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
8956 				pipe_name(crtc->pipe), pll->active_mask);
8957 	else
8958 		I915_STATE_WARN(pll->active_mask & pipe_mask,
8959 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
8960 				pipe_name(crtc->pipe), pll->active_mask);
8961 
8962 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
8963 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
8964 			pipe_mask, pll->state.pipe_mask);
8965 
8966 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
8967 					  &dpll_hw_state,
8968 					  sizeof(dpll_hw_state)),
8969 			"pll hw state mismatch\n");
8970 }
8971 
8972 static void
8973 verify_shared_dpll_state(struct intel_crtc *crtc,
8974 			 struct intel_crtc_state *old_crtc_state,
8975 			 struct intel_crtc_state *new_crtc_state)
8976 {
8977 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8978 
8979 	if (new_crtc_state->shared_dpll)
8980 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
8981 
8982 	if (old_crtc_state->shared_dpll &&
8983 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
8984 		u8 pipe_mask = BIT(crtc->pipe);
8985 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
8986 
8987 		I915_STATE_WARN(pll->active_mask & pipe_mask,
8988 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
8989 				pipe_name(crtc->pipe), pll->active_mask);
8990 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
8991 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
8992 				pipe_name(crtc->pipe), pll->state.pipe_mask);
8993 	}
8994 }
8995 
8996 static void
8997 verify_mpllb_state(struct intel_atomic_state *state,
8998 		   struct intel_crtc_state *new_crtc_state)
8999 {
9000 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9001 	struct intel_mpllb_state mpllb_hw_state = { 0 };
9002 	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
9003 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
9004 	struct intel_encoder *encoder;
9005 
9006 	if (!IS_DG2(i915))
9007 		return;
9008 
9009 	if (!new_crtc_state->hw.active)
9010 		return;
9011 
9012 	if (new_crtc_state->bigjoiner_slave)
9013 		return;
9014 
9015 	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
9016 	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
9017 
9018 #define MPLLB_CHECK(name) do { \
9019 	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
9020 		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
9021 				     "(expected 0x%08x, found 0x%08x)", \
9022 				     mpllb_sw_state->name, \
9023 				     mpllb_hw_state.name); \
9024 	} \
9025 } while (0)
9026 
9027 	MPLLB_CHECK(mpllb_cp);
9028 	MPLLB_CHECK(mpllb_div);
9029 	MPLLB_CHECK(mpllb_div2);
9030 	MPLLB_CHECK(mpllb_fracn1);
9031 	MPLLB_CHECK(mpllb_fracn2);
9032 	MPLLB_CHECK(mpllb_sscen);
9033 	MPLLB_CHECK(mpllb_sscstep);
9034 
9035 	/*
9036 	 * ref_control is handled by the hardware/firmware and never
9037 	 * programmed by the software, but the proper values are supplied
9038 	 * in the bspec for verification purposes.
9039 	 */
9040 	MPLLB_CHECK(ref_control);
9041 
9042 #undef MPLLB_CHECK
9043 }
9044 
9045 static void
9046 intel_modeset_verify_crtc(struct intel_crtc *crtc,
9047 			  struct intel_atomic_state *state,
9048 			  struct intel_crtc_state *old_crtc_state,
9049 			  struct intel_crtc_state *new_crtc_state)
9050 {
9051 	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
9052 		return;
9053 
9054 	verify_wm_state(crtc, new_crtc_state);
9055 	verify_connector_state(state, crtc);
9056 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
9057 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
9058 	verify_mpllb_state(state, new_crtc_state);
9059 }
9060 
9061 static void
9062 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
9063 {
9064 	int i;
9065 
9066 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
9067 		verify_single_dpll_state(dev_priv,
9068 					 &dev_priv->dpll.shared_dplls[i],
9069 					 NULL, NULL);
9070 }
9071 
9072 static void
9073 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
9074 			      struct intel_atomic_state *state)
9075 {
9076 	verify_encoder_state(dev_priv, state);
9077 	verify_connector_state(state, NULL);
9078 	verify_disabled_dpll_state(dev_priv);
9079 }
9080 
9081 int intel_modeset_all_pipes(struct intel_atomic_state *state)
9082 {
9083 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9084 	struct intel_crtc *crtc;
9085 
9086 	/*
9087 	 * Add all pipes to the state, and force
9088 	 * a modeset on all the active ones.
9089 	 */
9090 	for_each_intel_crtc(&dev_priv->drm, crtc) {
9091 		struct intel_crtc_state *crtc_state;
9092 		int ret;
9093 
9094 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9095 		if (IS_ERR(crtc_state))
9096 			return PTR_ERR(crtc_state);
9097 
9098 		if (!crtc_state->hw.active ||
9099 		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
9100 			continue;
9101 
9102 		crtc_state->uapi.mode_changed = true;
9103 
9104 		ret = drm_atomic_add_affected_connectors(&state->base,
9105 							 &crtc->base);
9106 		if (ret)
9107 			return ret;
9108 
9109 		ret = intel_atomic_add_affected_planes(state, crtc);
9110 		if (ret)
9111 			return ret;
9112 
9113 		crtc_state->update_planes |= crtc_state->active_planes;
9114 	}
9115 
9116 	return 0;
9117 }
9118 
9119 static void
9120 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
9121 {
9122 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9123 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9124 	struct drm_display_mode adjusted_mode =
9125 		crtc_state->hw.adjusted_mode;
9126 
9127 	if (crtc_state->vrr.enable) {
9128 		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
9129 		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
9130 		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
9131 		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
9132 	}
9133 
9134 	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
9135 
9136 	crtc->mode_flags = crtc_state->mode_flags;
9137 
9138 	/*
9139 	 * The scanline counter increments at the leading edge of hsync.
9140 	 *
9141 	 * On most platforms it starts counting from vtotal-1 on the
9142 	 * first active line. That means the scanline counter value is
9143 	 * always one less than what we would expect. Ie. just after
9144 	 * start of vblank, which also occurs at start of hsync (on the
9145 	 * last active line), the scanline counter will read vblank_start-1.
9146 	 *
9147 	 * On gen2 the scanline counter starts counting from 1 instead
9148 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
9149 	 * to keep the value positive), instead of adding one.
9150 	 *
9151 	 * On HSW+ the behaviour of the scanline counter depends on the output
9152 	 * type. For DP ports it behaves like most other platforms, but on HDMI
9153 	 * there's an extra 1 line difference. So we need to add two instead of
9154 	 * one to the value.
9155 	 *
9156 	 * On VLV/CHV DSI the scanline counter would appear to increment
9157 	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
9158 	 * that means we can't tell whether we're in vblank or not while
9159 	 * we're on that particular line. We must still set scanline_offset
9160 	 * to 1 so that the vblank timestamps come out correct when we query
9161 	 * the scanline counter from within the vblank interrupt handler.
9162 	 * However if queried just before the start of vblank we'll get an
9163 	 * answer that's slightly in the future.
9164 	 */
9165 	if (DISPLAY_VER(dev_priv) == 2) {
9166 		int vtotal;
9167 
9168 		vtotal = adjusted_mode.crtc_vtotal;
9169 		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9170 			vtotal /= 2;
9171 
9172 		crtc->scanline_offset = vtotal - 1;
9173 	} else if (HAS_DDI(dev_priv) &&
9174 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
9175 		crtc->scanline_offset = 2;
9176 	} else {
9177 		crtc->scanline_offset = 1;
9178 	}
9179 }
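/*
 * Sketch of how scanline_offset is consumed when the counter is read back
 * (simplified from the scanline readout elsewhere in the driver;
 * hw_position stands for whatever the hardware register reports):
 *
 *	scanline = (hw_position + crtc->scanline_offset) % vtotal;
 *
 * so with offset 1 a readout of vblank_start - 1 taken right at the start
 * of vblank maps back to vblank_start, as expected.
 */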
9180 
9181 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
9182 {
9183 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9184 	struct intel_crtc_state *new_crtc_state;
9185 	struct intel_crtc *crtc;
9186 	int i;
9187 
9188 	if (!dev_priv->display.crtc_compute_clock)
9189 		return;
9190 
9191 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9192 		if (!intel_crtc_needs_modeset(new_crtc_state))
9193 			continue;
9194 
9195 		intel_release_shared_dplls(state, crtc);
9196 	}
9197 }
9198 
9199 /*
9200  * This implements the workaround described in the "notes" section of the mode
9201  * set sequence documentation. When going from no pipes or single pipe to
9202  * multiple pipes, and planes are enabled after the pipe, we need to wait at
9203  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
9204  */
9205 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
9206 {
9207 	struct intel_crtc_state *crtc_state;
9208 	struct intel_crtc *crtc;
9209 	struct intel_crtc_state *first_crtc_state = NULL;
9210 	struct intel_crtc_state *other_crtc_state = NULL;
9211 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
9212 	int i;
9213 
9214 	/* look at all CRTCs that are going to be enabled during the modeset */
9215 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9216 		if (!crtc_state->hw.active ||
9217 		    !intel_crtc_needs_modeset(crtc_state))
9218 			continue;
9219 
9220 		if (first_crtc_state) {
9221 			other_crtc_state = crtc_state;
9222 			break;
9223 		} else {
9224 			first_crtc_state = crtc_state;
9225 			first_pipe = crtc->pipe;
9226 		}
9227 	}
9228 
9229 	/* No workaround needed? */
9230 	if (!first_crtc_state)
9231 		return 0;
9232 
9233 	/* w/a possibly needed, check how many crtc's are already enabled. */
9234 	for_each_intel_crtc(state->base.dev, crtc) {
9235 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9236 		if (IS_ERR(crtc_state))
9237 			return PTR_ERR(crtc_state);
9238 
9239 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
9240 
9241 		if (!crtc_state->hw.active ||
9242 		    intel_crtc_needs_modeset(crtc_state))
9243 			continue;
9244 
9245 		/* 2 or more enabled crtcs means no need for w/a */
9246 		if (enabled_pipe != INVALID_PIPE)
9247 			return 0;
9248 
9249 		enabled_pipe = crtc->pipe;
9250 	}
9251 
9252 	if (enabled_pipe != INVALID_PIPE)
9253 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
9254 	else if (other_crtc_state)
9255 		other_crtc_state->hsw_workaround_pipe = first_pipe;
9256 
9257 	return 0;
9258 }
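
/*
 * Consumer-side sketch (assumed, based on the w/a description above):
 * the hsw_workaround_pipe assigned here would be honoured in the CRTC
 * enable path roughly as
 *
 *	enum pipe wa_pipe = new_crtc_state->hsw_workaround_pipe;
 *
 *	if (IS_HASWELL(dev_priv) && wa_pipe != INVALID_PIPE) {
 *		intel_wait_for_vblank(dev_priv, wa_pipe);
 *		intel_wait_for_vblank(dev_priv, wa_pipe);
 *	}
 *
 * i.e. two vblank waits on the already-running pipe before planes get
 * enabled on the newly enabled pipe.
 */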
9259 
9260 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
9261 			   u8 active_pipes)
9262 {
9263 	const struct intel_crtc_state *crtc_state;
9264 	struct intel_crtc *crtc;
9265 	int i;
9266 
9267 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9268 		if (crtc_state->hw.active)
9269 			active_pipes |= BIT(crtc->pipe);
9270 		else
9271 			active_pipes &= ~BIT(crtc->pipe);
9272 	}
9273 
9274 	return active_pipes;
9275 }
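
/*
 * Usage sketch (hypothetical caller): global state trackers can feed
 * their currently tracked mask through this helper to derive the mask
 * as it will look after this commit, e.g.
 *
 *	u8 active_pipes = intel_calc_active_pipes(state, old_active_pipes);
 *
 * where old_active_pipes is whatever mask the caller maintained before
 * the update; pipes untouched by this state keep their old bit.
 */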
9276 
9277 static int intel_modeset_checks(struct intel_atomic_state *state)
9278 {
9279 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9280 
9281 	state->modeset = true;
9282 
9283 	if (IS_HASWELL(dev_priv))
9284 		return hsw_mode_set_planes_workaround(state);
9285 
9286 	return 0;
9287 }
9288 
9289 /*
9290  * Handle calculation of various watermark data at the end of the atomic check
9291  * phase.  The code here should be run after the per-crtc and per-plane 'check'
9292  * handlers to ensure that all derived state has been updated.
9293  */
9294 static int calc_watermark_data(struct intel_atomic_state *state)
9295 {
9296 	struct drm_device *dev = state->base.dev;
9297 	struct drm_i915_private *dev_priv = to_i915(dev);
9298 
9299 	/* Is there platform-specific watermark information to calculate? */
9300 	if (dev_priv->display.compute_global_watermarks)
9301 		return dev_priv->display.compute_global_watermarks(state);
9302 
9303 	return 0;
9304 }
9305 
9306 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
9307 				     struct intel_crtc_state *new_crtc_state)
9308 {
9309 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
9310 		return;
9311 
9312 	new_crtc_state->uapi.mode_changed = false;
9313 	new_crtc_state->update_pipe = true;
9314 }
9315 
9316 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9317 				    struct intel_crtc_state *new_crtc_state)
9318 {
9319 	/*
9320 	 * If we're not doing the full modeset we want to
9321 	 * keep the current M/N values as they may be
9322 	 * sufficiently different to the computed values
9323 	 * to cause problems.
9324 	 *
9325 	 * FIXME: should really copy more fuzzy state here
9326 	 */
9327 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9328 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9329 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9330 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9331 }
9332 
9333 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9334 					  struct intel_crtc *crtc,
9335 					  u8 plane_ids_mask)
9336 {
9337 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9338 	struct intel_plane *plane;
9339 
9340 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9341 		struct intel_plane_state *plane_state;
9342 
9343 		if ((plane_ids_mask & BIT(plane->id)) == 0)
9344 			continue;
9345 
9346 		plane_state = intel_atomic_get_plane_state(state, plane);
9347 		if (IS_ERR(plane_state))
9348 			return PTR_ERR(plane_state);
9349 	}
9350 
9351 	return 0;
9352 }
9353 
9354 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
9355 				     struct intel_crtc *crtc)
9356 {
9357 	const struct intel_crtc_state *old_crtc_state =
9358 		intel_atomic_get_old_crtc_state(state, crtc);
9359 	const struct intel_crtc_state *new_crtc_state =
9360 		intel_atomic_get_new_crtc_state(state, crtc);
9361 
9362 	return intel_crtc_add_planes_to_state(state, crtc,
9363 					      old_crtc_state->enabled_planes |
9364 					      new_crtc_state->enabled_planes);
9365 }
9366 
9367 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9368 {
9369 	/* See {hsw,vlv,ivb}_plane_ratio() */
9370 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9371 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9372 		IS_IVYBRIDGE(dev_priv);
9373 }
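
/*
 * Why this matters, as a sketch (the exact ratios live in the
 * {hsw,vlv,ivb}_plane_ratio() helpers and are platform specific): on
 * these platforms the per-plane minimum cdclk scales with the number
 * of active planes, conceptually something like
 *
 *	min_cdclk = pixel_rate * plane_ratio(hweight8(active_planes));
 *
 * so enabling or disabling one plane can change the requirement of the
 * others, and those others must therefore be added to the state too.
 */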
9374 
9375 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
9376 					   struct intel_crtc *crtc,
9377 					   struct intel_crtc *other)
9378 {
9379 	const struct intel_plane_state *plane_state;
9380 	struct intel_plane *plane;
9381 	u8 plane_ids = 0;
9382 	int i;
9383 
9384 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9385 		if (plane->pipe == crtc->pipe)
9386 			plane_ids |= BIT(plane->id);
9387 	}
9388 
9389 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
9390 }
9391 
9392 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9393 {
9394 	const struct intel_crtc_state *crtc_state;
9395 	struct intel_crtc *crtc;
9396 	int i;
9397 
9398 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9399 		int ret;
9400 
9401 		if (!crtc_state->bigjoiner)
9402 			continue;
9403 
9404 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9405 						      crtc_state->bigjoiner_linked_crtc);
9406 		if (ret)
9407 			return ret;
9408 	}
9409 
9410 	return 0;
9411 }
9412 
9413 static int intel_atomic_check_planes(struct intel_atomic_state *state)
9414 {
9415 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9416 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9417 	struct intel_plane_state *plane_state;
9418 	struct intel_plane *plane;
9419 	struct intel_crtc *crtc;
9420 	int i, ret;
9421 
9422 	ret = icl_add_linked_planes(state);
9423 	if (ret)
9424 		return ret;
9425 
9426 	ret = intel_bigjoiner_add_affected_planes(state);
9427 	if (ret)
9428 		return ret;
9429 
9430 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9431 		ret = intel_plane_atomic_check(state, plane);
9432 		if (ret) {
9433 			drm_dbg_atomic(&dev_priv->drm,
9434 				       "[PLANE:%d:%s] atomic driver check failed\n",
9435 				       plane->base.base.id, plane->base.name);
9436 			return ret;
9437 		}
9438 	}
9439 
9440 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9441 					    new_crtc_state, i) {
9442 		u8 old_active_planes, new_active_planes;
9443 
9444 		ret = icl_check_nv12_planes(new_crtc_state);
9445 		if (ret)
9446 			return ret;
9447 
9448 		/*
9449 		 * On some platforms the number of active planes affects
9450 		 * the planes' minimum cdclk calculation. Add such planes
9451 		 * to the state before we compute the minimum cdclk.
9452 		 */
9453 		if (!active_planes_affects_min_cdclk(dev_priv))
9454 			continue;
9455 
9456 		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9457 		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9458 
9459 		if (hweight8(old_active_planes) == hweight8(new_active_planes))
9460 			continue;
9461 
9462 		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
9463 		if (ret)
9464 			return ret;
9465 	}
9466 
9467 	return 0;
9468 }
9469 
9470 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
9471 				    bool *need_cdclk_calc)
9472 {
9473 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9474 	const struct intel_cdclk_state *old_cdclk_state;
9475 	const struct intel_cdclk_state *new_cdclk_state;
9476 	struct intel_plane_state *plane_state;
9477 	struct intel_bw_state *new_bw_state;
9478 	struct intel_plane *plane;
9479 	int min_cdclk = 0;
9480 	enum pipe pipe;
9481 	int ret;
9482 	int i;
9483 	/*
9484 	 * active_planes bitmask has been updated, and potentially
9485 	 * affected planes are part of the state. We can now
9486 	 * compute the minimum cdclk for each plane.
9487 	 */
9488 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9489 		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
9490 		if (ret)
9491 			return ret;
9492 	}
9493 
9494 	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
9495 	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
9496 
9497 	if (new_cdclk_state &&
9498 	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
9499 		*need_cdclk_calc = true;
9500 
9501 	ret = dev_priv->display.bw_calc_min_cdclk(state);
9502 	if (ret)
9503 		return ret;
9504 
9505 	new_bw_state = intel_atomic_get_new_bw_state(state);
9506 
9507 	if (!new_cdclk_state || !new_bw_state)
9508 		return 0;
9509 
9510 	for_each_pipe(dev_priv, pipe) {
9511 		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
9512 
9513 		/*
9514 		 * Currently we only trigger a recomputation if cdclk needs to increase
9515 		 */
9516 		if (new_bw_state->min_cdclk > min_cdclk)
9517 			*need_cdclk_calc = true;
9518 	}
9519 
9520 	return 0;
9521 }
9522 
9523 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9524 {
9525 	struct intel_crtc_state *crtc_state;
9526 	struct intel_crtc *crtc;
9527 	int i;
9528 
9529 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9530 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9531 		int ret;
9532 
9533 		ret = intel_crtc_atomic_check(state, crtc);
9534 		if (ret) {
9535 			drm_dbg_atomic(&i915->drm,
9536 				       "[CRTC:%d:%s] atomic driver check failed\n",
9537 				       crtc->base.base.id, crtc->base.name);
9538 			return ret;
9539 		}
9540 	}
9541 
9542 	return 0;
9543 }
9544 
9545 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9546 					       u8 transcoders)
9547 {
9548 	const struct intel_crtc_state *new_crtc_state;
9549 	struct intel_crtc *crtc;
9550 	int i;
9551 
9552 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9553 		if (new_crtc_state->hw.enable &&
9554 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9555 		    intel_crtc_needs_modeset(new_crtc_state))
9556 			return true;
9557 	}
9558 
9559 	return false;
9560 }
9561 
9562 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
9563 					struct intel_crtc *crtc,
9564 					struct intel_crtc_state *old_crtc_state,
9565 					struct intel_crtc_state *new_crtc_state)
9566 {
9567 	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
9568 	struct intel_crtc *slave, *master;
9569 
9570 	/* slave being enabled, is the master still claiming this crtc? */
9571 	if (old_crtc_state->bigjoiner_slave) {
9572 		slave = crtc;
9573 		master = old_crtc_state->bigjoiner_linked_crtc;
9574 		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
9575 		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
9576 			goto claimed;
9577 	}
9578 
9579 	if (!new_crtc_state->bigjoiner)
9580 		return 0;
9581 
9582 	slave = intel_dsc_get_bigjoiner_secondary(crtc);
9583 	if (!slave) {
9584 		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
9585 			      "CRTC + 1 to be used, doesn't exist\n",
9586 			      crtc->base.base.id, crtc->base.name);
9587 		return -EINVAL;
9588 	}
9589 
9590 	new_crtc_state->bigjoiner_linked_crtc = slave;
9591 	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
9592 	master = crtc;
9593 	if (IS_ERR(slave_crtc_state))
9594 		return PTR_ERR(slave_crtc_state);
9595 
9596 	/* master being enabled, slave was already configured? */
9597 	if (slave_crtc_state->uapi.enable)
9598 		goto claimed;
9599 
9600 	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
9601 		      slave->base.base.id, slave->base.name);
9602 
9603 	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
9604 
9605 claimed:
9606 	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
9607 		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
9608 		      slave->base.base.id, slave->base.name,
9609 		      master->base.base.id, master->base.name);
9610 	return -EINVAL;
9611 }
9612 
9613 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9614 				 struct intel_crtc_state *master_crtc_state)
9615 {
9616 	struct intel_crtc_state *slave_crtc_state =
9617 		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9618 
9619 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9620 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9621 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9622 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9623 }
9624 
9625 /**
9626  * DOC: asynchronous flip implementation
9627  *
9628  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9629  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9630  * Correspondingly, support is currently added for primary plane only.
9631  *
9632  * Async flip can only change the plane surface address, so anything else
9633  * changing is rejected from the intel_atomic_check_async() function.
9634  * Once this check is cleared, flip done interrupt is enabled using
9635  * the intel_crtc_enable_flip_done() function.
9636  *
9637  * As soon as the surface address register is written, flip done interrupt is
9638 	 * generated and the requested events are sent to userspace in the interrupt
9639  * handler itself. The timestamp and sequence sent during the flip done event
9640  * correspond to the last vblank and have no relation to the actual time when
9641  * the flip done event was sent.
9642  */
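
/*
 * Userspace-side sketch (libdrm, illustrative only): an async flip that
 * ends up being validated by the check below would be requested as
 *
 *	drmModePageFlip(fd, crtc_id, fb_id,
 *			DRM_MODE_PAGE_FLIP_ASYNC | DRM_MODE_PAGE_FLIP_EVENT,
 *			user_data);
 *
 * with the flip done event then delivered straight from the interrupt
 * handler as described above.
 */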
9643 static int intel_atomic_check_async(struct intel_atomic_state *state)
9644 {
9645 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9646 	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9647 	const struct intel_plane_state *new_plane_state, *old_plane_state;
9648 	struct intel_crtc *crtc;
9649 	struct intel_plane *plane;
9650 	int i;
9651 
9652 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9653 					    new_crtc_state, i) {
9654 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9655 			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
9656 			return -EINVAL;
9657 		}
9658 
9659 		if (!new_crtc_state->hw.active) {
9660 			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
9661 			return -EINVAL;
9662 		}
9663 		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
9664 			drm_dbg_kms(&i915->drm,
9665 				    "Active planes cannot be changed during async flip\n");
9666 			return -EINVAL;
9667 		}
9668 	}
9669 
9670 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
9671 					     new_plane_state, i) {
9672 		/*
9673 		 * TODO: Async flip is only supported through the page flip IOCTL
9674 		 * as of now, so support is currently added for the primary plane
9675 		 * only. Support for other planes on platforms that support this
9676 		 * (vlv/chv and icl+) should be added when async flip is
9677 		 * enabled in the atomic IOCTL path.
9678 		 */
9679 		if (!plane->async_flip)
9680 			return -EINVAL;
9681 
9682 		/*
9683 		 * FIXME: This check is kept generic for all platforms.
9684 		 * Need to verify this for all gen9 platforms to enable
9685 		 * this selectively if required.
9686 		 */
9687 		switch (new_plane_state->hw.fb->modifier) {
9688 		case I915_FORMAT_MOD_X_TILED:
9689 		case I915_FORMAT_MOD_Y_TILED:
9690 		case I915_FORMAT_MOD_Yf_TILED:
9691 			break;
9692 		default:
9693 			drm_dbg_kms(&i915->drm,
9694 				    "Linear memory/CCS does not support async flips\n");
9695 			return -EINVAL;
9696 		}
9697 
9698 		if (old_plane_state->view.color_plane[0].stride !=
9699 		    new_plane_state->view.color_plane[0].stride) {
9700 			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
9701 			return -EINVAL;
9702 		}
9703 
9704 		if (old_plane_state->hw.fb->modifier !=
9705 		    new_plane_state->hw.fb->modifier) {
9706 			drm_dbg_kms(&i915->drm,
9707 				    "Framebuffer modifiers cannot be changed in async flip\n");
9708 			return -EINVAL;
9709 		}
9710 
9711 		if (old_plane_state->hw.fb->format !=
9712 		    new_plane_state->hw.fb->format) {
9713 			drm_dbg_kms(&i915->drm,
9714 				    "Framebuffer format cannot be changed in async flip\n");
9715 			return -EINVAL;
9716 		}
9717 
9718 		if (old_plane_state->hw.rotation !=
9719 		    new_plane_state->hw.rotation) {
9720 			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
9721 			return -EINVAL;
9722 		}
9723 
9724 		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
9725 		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
9726 			drm_dbg_kms(&i915->drm,
9727 				    "Plane size/co-ordinates cannot be changed in async flip\n");
9728 			return -EINVAL;
9729 		}
9730 
9731 		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
9732 			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
9733 			return -EINVAL;
9734 		}
9735 
9736 		if (old_plane_state->hw.pixel_blend_mode !=
9737 		    new_plane_state->hw.pixel_blend_mode) {
9738 			drm_dbg_kms(&i915->drm,
9739 				    "Pixel blend mode cannot be changed in async flip\n");
9740 			return -EINVAL;
9741 		}
9742 
9743 		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
9744 			drm_dbg_kms(&i915->drm,
9745 				    "Color encoding cannot be changed in async flip\n");
9746 			return -EINVAL;
9747 		}
9748 
9749 		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
9750 			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
9751 			return -EINVAL;
9752 		}
9753 	}
9754 
9755 	return 0;
9756 }
9757 
9758 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
9759 {
9760 	struct intel_crtc_state *crtc_state;
9761 	struct intel_crtc *crtc;
9762 	int i;
9763 
9764 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9765 		struct intel_crtc_state *linked_crtc_state;
9766 		struct intel_crtc *linked_crtc;
9767 		int ret;
9768 
9769 		if (!crtc_state->bigjoiner)
9770 			continue;
9771 
9772 		linked_crtc = crtc_state->bigjoiner_linked_crtc;
9773 		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
9774 		if (IS_ERR(linked_crtc_state))
9775 			return PTR_ERR(linked_crtc_state);
9776 
9777 		if (!intel_crtc_needs_modeset(crtc_state))
9778 			continue;
9779 
9780 		linked_crtc_state->uapi.mode_changed = true;
9781 
9782 		ret = drm_atomic_add_affected_connectors(&state->base,
9783 							 &linked_crtc->base);
9784 		if (ret)
9785 			return ret;
9786 
9787 		ret = intel_atomic_add_affected_planes(state, linked_crtc);
9788 		if (ret)
9789 			return ret;
9790 	}
9791 
9792 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9793 		/* Kill old bigjoiner link, we may re-establish afterwards */
9794 		if (intel_crtc_needs_modeset(crtc_state) &&
9795 		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
9796 			kill_bigjoiner_slave(state, crtc_state);
9797 	}
9798 
9799 	return 0;
9800 }
9801 
9802 /**
9803  * intel_atomic_check - validate state object
9804  * @dev: drm device
9805  * @_state: state to validate
9806  */
9807 static int intel_atomic_check(struct drm_device *dev,
9808 			      struct drm_atomic_state *_state)
9809 {
9810 	struct drm_i915_private *dev_priv = to_i915(dev);
9811 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
9812 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9813 	struct intel_crtc *crtc;
9814 	int ret, i;
9815 	bool any_ms = false;
9816 
9817 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9818 					    new_crtc_state, i) {
9819 		if (new_crtc_state->inherited != old_crtc_state->inherited)
9820 			new_crtc_state->uapi.mode_changed = true;
9821 	}
9822 
9823 	intel_vrr_check_modeset(state);
9824 
9825 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
9826 	if (ret)
9827 		goto fail;
9828 
9829 	ret = intel_bigjoiner_add_affected_crtcs(state);
9830 	if (ret)
9831 		goto fail;
9832 
9833 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9834 					    new_crtc_state, i) {
9835 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
9836 			/* Light copy */
9837 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
9838 
9839 			continue;
9840 		}
9841 
9842 		if (!new_crtc_state->uapi.enable) {
9843 			if (!new_crtc_state->bigjoiner_slave) {
9844 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
9845 				any_ms = true;
9846 			}
9847 			continue;
9848 		}
9849 
9850 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
9851 		if (ret)
9852 			goto fail;
9853 
9854 		ret = intel_modeset_pipe_config(state, new_crtc_state);
9855 		if (ret)
9856 			goto fail;
9857 
9858 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
9859 						   new_crtc_state);
9860 		if (ret)
9861 			goto fail;
9862 	}
9863 
9864 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9865 					    new_crtc_state, i) {
9866 		if (!intel_crtc_needs_modeset(new_crtc_state))
9867 			continue;
9868 
9869 		ret = intel_modeset_pipe_config_late(new_crtc_state);
9870 		if (ret)
9871 			goto fail;
9872 
9873 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
9874 	}
9875 
9876 	/**
9877 	 * Check if fastset is allowed by external dependencies like other
9878 	 * pipes and transcoders.
9879 	 *
9880 	 * Right now it only forces a full modeset when the MST master
9881 	 * transcoder did not change but the pipe of the master transcoder
9882 	 * needs a full modeset, in which case all slaves also need a full
9883 	 * modeset. Likewise, in case of port synced crtcs, if one of the
9884 	 * synced crtcs needs a full modeset, all other synced crtcs are
9885 	 * forced to do a full modeset as well.
9886 	 */
9887 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9888 		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
9889 			continue;
9890 
9891 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
9892 			enum transcoder master = new_crtc_state->mst_master_transcoder;
9893 
9894 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
9895 				new_crtc_state->uapi.mode_changed = true;
9896 				new_crtc_state->update_pipe = false;
9897 			}
9898 		}
9899 
9900 		if (is_trans_port_sync_mode(new_crtc_state)) {
9901 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
9902 
9903 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
9904 				trans |= BIT(new_crtc_state->master_transcoder);
9905 
9906 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
9907 				new_crtc_state->uapi.mode_changed = true;
9908 				new_crtc_state->update_pipe = false;
9909 			}
9910 		}
9911 
9912 		if (new_crtc_state->bigjoiner) {
9913 			struct intel_crtc_state *linked_crtc_state =
9914 				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
9915 
9916 			if (intel_crtc_needs_modeset(linked_crtc_state)) {
9917 				new_crtc_state->uapi.mode_changed = true;
9918 				new_crtc_state->update_pipe = false;
9919 			}
9920 		}
9921 	}
9922 
9923 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9924 					    new_crtc_state, i) {
9925 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9926 			any_ms = true;
9927 			continue;
9928 		}
9929 
9930 		if (!new_crtc_state->update_pipe)
9931 			continue;
9932 
9933 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
9934 	}
9935 
9936 	if (any_ms && !check_digital_port_conflicts(state)) {
9937 		drm_dbg_kms(&dev_priv->drm,
9938 			    "rejecting conflicting digital port configuration\n");
9939 		ret = -EINVAL;
9940 		goto fail;
9941 	}
9942 
9943 	ret = drm_dp_mst_atomic_check(&state->base);
9944 	if (ret)
9945 		goto fail;
9946 
9947 	ret = intel_atomic_check_planes(state);
9948 	if (ret)
9949 		goto fail;
9950 
9951 	intel_fbc_choose_crtc(dev_priv, state);
9952 	ret = calc_watermark_data(state);
9953 	if (ret)
9954 		goto fail;
9955 
9956 	ret = intel_bw_atomic_check(state);
9957 	if (ret)
9958 		goto fail;
9959 
9960 	ret = intel_atomic_check_cdclk(state, &any_ms);
9961 	if (ret)
9962 		goto fail;
9963 
9964 	if (intel_any_crtc_needs_modeset(state))
9965 		any_ms = true;
9966 
9967 	if (any_ms) {
9968 		ret = intel_modeset_checks(state);
9969 		if (ret)
9970 			goto fail;
9971 
9972 		ret = intel_modeset_calc_cdclk(state);
9973 		if (ret)
9974 			return ret;
9975 
9976 		intel_modeset_clear_plls(state);
9977 	}
9978 
9979 	ret = intel_atomic_check_crtcs(state);
9980 	if (ret)
9981 		goto fail;
9982 
9983 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9984 					    new_crtc_state, i) {
9985 		if (new_crtc_state->uapi.async_flip) {
9986 			ret = intel_atomic_check_async(state);
9987 			if (ret)
9988 				goto fail;
9989 		}
9990 
9991 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
9992 		    !new_crtc_state->update_pipe)
9993 			continue;
9994 
9995 		intel_dump_pipe_config(new_crtc_state, state,
9996 				       intel_crtc_needs_modeset(new_crtc_state) ?
9997 				       "[modeset]" : "[fastset]");
9998 	}
9999 
10000 	return 0;
10001 
10002  fail:
10003 	if (ret == -EDEADLK)
10004 		return ret;
10005 
10006 	/*
10007 	 * FIXME would probably be nice to know which crtc specifically
10008 	 * caused the failure, in cases where we can pinpoint it.
10009 	 */
10010 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10011 					    new_crtc_state, i)
10012 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
10013 
10014 	return ret;
10015 }
10016 
10017 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
10018 {
10019 	struct intel_crtc_state *crtc_state;
10020 	struct intel_crtc *crtc;
10021 	int i, ret;
10022 
10023 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
10024 	if (ret < 0)
10025 		return ret;
10026 
10027 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10028 		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
10029 
10030 		if (mode_changed || crtc_state->update_pipe ||
10031 		    crtc_state->uapi.color_mgmt_changed) {
10032 			intel_dsb_prepare(crtc_state);
10033 		}
10034 	}
10035 
10036 	return 0;
10037 }
10038 
10039 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
10040 				  struct intel_crtc_state *crtc_state)
10041 {
10042 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10043 
10044 	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
10045 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10046 
10047 	if (crtc_state->has_pch_encoder) {
10048 		enum pipe pch_transcoder =
10049 			intel_crtc_pch_transcoder(crtc);
10050 
10051 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
10052 	}
10053 }
10054 
10055 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
10056 			       const struct intel_crtc_state *new_crtc_state)
10057 {
10058 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
10059 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10060 
10061 	/*
10062 	 * Update pipe size and adjust fitter if needed: the reason for this is
10063 	 * that in compute_mode_changes we check the native mode (not the pfit
10064 	 * mode) to see if we can flip rather than do a full mode set. In the
10065 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
10066 	 * pfit state, we'll end up with a big fb scanned out into the wrong
10067 	 * sized surface.
10068 	 */
10069 	intel_set_pipe_src_size(new_crtc_state);
10070 
10071 	/* on skylake this is done by detaching scalers */
10072 	if (DISPLAY_VER(dev_priv) >= 9) {
10073 		if (new_crtc_state->pch_pfit.enabled)
10074 			skl_pfit_enable(new_crtc_state);
10075 	} else if (HAS_PCH_SPLIT(dev_priv)) {
10076 		if (new_crtc_state->pch_pfit.enabled)
10077 			ilk_pfit_enable(new_crtc_state);
10078 		else if (old_crtc_state->pch_pfit.enabled)
10079 			ilk_pfit_disable(old_crtc_state);
10080 	}
10081 
10082 	/*
10083 	 * The register is supposedly single buffered so perhaps
10084 	 * not 100% correct to do this here. But SKL+ calculate
10085 	 * this based on the adjusted pixel rate, so pfit changes do
10086 	 * affect it and so it must be updated for fastsets.
10087 	 * HSW/BDW only really need this here for fastboot, after
10088 	 * that the value should not change without a full modeset.
10089 	 */
10090 	if (DISPLAY_VER(dev_priv) >= 9 ||
10091 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
10092 		hsw_set_linetime_wm(new_crtc_state);
10093 
10094 	if (DISPLAY_VER(dev_priv) >= 11)
10095 		icl_set_pipe_chicken(new_crtc_state);
10096 }
10097 
10098 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
10099 				   struct intel_crtc *crtc)
10100 {
10101 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10102 	const struct intel_crtc_state *old_crtc_state =
10103 		intel_atomic_get_old_crtc_state(state, crtc);
10104 	const struct intel_crtc_state *new_crtc_state =
10105 		intel_atomic_get_new_crtc_state(state, crtc);
10106 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10107 
10108 	/*
10109 	 * During modesets pipe configuration was programmed as the
10110 	 * CRTC was enabled.
10111 	 */
10112 	if (!modeset) {
10113 		if (new_crtc_state->uapi.color_mgmt_changed ||
10114 		    new_crtc_state->update_pipe)
10115 			intel_color_commit(new_crtc_state);
10116 
10117 		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
10118 			bdw_set_pipemisc(new_crtc_state);
10119 
10120 		if (new_crtc_state->update_pipe)
10121 			intel_pipe_fastset(old_crtc_state, new_crtc_state);
10122 
10123 		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
10124 	}
10125 
10126 	if (dev_priv->display.atomic_update_watermarks)
10127 		dev_priv->display.atomic_update_watermarks(state, crtc);
10128 }
10129 
10130 static void commit_pipe_post_planes(struct intel_atomic_state *state,
10131 				    struct intel_crtc *crtc)
10132 {
10133 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10134 	const struct intel_crtc_state *new_crtc_state =
10135 		intel_atomic_get_new_crtc_state(state, crtc);
10136 
10137 	/*
10138 	 * Disable the scaler(s) after the plane(s) so that we don't
10139 	 * get a catastrophic underrun even if the two operations
10140 	 * end up happening in two different frames.
10141 	 */
10142 	if (DISPLAY_VER(dev_priv) >= 9 &&
10143 	    !intel_crtc_needs_modeset(new_crtc_state))
10144 		skl_detach_scalers(new_crtc_state);
10145 }
10146 
10147 static void intel_enable_crtc(struct intel_atomic_state *state,
10148 			      struct intel_crtc *crtc)
10149 {
10150 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10151 	const struct intel_crtc_state *new_crtc_state =
10152 		intel_atomic_get_new_crtc_state(state, crtc);
10153 
10154 	if (!intel_crtc_needs_modeset(new_crtc_state))
10155 		return;
10156 
10157 	intel_crtc_update_active_timings(new_crtc_state);
10158 
10159 	dev_priv->display.crtc_enable(state, crtc);
10160 
10161 	if (new_crtc_state->bigjoiner_slave)
10162 		return;
10163 
10164 	/* vblanks work again, re-enable pipe CRC. */
10165 	intel_crtc_enable_pipe_crc(crtc);
10166 }
10167 
10168 static void intel_update_crtc(struct intel_atomic_state *state,
10169 			      struct intel_crtc *crtc)
10170 {
10171 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10172 	const struct intel_crtc_state *old_crtc_state =
10173 		intel_atomic_get_old_crtc_state(state, crtc);
10174 	struct intel_crtc_state *new_crtc_state =
10175 		intel_atomic_get_new_crtc_state(state, crtc);
10176 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10177 
10178 	if (!modeset) {
10179 		if (new_crtc_state->preload_luts &&
10180 		    (new_crtc_state->uapi.color_mgmt_changed ||
10181 		     new_crtc_state->update_pipe))
10182 			intel_color_load_luts(new_crtc_state);
10183 
10184 		intel_pre_plane_update(state, crtc);
10185 
10186 		if (new_crtc_state->update_pipe)
10187 			intel_encoders_update_pipe(state, crtc);
10188 	}
10189 
10190 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
10191 		intel_fbc_disable(crtc);
10192 	else
10193 		intel_fbc_enable(state, crtc);
10194 
10195 	/* Perform vblank evasion around commit operation */
10196 	intel_pipe_update_start(new_crtc_state);
10197 
10198 	commit_pipe_pre_planes(state, crtc);
10199 
10200 	if (DISPLAY_VER(dev_priv) >= 9)
10201 		skl_update_planes_on_crtc(state, crtc);
10202 	else
10203 		i9xx_update_planes_on_crtc(state, crtc);
10204 
10205 	commit_pipe_post_planes(state, crtc);
10206 
10207 	intel_pipe_update_end(new_crtc_state);
10208 
10209 	/*
10210 	 * We usually enable FIFO underrun interrupts as part of the
10211 	 * CRTC enable sequence during modesets.  But when we inherit a
10212 	 * valid pipe configuration from the BIOS we need to take care
10213 	 * of enabling them on the CRTC's first fastset.
10214 	 */
10215 	if (new_crtc_state->update_pipe && !modeset &&
10216 	    old_crtc_state->inherited)
10217 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
10218 }
10219 
10220 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
10221 					  struct intel_crtc_state *old_crtc_state,
10222 					  struct intel_crtc_state *new_crtc_state,
10223 					  struct intel_crtc *crtc)
10224 {
10225 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10226 
10227 	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
10228 
10229 	intel_encoders_pre_disable(state, crtc);
10230 
10231 	intel_crtc_disable_planes(state, crtc);
10232 
10233 	/*
10234 	 * We still need special handling for disabling bigjoiner masters
10235 	 * and slaves, since a slave has no encoder or PLLs of its own
10236 	 * and thus there is nothing to disable for it here.
10237 	 */
10238 	if (old_crtc_state->bigjoiner) {
10239 		intel_crtc_disable_planes(state,
10240 					  old_crtc_state->bigjoiner_linked_crtc);
10241 		old_crtc_state->bigjoiner_linked_crtc->active = false;
10242 	}
10243 
10244 	/*
10245 	 * We need to disable pipe CRC before disabling the pipe,
10246 	 * or we race against vblank off.
10247 	 */
10248 	intel_crtc_disable_pipe_crc(crtc);
10249 
10250 	dev_priv->display.crtc_disable(state, crtc);
10251 	crtc->active = false;
10252 	intel_fbc_disable(crtc);
10253 	intel_disable_shared_dpll(old_crtc_state);
10254 
10255 	/* FIXME unify this for all platforms */
10256 	if (!new_crtc_state->hw.active &&
10257 	    !HAS_GMCH(dev_priv) &&
10258 	    dev_priv->display.initial_watermarks)
10259 		dev_priv->display.initial_watermarks(state, crtc);
10260 }
10261 
10262 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
10263 {
10264 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10265 	struct intel_crtc *crtc;
10266 	u32 handled = 0;
10267 	int i;
10268 
10269 	/* Only disable port sync and MST slaves */
10270 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10271 					    new_crtc_state, i) {
10272 		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
10273 			continue;
10274 
10275 		if (!old_crtc_state->hw.active)
10276 			continue;
10277 
10278 		/* In case of Transcoder Port Sync, master and slave CRTCs can be
10279 		 * assigned in any order and we need to make sure that
10280 		 * slave CRTCs are disabled first and the master CRTC last, since
10281 		 * slave vblanks are masked until the master's vblank is enabled.
10282 		 */
10283 		if (!is_trans_port_sync_slave(old_crtc_state) &&
10284 		    !intel_dp_mst_is_slave_trans(old_crtc_state))
10285 			continue;
10286 
10287 		intel_pre_plane_update(state, crtc);
10288 		intel_old_crtc_state_disables(state, old_crtc_state,
10289 					      new_crtc_state, crtc);
10290 		handled |= BIT(crtc->pipe);
10291 	}
10292 
10293 	/* Disable everything else left on */
10294 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10295 					    new_crtc_state, i) {
10296 		if (!intel_crtc_needs_modeset(new_crtc_state) ||
10297 		    (handled & BIT(crtc->pipe)) ||
10298 		    old_crtc_state->bigjoiner_slave)
10299 			continue;
10300 
10301 		intel_pre_plane_update(state, crtc);
10302 		if (old_crtc_state->bigjoiner) {
10303 			struct intel_crtc *slave =
10304 				old_crtc_state->bigjoiner_linked_crtc;
10305 
10306 			intel_pre_plane_update(state, slave);
10307 		}
10308 
10309 		if (old_crtc_state->hw.active)
10310 			intel_old_crtc_state_disables(state, old_crtc_state,
10311 						      new_crtc_state, crtc);
10312 	}
10313 }
10314 
10315 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10316 {
10317 	struct intel_crtc_state *new_crtc_state;
10318 	struct intel_crtc *crtc;
10319 	int i;
10320 
10321 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10322 		if (!new_crtc_state->hw.active)
10323 			continue;
10324 
10325 		intel_enable_crtc(state, crtc);
10326 		intel_update_crtc(state, crtc);
10327 	}
10328 }
10329 
10330 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
10331 {
10332 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10333 	struct intel_crtc *crtc;
10334 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10335 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
10336 	u8 update_pipes = 0, modeset_pipes = 0;
10337 	int i;
10338 
10339 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10340 		enum pipe pipe = crtc->pipe;
10341 
10342 		if (!new_crtc_state->hw.active)
10343 			continue;
10344 
10345 		/* ignore allocations for crtc's that have been turned off. */
10346 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
10347 			entries[pipe] = old_crtc_state->wm.skl.ddb;
10348 			update_pipes |= BIT(pipe);
10349 		} else {
10350 			modeset_pipes |= BIT(pipe);
10351 		}
10352 	}
10353 
10354 	/*
10355 	 * Whenever the number of active pipes changes, we need to make sure we
10356 	 * update the pipes in the right order so that their ddb allocations
10357 	 * never overlap with each other between CRTC updates. Otherwise we'll
10358 	 * cause pipe underruns and other bad stuff.
10359 	 *
10360 	 * So first let's enable all pipes that do not need a full modeset as
10361 	 * those don't have any external dependency.
10362 	 */
10363 	while (update_pipes) {
10364 		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10365 						    new_crtc_state, i) {
10366 			enum pipe pipe = crtc->pipe;
10367 
10368 			if ((update_pipes & BIT(pipe)) == 0)
10369 				continue;
10370 
10371 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10372 							entries, I915_MAX_PIPES, pipe))
10373 				continue;
10374 
10375 			entries[pipe] = new_crtc_state->wm.skl.ddb;
10376 			update_pipes &= ~BIT(pipe);
10377 
10378 			intel_update_crtc(state, crtc);
10379 
10380 			/*
10381 			 * If this is an already active pipe, its DDB changed,
10382 			 * and this isn't the last pipe that needs updating,
10383 			 * then we need to wait for a vblank to pass for the
10384 			 * new ddb allocation to take effect.
10385 			 */
10386 			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
10387 						 &old_crtc_state->wm.skl.ddb) &&
10388 			    (update_pipes | modeset_pipes))
10389 				intel_wait_for_vblank(dev_priv, pipe);
10390 		}
10391 	}
10392 
10393 	update_pipes = modeset_pipes;
10394 
10395 	/*
10396 	 * Enable all pipes that need a modeset and do not depend on other
10397 	 * pipes
10398 	 */
10399 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10400 		enum pipe pipe = crtc->pipe;
10401 
10402 		if ((modeset_pipes & BIT(pipe)) == 0)
10403 			continue;
10404 
10405 		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
10406 		    is_trans_port_sync_master(new_crtc_state) ||
10407 		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
10408 			continue;
10409 
10410 		modeset_pipes &= ~BIT(pipe);
10411 
10412 		intel_enable_crtc(state, crtc);
10413 	}
10414 
10415 	/*
10416 	 * Then we enable all remaining pipes that depend on other
10417 	 * pipes: MST slaves and port sync masters, big joiner master
10418 	 */
10419 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10420 		enum pipe pipe = crtc->pipe;
10421 
10422 		if ((modeset_pipes & BIT(pipe)) == 0)
10423 			continue;
10424 
10425 		modeset_pipes &= ~BIT(pipe);
10426 
10427 		intel_enable_crtc(state, crtc);
10428 	}
10429 
10430 	/*
10431 	 * Finally we do the plane updates/etc. for all pipes that got enabled.
10432 	 */
10433 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10434 		enum pipe pipe = crtc->pipe;
10435 
10436 		if ((update_pipes & BIT(pipe)) == 0)
10437 			continue;
10438 
10439 		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10440 									entries, I915_MAX_PIPES, pipe));
10441 
10442 		entries[pipe] = new_crtc_state->wm.skl.ddb;
10443 		update_pipes &= ~BIT(pipe);
10444 
10445 		intel_update_crtc(state, crtc);
10446 	}
10447 
10448 	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
10449 	drm_WARN_ON(&dev_priv->drm, update_pipes);
10450 }
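
/*
 * Sketch of the invariant maintained above (illustrative): at any point
 * during the sequence, for every pipe about to be updated,
 *
 *	skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
 *				    entries, I915_MAX_PIPES, pipe)
 *
 * must be false before the new allocation is committed, with entries[]
 * mixing old allocations (pipes not yet updated) and new ones (pipes
 * already updated).
 */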
10451 
10452 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10453 {
10454 	struct intel_atomic_state *state, *next;
10455 	struct llist_node *freed;
10456 
10457 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10458 	llist_for_each_entry_safe(state, next, freed, freed)
10459 		drm_atomic_state_put(&state->base);
10460 }
10461 
10462 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10463 {
10464 	struct drm_i915_private *dev_priv =
10465 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10466 
10467 	intel_atomic_helper_free_state(dev_priv);
10468 }
10469 
10470 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
10471 {
10472 	struct wait_queue_entry wait_fence, wait_reset;
10473 	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
10474 
10475 	init_wait_entry(&wait_fence, 0);
10476 	init_wait_entry(&wait_reset, 0);
10477 	for (;;) {
10478 		prepare_to_wait(&intel_state->commit_ready.wait,
10479 				&wait_fence, TASK_UNINTERRUPTIBLE);
10480 		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10481 					      I915_RESET_MODESET),
10482 				&wait_reset, TASK_UNINTERRUPTIBLE);
10483 
10485 		if (i915_sw_fence_done(&intel_state->commit_ready) ||
10486 		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
10487 			break;
10488 
10489 		schedule();
10490 	}
10491 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
10492 	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10493 				  I915_RESET_MODESET),
10494 		    &wait_reset);
10495 }
10496 
10497 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
10498 {
10499 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10500 	struct intel_crtc *crtc;
10501 	int i;
10502 
10503 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10504 					    new_crtc_state, i)
10505 		intel_dsb_cleanup(old_crtc_state);
10506 }
10507 
10508 static void intel_atomic_cleanup_work(struct work_struct *work)
10509 {
10510 	struct intel_atomic_state *state =
10511 		container_of(work, struct intel_atomic_state, base.commit_work);
10512 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10513 
10514 	intel_cleanup_dsbs(state);
10515 	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10516 	drm_atomic_helper_commit_cleanup_done(&state->base);
10517 	drm_atomic_state_put(&state->base);
10518 
10519 	intel_atomic_helper_free_state(i915);
10520 }
10521 
10522 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
10523 {
10524 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10525 	struct intel_plane *plane;
10526 	struct intel_plane_state *plane_state;
10527 	int i;
10528 
10529 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10530 		struct drm_framebuffer *fb = plane_state->hw.fb;
10531 		int ret;
10532 
10533 		if (!fb ||
10534 		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
10535 			continue;
10536 
10537 		/*
10538 		 * The layout of the fast clear color value expected by HW
10539 		 * (the DRM ABI requires this value to be located in the fb at offset 0 of plane#2):
10540 		 * - 4 x 4 bytes per-channel value
10541 		 *   (in surface type specific float/int format provided by the fb user)
10542 		 * - 8 bytes native color value used by the display
10543 		 *   (converted/written by GPU during a fast clear operation using the
10544 		 *    above per-channel values)
10545 		 *
10546 		 * The commit's FB prepare hook already ensured that the FB obj is pinned and the
10547 		 * caller made sure that the object is synced wrt. any GPU write of the related
10548 		 * clear color value on it.
10549 		 */
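		/*
		 * Assumed byte layout of plane#2, matching the description
		 * above (illustrative):
		 *
		 *	bytes  0..15: 4 x 32-bit per-channel clear values
		 *	bytes 16..23: 8-byte native clear color
		 *
		 * which is why the read below starts at fb->offsets[2] + 16.
		 */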
10550 		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
10551 						     fb->offsets[2] + 16,
10552 						     &plane_state->ccval,
10553 						     sizeof(plane_state->ccval));
10554 		/* The above could only fail if the FB obj has an unexpected backing store type. */
10555 		drm_WARN_ON(&i915->drm, ret);
10556 	}
10557 }
10558 
10559 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10560 {
10561 	struct drm_device *dev = state->base.dev;
10562 	struct drm_i915_private *dev_priv = to_i915(dev);
10563 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10564 	struct intel_crtc *crtc;
10565 	u64 put_domains[I915_MAX_PIPES] = {};
10566 	intel_wakeref_t wakeref = 0;
10567 	int i;
10568 
10569 	intel_atomic_commit_fence_wait(state);
10570 
10571 	drm_atomic_helper_wait_for_dependencies(&state->base);
10572 
10573 	if (state->modeset)
10574 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10575 
10576 	intel_atomic_prepare_plane_clear_colors(state);
10577 
10578 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10579 					    new_crtc_state, i) {
10580 		if (intel_crtc_needs_modeset(new_crtc_state) ||
10581 		    new_crtc_state->update_pipe) {
10582 
10583 			put_domains[crtc->pipe] =
10584 				modeset_get_crtc_power_domains(new_crtc_state);
10585 		}
10586 	}
10587 
10588 	intel_commit_modeset_disables(state);
10589 
10590 	/* FIXME: Eventually get rid of our crtc->config pointer */
10591 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10592 		crtc->config = new_crtc_state;
10593 
10594 	if (state->modeset) {
10595 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10596 
10597 		intel_set_cdclk_pre_plane_update(state);
10598 
10599 		intel_modeset_verify_disabled(dev_priv, state);
10600 	}
10601 
10602 	intel_sagv_pre_plane_update(state);
10603 
10604 	/* Complete the events for pipes that have now been disabled */
10605 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10606 		bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10607 
10608 		/* Complete events for now disabled pipes here. */
10609 		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10610 			spin_lock_irq(&dev->event_lock);
10611 			drm_crtc_send_vblank_event(&crtc->base,
10612 						   new_crtc_state->uapi.event);
10613 			spin_unlock_irq(&dev->event_lock);
10614 
10615 			new_crtc_state->uapi.event = NULL;
10616 		}
10617 	}
10618 
10619 	if (state->modeset)
10620 		intel_encoders_update_prepare(state);
10621 
10622 	intel_dbuf_pre_plane_update(state);
10623 
10624 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10625 		if (new_crtc_state->uapi.async_flip)
10626 			intel_crtc_enable_flip_done(state, crtc);
10627 	}
10628 
10629 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
10630 	dev_priv->display.commit_modeset_enables(state);
10631 
10632 	if (state->modeset) {
10633 		intel_encoders_update_complete(state);
10634 
10635 		intel_set_cdclk_post_plane_update(state);
10636 	}
10637 
10638 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
10639 	 * already, but still need the state for the delayed optimization. To
10640 	 * fix this:
10641 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
10642 	 * - schedule that vblank worker _before_ calling hw_done
10643 	 * - at the start of commit_tail, cancel it _synchronously
10644 	 * - switch over to the vblank wait helper in the core after that since
10645 	 *   we don't need our special handling any more.
10646 	 */
10647 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10648 
10649 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10650 		if (new_crtc_state->uapi.async_flip)
10651 			intel_crtc_disable_flip_done(state, crtc);
10652 
10653 		if (new_crtc_state->hw.active &&
10654 		    !intel_crtc_needs_modeset(new_crtc_state) &&
10655 		    !new_crtc_state->preload_luts &&
10656 		    (new_crtc_state->uapi.color_mgmt_changed ||
10657 		     new_crtc_state->update_pipe))
10658 			intel_color_load_luts(new_crtc_state);
10659 	}
10660 
10661 	/*
10662 	 * Now that the vblank has passed, we can go ahead and program the
10663 	 * optimal watermarks on platforms that need two-step watermark
10664 	 * programming.
10665 	 *
10666 	 * TODO: Move this (and other cleanup) to an async worker eventually.
10667 	 */
10668 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10669 					    new_crtc_state, i) {
10670 		/*
10671 		 * Gen2 reports pipe underruns whenever all planes are disabled.
10672 		 * So re-enable underrun reporting after some planes get enabled.
10673 		 *
10674 		 * We do this before .optimize_watermarks() so that we have a
10675 		 * chance of catching underruns with the intermediate watermarks
10676 		 * vs. the new plane configuration.
10677 		 */
10678 		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
10679 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10680 
10681 		if (dev_priv->display.optimize_watermarks)
10682 			dev_priv->display.optimize_watermarks(state, crtc);
10683 	}
10684 
10685 	intel_dbuf_post_plane_update(state);
10686 
10687 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10688 		intel_post_plane_update(state, crtc);
10689 
10690 		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10691 
10692 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10693 
10694 		/*
10695 		 * DSB cleanup is done in cleanup_work, aligned with framebuffer
10696 		 * cleanup. So copy and reset the dsb structure to sync with
10697 		 * commit_done, and do the dsb cleanup later in cleanup_work.
10698 		 */
10699 		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10700 	}
10701 
10702 	/* Underruns don't always raise interrupts, so check manually */
10703 	intel_check_cpu_fifo_underruns(dev_priv);
10704 	intel_check_pch_fifo_underruns(dev_priv);
10705 
10706 	if (state->modeset)
10707 		intel_verify_planes(state);
10708 
10709 	intel_sagv_post_plane_update(state);
10710 
10711 	drm_atomic_helper_commit_hw_done(&state->base);
10712 
10713 	if (state->modeset) {
10714 		/* As one of the primary mmio accessors, KMS has a high
10715 		 * likelihood of triggering bugs in unclaimed access. After we
10716 		 * finish modesetting, see if an error has been flagged, and if
10717 		 * so enable debugging for the next modeset - and hope we catch
10718 		 * the culprit.
10719 		 */
10720 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10721 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10722 	}
10723 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10724 
10725 	/*
10726 	 * Defer the cleanup of the old state to a separate worker to not
10727 	 * impede the current task (userspace for blocking modesets) that
10728 	 * is executed inline. For out-of-line asynchronous modesets/flips,
10729 	 * deferring to a new worker seems overkill, but we would place a
10730 	 * schedule point (cond_resched()) here anyway to keep latencies
10731 	 * down.
10732 	 */
10733 	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10734 	queue_work(system_highpri_wq, &state->base.commit_work);
10735 }
10736 
10737 static void intel_atomic_commit_work(struct work_struct *work)
10738 {
10739 	struct intel_atomic_state *state =
10740 		container_of(work, struct intel_atomic_state, base.commit_work);
10741 
10742 	intel_atomic_commit_tail(state);
10743 }
10744 
10745 static int __i915_sw_fence_call
10746 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10747 			  enum i915_sw_fence_notify notify)
10748 {
10749 	struct intel_atomic_state *state =
10750 		container_of(fence, struct intel_atomic_state, commit_ready);
10751 
10752 	switch (notify) {
10753 	case FENCE_COMPLETE:
10754 		/* we do blocking waits in the worker, nothing to do here */
10755 		break;
10756 	case FENCE_FREE:
10757 		{
10758 			struct intel_atomic_helper *helper =
10759 				&to_i915(state->base.dev)->atomic_helper;
10760 
10761 			if (llist_add(&state->freed, &helper->free_list))
10762 				schedule_work(&helper->free_work);
10763 			break;
10764 		}
10765 	}
10766 
10767 	return NOTIFY_DONE;
10768 }
10769 
10770 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
10771 {
10772 	struct intel_plane_state *old_plane_state, *new_plane_state;
10773 	struct intel_plane *plane;
10774 	int i;
10775 
10776 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10777 					     new_plane_state, i)
10778 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
10779 					to_intel_frontbuffer(new_plane_state->hw.fb),
10780 					plane->frontbuffer_bit);
10781 }
10782 
10783 static int intel_atomic_commit(struct drm_device *dev,
10784 			       struct drm_atomic_state *_state,
10785 			       bool nonblock)
10786 {
10787 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
10788 	struct drm_i915_private *dev_priv = to_i915(dev);
10789 	int ret = 0;
10790 
10791 	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
10792 
10793 	drm_atomic_state_get(&state->base);
10794 	i915_sw_fence_init(&state->commit_ready,
10795 			   intel_atomic_commit_ready);
10796 
10797 	/*
10798 	 * The intel_legacy_cursor_update() fast path takes care
10799 	 * of avoiding the vblank waits for simple cursor
10800 	 * movement and flips. For cursor on/off and size changes,
10801 	 * we want to perform the vblank waits so that watermark
10802 	 * updates happen during the correct frames. Gen9+ have
10803 	 * double buffered watermarks and so shouldn't need this.
10804 	 *
10805 	 * Unset state->legacy_cursor_update before the call to
10806 	 * drm_atomic_helper_setup_commit() because otherwise
10807 	 * drm_atomic_helper_wait_for_flip_done() is a noop and
10808 	 * we get FIFO underruns because we didn't wait
10809 	 * for vblank.
10810 	 *
10811 	 * FIXME doing watermarks and fb cleanup from a vblank worker
10812 	 * (assuming we had any) would solve these problems.
10813 	 */
10814 	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
10815 		struct intel_crtc_state *new_crtc_state;
10816 		struct intel_crtc *crtc;
10817 		int i;
10818 
10819 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10820 			if (new_crtc_state->wm.need_postvbl_update ||
10821 			    new_crtc_state->update_wm_post)
10822 				state->base.legacy_cursor_update = false;
10823 	}
10824 
10825 	ret = intel_atomic_prepare_commit(state);
10826 	if (ret) {
10827 		drm_dbg_atomic(&dev_priv->drm,
10828 			       "Preparing state failed with %i\n", ret);
10829 		i915_sw_fence_commit(&state->commit_ready);
10830 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10831 		return ret;
10832 	}
10833 
10834 	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
10835 	if (!ret)
10836 		ret = drm_atomic_helper_swap_state(&state->base, true);
10837 	if (!ret)
10838 		intel_atomic_swap_global_state(state);
10839 
10840 	if (ret) {
10841 		struct intel_crtc_state *new_crtc_state;
10842 		struct intel_crtc *crtc;
10843 		int i;
10844 
10845 		i915_sw_fence_commit(&state->commit_ready);
10846 
10847 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10848 			intel_dsb_cleanup(new_crtc_state);
10849 
10850 		drm_atomic_helper_cleanup_planes(dev, &state->base);
10851 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10852 		return ret;
10853 	}
10854 	intel_shared_dpll_swap_state(state);
10855 	intel_atomic_track_fbs(state);
10856 
10857 	drm_atomic_state_get(&state->base);
10858 	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
10859 
10860 	i915_sw_fence_commit(&state->commit_ready);
10861 	if (nonblock && state->modeset) {
10862 		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
10863 	} else if (nonblock) {
10864 		queue_work(dev_priv->flip_wq, &state->base.commit_work);
10865 	} else {
10866 		if (state->modeset)
10867 			flush_workqueue(dev_priv->modeset_wq);
10868 		intel_atomic_commit_tail(state);
10869 	}
10870 
10871 	return 0;
10872 }
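
/*
 * Illustrative summary (comment only, not driver code): how the commit
 * above gets dispatched, given the workqueues allocated in
 * intel_modeset_init_noirq():
 *
 *	nonblock && state->modeset	-> queue on dev_priv->modeset_wq
 *	nonblock && !state->modeset	-> queue on dev_priv->flip_wq
 *	blocking			-> flush modeset_wq if needed, then
 *					   run intel_atomic_commit_tail()
 *					   in the caller's context
 *
 * All three paths converge on intel_atomic_commit_tail(), which does
 * the actual hardware programming.
 */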
10873 
10874 struct wait_rps_boost {
10875 	struct wait_queue_entry wait;
10876 
10877 	struct drm_crtc *crtc;
10878 	struct i915_request *request;
10879 };
10880 
10881 static int do_rps_boost(struct wait_queue_entry *_wait,
10882 			unsigned mode, int sync, void *key)
10883 {
10884 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
10885 	struct i915_request *rq = wait->request;
10886 
10887 	/*
10888 	 * If we missed the vblank but the request is already running, it
10889 	 * is reasonable to assume that it will complete before the next
10890 	 * vblank without our intervention, so leave RPS alone.
10891 	 */
10892 	if (!i915_request_started(rq))
10893 		intel_rps_boost(rq);
10894 	i915_request_put(rq);
10895 
10896 	drm_crtc_vblank_put(wait->crtc);
10897 
10898 	list_del(&wait->wait.entry);
10899 	kfree(wait);
10900 	return 1;
10901 }
10902 
10903 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
10904 				       struct dma_fence *fence)
10905 {
10906 	struct wait_rps_boost *wait;
10907 
10908 	if (!dma_fence_is_i915(fence))
10909 		return;
10910 
10911 	if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
10912 		return;
10913 
10914 	if (drm_crtc_vblank_get(crtc))
10915 		return;
10916 
10917 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
10918 	if (!wait) {
10919 		drm_crtc_vblank_put(crtc);
10920 		return;
10921 	}
10922 
10923 	wait->request = to_request(dma_fence_get(fence));
10924 	wait->crtc = crtc;
10925 
10926 	wait->wait.func = do_rps_boost;
10927 	wait->wait.flags = 0;
10928 
10929 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
10930 }
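
/*
 * Lifecycle sketch (illustration only): the wait entry allocated above
 * sits on the crtc's vblank waitqueue until the next vblank wakes it,
 * at which point do_rps_boost() runs once, optionally boosts the GPU
 * clocks, drops its request and vblank references and frees itself:
 *
 *	add_rps_boost_after_vblank(crtc, fence);
 *		-> vblank interrupt wakes the waitqueue
 *		-> do_rps_boost(): boost iff the request hasn't started
 *		-> list_del() + kfree(wait)
 *
 * If kmalloc() or drm_crtc_vblank_get() fails we simply skip the boost;
 * it is a performance optimization, not a correctness requirement.
 */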
10931 
10932 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
10933 {
10934 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
10935 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10936 	struct drm_framebuffer *fb = plane_state->hw.fb;
10937 	struct i915_vma *vma;
10938 	bool phys_cursor =
10939 		plane->id == PLANE_CURSOR &&
10940 		INTEL_INFO(dev_priv)->display.cursor_needs_physical;
10941 
10942 	if (!intel_fb_uses_dpt(fb)) {
10943 		vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
10944 						 &plane_state->view.gtt,
10945 						 intel_plane_uses_fence(plane_state),
10946 						 &plane_state->flags);
10947 		if (IS_ERR(vma))
10948 			return PTR_ERR(vma);
10949 
10950 		plane_state->ggtt_vma = vma;
10951 	} else {
10952 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10953 
10954 		vma = intel_dpt_pin(intel_fb->dpt_vm);
10955 		if (IS_ERR(vma))
10956 			return PTR_ERR(vma);
10957 
10958 		plane_state->ggtt_vma = vma;
10959 
10960 		vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
10961 					   &plane_state->flags, intel_fb->dpt_vm);
10962 		if (IS_ERR(vma)) {
10963 			intel_dpt_unpin(intel_fb->dpt_vm);
10964 			plane_state->ggtt_vma = NULL;
10965 			return PTR_ERR(vma);
10966 		}
10967 
10968 		plane_state->dpt_vma = vma;
10969 
10970 		WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
10971 	}
10972 
10973 	return 0;
10974 }
10975 
10976 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
10977 {
10978 	struct drm_framebuffer *fb = old_plane_state->hw.fb;
10979 	struct i915_vma *vma;
10980 
10981 	if (!intel_fb_uses_dpt(fb)) {
10982 		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10983 		if (vma)
10984 			intel_unpin_fb_vma(vma, old_plane_state->flags);
10985 	} else {
10986 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10987 
10988 		vma = fetch_and_zero(&old_plane_state->dpt_vma);
10989 		if (vma)
10990 			intel_unpin_fb_vma(vma, old_plane_state->flags);
10991 
10992 		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10993 		if (vma)
10994 			intel_dpt_unpin(intel_fb->dpt_vm);
10995 	}
10996 }
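
/*
 * Pairing sketch (illustration only): intel_plane_pin_fb() and
 * intel_plane_unpin_fb() must stay symmetric. A DPT framebuffer takes
 * two pins (the DPT itself in the GGTT, plus the fb in the DPT), which
 * the unpin path above releases in reverse order:
 *
 *	pin:	ggtt_vma = intel_dpt_pin(dpt_vm);
 *		dpt_vma  = intel_pin_fb_obj_dpt(fb, ...);
 *	unpin:	intel_unpin_fb_vma(dpt_vma, ...);
 *		intel_dpt_unpin(dpt_vm);	<- releases the GGTT pin
 */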
10997 
10998 /**
10999  * intel_prepare_plane_fb - Prepare fb for usage on plane
11000  * @_plane: drm plane to prepare for
11001  * @_new_plane_state: the plane state being prepared
11002  *
11003  * Prepares a framebuffer for usage on a display plane.  Generally this
11004  * involves pinning the underlying object and updating the frontbuffer tracking
11005  * bits.  Some older platforms need special physical address handling for
11006  * cursor planes.
11007  *
11008  * Returns 0 on success, negative error code on failure.
11009  */
11010 int
11011 intel_prepare_plane_fb(struct drm_plane *_plane,
11012 		       struct drm_plane_state *_new_plane_state)
11013 {
11014 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
11015 	struct intel_plane *plane = to_intel_plane(_plane);
11016 	struct intel_plane_state *new_plane_state =
11017 		to_intel_plane_state(_new_plane_state);
11018 	struct intel_atomic_state *state =
11019 		to_intel_atomic_state(new_plane_state->uapi.state);
11020 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11021 	const struct intel_plane_state *old_plane_state =
11022 		intel_atomic_get_old_plane_state(state, plane);
11023 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
11024 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
11025 	int ret;
11026 
11027 	if (old_obj) {
11028 		const struct intel_crtc_state *crtc_state =
11029 			intel_atomic_get_new_crtc_state(state,
11030 							to_intel_crtc(old_plane_state->hw.crtc));
11031 
11032 		/* Big Hammer, we also need to ensure that any pending
11033 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
11034 		 * current scanout is retired before unpinning the old
11035 		 * framebuffer. Note that we rely on userspace rendering
11036 		 * into the buffer attached to the pipe they are waiting
11037 		 * on. If not, userspace generates a GPU hang with IPEHR
11038 		 * pointing to the MI_WAIT_FOR_EVENT.
11039 		 *
11040 		 * This should only fail upon a hung GPU, in which case we
11041 		 * can safely continue.
11042 		 */
11043 		if (intel_crtc_needs_modeset(crtc_state)) {
11044 			ret = i915_sw_fence_await_reservation(&state->commit_ready,
11045 							      old_obj->base.resv, NULL,
11046 							      false, 0,
11047 							      GFP_KERNEL);
11048 			if (ret < 0)
11049 				return ret;
11050 		}
11051 	}
11052 
11053 	if (new_plane_state->uapi.fence) { /* explicit fencing */
11054 		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
11055 					     &attr);
11056 		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
11057 						    new_plane_state->uapi.fence,
11058 						    i915_fence_timeout(dev_priv),
11059 						    GFP_KERNEL);
11060 		if (ret < 0)
11061 			return ret;
11062 	}
11063 
11064 	if (!obj)
11065 		return 0;
11066 
11067 
11068 	ret = intel_plane_pin_fb(new_plane_state);
11069 	if (ret)
11070 		return ret;
11071 
11072 	i915_gem_object_wait_priority(obj, 0, &attr);
11073 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
11074 
11075 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
11076 		struct dma_fence *fence;
11077 
11078 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
11079 						      obj->base.resv, NULL,
11080 						      false,
11081 						      i915_fence_timeout(dev_priv),
11082 						      GFP_KERNEL);
11083 		if (ret < 0)
11084 			goto unpin_fb;
11085 
11086 		fence = dma_resv_get_excl_unlocked(obj->base.resv);
11087 		if (fence) {
11088 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11089 						   fence);
11090 			dma_fence_put(fence);
11091 		}
11092 	} else {
11093 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11094 					   new_plane_state->uapi.fence);
11095 	}
11096 
11097 	/*
11098 	 * We declare pageflips to be interactive and so merit a small bias
11099 	 * towards upclocking to deliver the frame on time. By only changing
11100 	 * the RPS thresholds to sample more regularly and aim for higher
11101 	 * clocks we can hopefully deliver low power workloads (like kodi)
11102 	 * that are not quite steady state without resorting to forcing
11103 	 * maximum clocks following a vblank miss (see do_rps_boost()).
11104 	 */
11105 	if (!state->rps_interactive) {
11106 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
11107 		state->rps_interactive = true;
11108 	}
11109 
11110 	return 0;
11111 
11112 unpin_fb:
11113 	intel_plane_unpin_fb(new_plane_state);
11114 
11115 	return ret;
11116 }
11117 
11118 /**
11119  * intel_cleanup_plane_fb - Cleans up an fb after plane use
11120  * @plane: drm plane to clean up for
11121  * @_old_plane_state: the state from the previous modeset
11122  *
11123  * Cleans up a framebuffer that has just been removed from a plane.
11124  */
11125 void
11126 intel_cleanup_plane_fb(struct drm_plane *plane,
11127 		       struct drm_plane_state *_old_plane_state)
11128 {
11129 	struct intel_plane_state *old_plane_state =
11130 		to_intel_plane_state(_old_plane_state);
11131 	struct intel_atomic_state *state =
11132 		to_intel_atomic_state(old_plane_state->uapi.state);
11133 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
11134 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
11135 
11136 	if (!obj)
11137 		return;
11138 
11139 	if (state->rps_interactive) {
11140 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
11141 		state->rps_interactive = false;
11142 	}
11143 
11144 	/* Should only be called after a successful intel_prepare_plane_fb()! */
11145 	intel_plane_unpin_fb(old_plane_state);
11146 }
11147 
11148 /**
11149  * intel_plane_destroy - destroy a plane
11150  * @plane: plane to destroy
11151  *
11152  * Common destruction function for all types of planes (primary, cursor,
11153  * sprite).
11154  */
11155 void intel_plane_destroy(struct drm_plane *plane)
11156 {
11157 	drm_plane_cleanup(plane);
11158 	kfree(to_intel_plane(plane));
11159 }
11160 
11161 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
11162 {
11163 	struct intel_plane *plane;
11164 
11165 	for_each_intel_plane(&dev_priv->drm, plane) {
11166 		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
11167 								  plane->pipe);
11168 
11169 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
11170 	}
11171 }
11172 
11173 
11174 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
11175 				      struct drm_file *file)
11176 {
11177 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11178 	struct drm_crtc *drmmode_crtc;
11179 	struct intel_crtc *crtc;
11180 
11181 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
11182 	if (!drmmode_crtc)
11183 		return -ENOENT;
11184 
11185 	crtc = to_intel_crtc(drmmode_crtc);
11186 	pipe_from_crtc_id->pipe = crtc->pipe;
11187 
11188 	return 0;
11189 }
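
/*
 * Hedged userspace sketch (not part of this file): the ioctl above is
 * reached via DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, roughly:
 *
 *	struct drm_i915_get_pipe_from_crtc_id arg = { .crtc_id = id };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, &arg) == 0)
 *		printf("crtc %u -> pipe %u\n", arg.crtc_id, arg.pipe);
 *
 * drmIoctl() is the libdrm wrapper; error handling is elided and "id"
 * stands in for a CRTC object id obtained from drmModeGetResources().
 */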
11190 
11191 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
11192 {
11193 	struct drm_device *dev = encoder->base.dev;
11194 	struct intel_encoder *source_encoder;
11195 	u32 possible_clones = 0;
11196 
11197 	for_each_intel_encoder(dev, source_encoder) {
11198 		if (encoders_cloneable(encoder, source_encoder))
11199 			possible_clones |= drm_encoder_mask(&source_encoder->base);
11200 	}
11201 
11202 	return possible_clones;
11203 }
11204 
11205 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
11206 {
11207 	struct drm_device *dev = encoder->base.dev;
11208 	struct intel_crtc *crtc;
11209 	u32 possible_crtcs = 0;
11210 
11211 	for_each_intel_crtc(dev, crtc) {
11212 		if (encoder->pipe_mask & BIT(crtc->pipe))
11213 			possible_crtcs |= drm_crtc_mask(&crtc->base);
11214 	}
11215 
11216 	return possible_crtcs;
11217 }
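
/*
 * Worked example (illustration only): for a hypothetical encoder whose
 * pipe_mask covers pipes A and B, the loop above computes
 *
 *	pipe_mask      = BIT(PIPE_A) | BIT(PIPE_B) = 0x3
 *	possible_crtcs = drm_crtc_mask(crtc_A) | drm_crtc_mask(crtc_B)
 *
 * i.e. one bit per CRTC index, which is the encoding the drm core
 * expects in encoder->base.possible_crtcs.
 */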
11218 
11219 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
11220 {
11221 	if (!IS_MOBILE(dev_priv))
11222 		return false;
11223 
11224 	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
11225 		return false;
11226 
11227 	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
11228 		return false;
11229 
11230 	return true;
11231 }
11232 
11233 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
11234 {
11235 	if (DISPLAY_VER(dev_priv) >= 9)
11236 		return false;
11237 
11238 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
11239 		return false;
11240 
11241 	if (HAS_PCH_LPT_H(dev_priv) &&
11242 	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
11243 		return false;
11244 
11245 	/* DDI E can't be used if DDI A requires 4 lanes */
11246 	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
11247 		return false;
11248 
11249 	if (!dev_priv->vbt.int_crt_support)
11250 		return false;
11251 
11252 	return true;
11253 }
11254 
11255 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
11256 {
11257 	struct intel_encoder *encoder;
11258 	bool dpd_is_edp = false;
11259 
11260 	intel_pps_unlock_regs_wa(dev_priv);
11261 
11262 	if (!HAS_DISPLAY(dev_priv))
11263 		return;
11264 
11265 	if (IS_DG2(dev_priv)) {
11266 		intel_ddi_init(dev_priv, PORT_A);
11267 		intel_ddi_init(dev_priv, PORT_B);
11268 		intel_ddi_init(dev_priv, PORT_C);
11269 		intel_ddi_init(dev_priv, PORT_D_XELPD);
11270 	} else if (IS_ALDERLAKE_P(dev_priv)) {
11271 		intel_ddi_init(dev_priv, PORT_A);
11272 		intel_ddi_init(dev_priv, PORT_B);
11273 		intel_ddi_init(dev_priv, PORT_TC1);
11274 		intel_ddi_init(dev_priv, PORT_TC2);
11275 		intel_ddi_init(dev_priv, PORT_TC3);
11276 		intel_ddi_init(dev_priv, PORT_TC4);
11277 	} else if (IS_ALDERLAKE_S(dev_priv)) {
11278 		intel_ddi_init(dev_priv, PORT_A);
11279 		intel_ddi_init(dev_priv, PORT_TC1);
11280 		intel_ddi_init(dev_priv, PORT_TC2);
11281 		intel_ddi_init(dev_priv, PORT_TC3);
11282 		intel_ddi_init(dev_priv, PORT_TC4);
11283 	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
11284 		intel_ddi_init(dev_priv, PORT_A);
11285 		intel_ddi_init(dev_priv, PORT_B);
11286 		intel_ddi_init(dev_priv, PORT_TC1);
11287 		intel_ddi_init(dev_priv, PORT_TC2);
11288 	} else if (DISPLAY_VER(dev_priv) >= 12) {
11289 		intel_ddi_init(dev_priv, PORT_A);
11290 		intel_ddi_init(dev_priv, PORT_B);
11291 		intel_ddi_init(dev_priv, PORT_TC1);
11292 		intel_ddi_init(dev_priv, PORT_TC2);
11293 		intel_ddi_init(dev_priv, PORT_TC3);
11294 		intel_ddi_init(dev_priv, PORT_TC4);
11295 		intel_ddi_init(dev_priv, PORT_TC5);
11296 		intel_ddi_init(dev_priv, PORT_TC6);
11297 		icl_dsi_init(dev_priv);
11298 	} else if (IS_JSL_EHL(dev_priv)) {
11299 		intel_ddi_init(dev_priv, PORT_A);
11300 		intel_ddi_init(dev_priv, PORT_B);
11301 		intel_ddi_init(dev_priv, PORT_C);
11302 		intel_ddi_init(dev_priv, PORT_D);
11303 		icl_dsi_init(dev_priv);
11304 	} else if (DISPLAY_VER(dev_priv) == 11) {
11305 		intel_ddi_init(dev_priv, PORT_A);
11306 		intel_ddi_init(dev_priv, PORT_B);
11307 		intel_ddi_init(dev_priv, PORT_C);
11308 		intel_ddi_init(dev_priv, PORT_D);
11309 		intel_ddi_init(dev_priv, PORT_E);
11310 		intel_ddi_init(dev_priv, PORT_F);
11311 		icl_dsi_init(dev_priv);
11312 	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
11313 		intel_ddi_init(dev_priv, PORT_A);
11314 		intel_ddi_init(dev_priv, PORT_B);
11315 		intel_ddi_init(dev_priv, PORT_C);
11316 		vlv_dsi_init(dev_priv);
11317 	} else if (DISPLAY_VER(dev_priv) >= 9) {
11318 		intel_ddi_init(dev_priv, PORT_A);
11319 		intel_ddi_init(dev_priv, PORT_B);
11320 		intel_ddi_init(dev_priv, PORT_C);
11321 		intel_ddi_init(dev_priv, PORT_D);
11322 		intel_ddi_init(dev_priv, PORT_E);
11323 	} else if (HAS_DDI(dev_priv)) {
11324 		u32 found;
11325 
11326 		if (intel_ddi_crt_present(dev_priv))
11327 			intel_crt_init(dev_priv);
11328 
11329 		/* Haswell uses DDI functions to detect digital outputs. */
11330 		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11331 		if (found)
11332 			intel_ddi_init(dev_priv, PORT_A);
11333 
11334 		found = intel_de_read(dev_priv, SFUSE_STRAP);
11335 		if (found & SFUSE_STRAP_DDIB_DETECTED)
11336 			intel_ddi_init(dev_priv, PORT_B);
11337 		if (found & SFUSE_STRAP_DDIC_DETECTED)
11338 			intel_ddi_init(dev_priv, PORT_C);
11339 		if (found & SFUSE_STRAP_DDID_DETECTED)
11340 			intel_ddi_init(dev_priv, PORT_D);
11341 		if (found & SFUSE_STRAP_DDIF_DETECTED)
11342 			intel_ddi_init(dev_priv, PORT_F);
11343 	} else if (HAS_PCH_SPLIT(dev_priv)) {
11344 		int found;
11345 
11346 		/*
11347 		 * intel_edp_init_connector() depends on this completing first,
11348 		 * to prevent the registration of both eDP and LVDS and the
11349 		 * incorrect sharing of the PPS.
11350 		 */
11351 		intel_lvds_init(dev_priv);
11352 		intel_crt_init(dev_priv);
11353 
11354 		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11355 
11356 		if (ilk_has_edp_a(dev_priv))
11357 			g4x_dp_init(dev_priv, DP_A, PORT_A);
11358 
11359 		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
11360 			/* PCH SDVOB multiplex with HDMIB */
11361 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11362 			if (!found)
11363 				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11364 			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11365 				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
11366 		}
11367 
11368 		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
11369 			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
11370 
11371 		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
11372 			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
11373 
11374 		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
11375 			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
11376 
11377 		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
11378 			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
11379 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
11380 		bool has_edp, has_port;
11381 
11382 		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
11383 			intel_crt_init(dev_priv);
11384 
11385 		/*
11386 		 * The DP_DETECTED bit is the latched state of the DDC
11387 		 * SDA pin at boot. However since eDP doesn't require DDC
11388 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
11389 		 * eDP ports may have been muxed to an alternate function.
11390 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
11391 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
11392 		 * detect eDP ports.
11393 		 *
11394 		 * Sadly the straps seem to be missing sometimes even for HDMI
11395 		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
11396 		 * and VBT for the presence of the port. Additionally we can't
11397 		 * trust the port type the VBT declares as we've seen at least
11398 		 * HDMI ports that the VBT claims are DP or eDP.
11399 		 */
11400 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
11401 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
11402 		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
11403 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
11404 		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
11405 			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
11406 
11407 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
11408 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
11409 		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
11410 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
11411 		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11412 			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11413 
11414 		if (IS_CHERRYVIEW(dev_priv)) {
11415 			/*
11416 			 * eDP not supported on port D,
11417 			 * so no need to worry about it
11418 			 */
11419 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11420 			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11421 				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11422 			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11423 				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11424 		}
11425 
11426 		vlv_dsi_init(dev_priv);
11427 	} else if (IS_PINEVIEW(dev_priv)) {
11428 		intel_lvds_init(dev_priv);
11429 		intel_crt_init(dev_priv);
11430 	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
11431 		bool found = false;
11432 
11433 		if (IS_MOBILE(dev_priv))
11434 			intel_lvds_init(dev_priv);
11435 
11436 		intel_crt_init(dev_priv);
11437 
11438 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11439 			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11440 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11441 			if (!found && IS_G4X(dev_priv)) {
11442 				drm_dbg_kms(&dev_priv->drm,
11443 					    "probing HDMI on SDVOB\n");
11444 				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11445 			}
11446 
11447 			if (!found && IS_G4X(dev_priv))
11448 				g4x_dp_init(dev_priv, DP_B, PORT_B);
11449 		}
11450 
11451 		/* Before G4X, SDVOC doesn't have its own detect register */
11452 
11453 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11454 			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11455 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11456 		}
11457 
11458 		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11459 
11460 			if (IS_G4X(dev_priv)) {
11461 				drm_dbg_kms(&dev_priv->drm,
11462 					    "probing HDMI on SDVOC\n");
11463 				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11464 			}
11465 			if (IS_G4X(dev_priv))
11466 				g4x_dp_init(dev_priv, DP_C, PORT_C);
11467 		}
11468 
11469 		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11470 			g4x_dp_init(dev_priv, DP_D, PORT_D);
11471 
11472 		if (SUPPORTS_TV(dev_priv))
11473 			intel_tv_init(dev_priv);
11474 	} else if (DISPLAY_VER(dev_priv) == 2) {
11475 		if (IS_I85X(dev_priv))
11476 			intel_lvds_init(dev_priv);
11477 
11478 		intel_crt_init(dev_priv);
11479 		intel_dvo_init(dev_priv);
11480 	}
11481 
11482 	for_each_intel_encoder(&dev_priv->drm, encoder) {
11483 		encoder->base.possible_crtcs =
11484 			intel_encoder_possible_crtcs(encoder);
11485 		encoder->base.possible_clones =
11486 			intel_encoder_possible_clones(encoder);
11487 	}
11488 
11489 	intel_init_pch_refclk(dev_priv);
11490 
11491 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11492 }
11493 
11494 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11495 {
11496 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11497 
11498 	drm_framebuffer_cleanup(fb);
11499 
11500 	if (intel_fb_uses_dpt(fb))
11501 		intel_dpt_destroy(intel_fb->dpt_vm);
11502 
11503 	intel_frontbuffer_put(intel_fb->frontbuffer);
11504 
11505 	kfree(intel_fb);
11506 }
11507 
11508 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11509 						struct drm_file *file,
11510 						unsigned int *handle)
11511 {
11512 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11513 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
11514 
11515 	if (i915_gem_object_is_userptr(obj)) {
11516 		drm_dbg(&i915->drm,
11517 			"attempting to use a userptr for a framebuffer, denied\n");
11518 		return -EINVAL;
11519 	}
11520 
11521 	return drm_gem_handle_create(file, &obj->base, handle);
11522 }
11523 
11524 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
11525 					struct drm_file *file,
11526 					unsigned flags, unsigned color,
11527 					struct drm_clip_rect *clips,
11528 					unsigned num_clips)
11529 {
11530 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11531 
11532 	i915_gem_object_flush_if_display(obj);
11533 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
11534 
11535 	return 0;
11536 }
11537 
11538 static const struct drm_framebuffer_funcs intel_fb_funcs = {
11539 	.destroy = intel_user_framebuffer_destroy,
11540 	.create_handle = intel_user_framebuffer_create_handle,
11541 	.dirty = intel_user_framebuffer_dirty,
11542 };
11543 
11544 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
11545 				  struct drm_i915_gem_object *obj,
11546 				  struct drm_mode_fb_cmd2 *mode_cmd)
11547 {
11548 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
11549 	struct drm_framebuffer *fb = &intel_fb->base;
11550 	u32 max_stride;
11551 	unsigned int tiling, stride;
11552 	int ret = -EINVAL;
11553 	int i;
11554 
11555 	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
11556 	if (!intel_fb->frontbuffer)
11557 		return -ENOMEM;
11558 
11559 	i915_gem_object_lock(obj, NULL);
11560 	tiling = i915_gem_object_get_tiling(obj);
11561 	stride = i915_gem_object_get_stride(obj);
11562 	i915_gem_object_unlock(obj);
11563 
11564 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
11565 		/*
11566 		 * If there's a fence, enforce that
11567 		 * the fb modifier and tiling mode match.
11568 		 */
11569 		if (tiling != I915_TILING_NONE &&
11570 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11571 			drm_dbg_kms(&dev_priv->drm,
11572 				    "tiling_mode doesn't match fb modifier\n");
11573 			goto err;
11574 		}
11575 	} else {
11576 		if (tiling == I915_TILING_X) {
11577 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
11578 		} else if (tiling == I915_TILING_Y) {
11579 			drm_dbg_kms(&dev_priv->drm,
11580 				    "No Y tiling for legacy addfb\n");
11581 			goto err;
11582 		}
11583 	}
11584 
11585 	if (!drm_any_plane_has_format(&dev_priv->drm,
11586 				      mode_cmd->pixel_format,
11587 				      mode_cmd->modifier[0])) {
11588 		drm_dbg_kms(&dev_priv->drm,
11589 			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
11590 			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);
11591 		goto err;
11592 	}
11593 
11594 	/*
11595 	 * gen2/3 display engine uses the fence if present,
11596 	 * so the tiling mode must match the fb modifier exactly.
11597 	 */
11598 	if (DISPLAY_VER(dev_priv) < 4 &&
11599 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11600 		drm_dbg_kms(&dev_priv->drm,
11601 			    "tiling_mode must match fb modifier exactly on gen2/3\n");
11602 		goto err;
11603 	}
11604 
11605 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
11606 					 mode_cmd->modifier[0]);
11607 	if (mode_cmd->pitches[0] > max_stride) {
11608 		drm_dbg_kms(&dev_priv->drm,
11609 			    "%s pitch (%u) must be at most %d\n",
11610 			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
11611 			    "tiled" : "linear",
11612 			    mode_cmd->pitches[0], max_stride);
11613 		goto err;
11614 	}
11615 
11616 	/*
11617 	 * If there's a fence, enforce that
11618 	 * the fb pitch and fence stride match.
11619 	 */
11620 	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
11621 		drm_dbg_kms(&dev_priv->drm,
11622 			    "pitch (%d) must match tiling stride (%d)\n",
11623 			    mode_cmd->pitches[0], stride);
11624 		goto err;
11625 	}
11626 
11627 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11628 	if (mode_cmd->offsets[0] != 0) {
11629 		drm_dbg_kms(&dev_priv->drm,
11630 			    "plane 0 offset (0x%08x) must be 0\n",
11631 			    mode_cmd->offsets[0]);
11632 		goto err;
11633 	}
11634 
11635 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
11636 
11637 	for (i = 0; i < fb->format->num_planes; i++) {
11638 		u32 stride_alignment;
11639 
11640 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
11641 			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
11642 				    i);
11643 			goto err;
11644 		}
11645 
11646 		stride_alignment = intel_fb_stride_alignment(fb, i);
11647 		if (fb->pitches[i] & (stride_alignment - 1)) {
11648 			drm_dbg_kms(&dev_priv->drm,
11649 				    "plane %d pitch (%d) must be at least %u byte aligned\n",
11650 				    i, fb->pitches[i], stride_alignment);
11651 			goto err;
11652 		}
11653 
11654 		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
11655 			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
11656 
11657 			if (fb->pitches[i] != ccs_aux_stride) {
11658 				drm_dbg_kms(&dev_priv->drm,
11659 					    "ccs aux plane %d pitch (%d) must be %d\n",
11660 					    i,
11661 					    fb->pitches[i], ccs_aux_stride);
11662 				goto err;
11663 			}
11664 		}
11665 
11666 		/* TODO: Add POT stride remapping support for CCS formats as well. */
11667 		if (IS_ALDERLAKE_P(dev_priv) &&
11668 		    mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR &&
11669 		    !intel_fb_needs_pot_stride_remap(intel_fb) &&
11670 		    !is_power_of_2(mode_cmd->pitches[i])) {
11671 			drm_dbg_kms(&dev_priv->drm,
11672 				    "plane %d pitch (%d) must be power of two for tiled buffers\n",
11673 				    i, mode_cmd->pitches[i]);
11674 			goto err;
11675 		}
11676 
11677 		fb->obj[i] = &obj->base;
11678 	}
11679 
11680 	ret = intel_fill_fb_info(dev_priv, intel_fb);
11681 	if (ret)
11682 		goto err;
11683 
11684 	if (intel_fb_uses_dpt(fb)) {
11685 		struct i915_address_space *vm;
11686 
11687 		vm = intel_dpt_create(intel_fb);
11688 		if (IS_ERR(vm)) {
11689 			ret = PTR_ERR(vm);
11690 			goto err;
11691 		}
11692 
11693 		intel_fb->dpt_vm = vm;
11694 	}
11695 
11696 	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
11697 	if (ret) {
11698 		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
11699 		goto err;
11700 	}
11701 
11702 	return 0;
11703 
11704 err:
11705 	intel_frontbuffer_put(intel_fb->frontbuffer);
11706 	return ret;
11707 }
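
/*
 * Alignment arithmetic (illustration only, assuming a 64-byte stride
 * alignment from intel_fb_stride_alignment()): the pitch checks above
 * use the usual power-of-two mask trick,
 *
 *	fb->pitches[i] & (64 - 1)	!= 0 -> misaligned, reject
 *
 * and on ADL-P a tiled fb without POT stride remapping must also have
 * is_power_of_2(pitches[i]), so e.g. a 4000-byte pitch is rejected
 * while 4096 passes both tests. The real alignment is format- and
 * modifier-dependent.
 */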
11708 
11709 static struct drm_framebuffer *
11710 intel_user_framebuffer_create(struct drm_device *dev,
11711 			      struct drm_file *filp,
11712 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
11713 {
11714 	struct drm_framebuffer *fb;
11715 	struct drm_i915_gem_object *obj;
11716 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
11717 	struct drm_i915_private *i915;
11718 
11719 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
11720 	if (!obj)
11721 		return ERR_PTR(-ENOENT);
11722 
11723 	/* object is backed with LMEM for discrete */
11724 	i915 = to_i915(obj->base.dev);
11725 	if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
11726 		/* object is "remote", not in local memory */
11727 		i915_gem_object_put(obj);
11728 		return ERR_PTR(-EREMOTE);
11729 	}
11730 
11731 	fb = intel_framebuffer_create(obj, &mode_cmd);
11732 	i915_gem_object_put(obj);
11733 
11734 	return fb;
11735 }
11736 
11737 static enum drm_mode_status
11738 intel_mode_valid(struct drm_device *dev,
11739 		 const struct drm_display_mode *mode)
11740 {
11741 	struct drm_i915_private *dev_priv = to_i915(dev);
11742 	int hdisplay_max, htotal_max;
11743 	int vdisplay_max, vtotal_max;
11744 
11745 	/*
11746 	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
11747 	 * of DBLSCAN modes to the output's mode list when they detect
11748 	 * the scaling mode property on the connector. And they don't
11749 	 * ask the kernel to validate those modes in any way until
11750 	 * modeset time at which point the client gets a protocol error.
11751 	 * So in order to not upset those clients we silently ignore the
11752 	 * DBLSCAN flag on such connectors. For other connectors we will
11753 	 * reject modes with the DBLSCAN flag in encoder->compute_config().
11754 	 * And we always reject DBLSCAN modes in connector->mode_valid()
11755 	 * as we never want such modes on the connector's mode list.
11756 	 */
11757 
11758 	if (mode->vscan > 1)
11759 		return MODE_NO_VSCAN;
11760 
11761 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
11762 		return MODE_H_ILLEGAL;
11763 
11764 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
11765 			   DRM_MODE_FLAG_NCSYNC |
11766 			   DRM_MODE_FLAG_PCSYNC))
11767 		return MODE_HSYNC;
11768 
11769 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
11770 			   DRM_MODE_FLAG_PIXMUX |
11771 			   DRM_MODE_FLAG_CLKDIV2))
11772 		return MODE_BAD;
11773 
11774 	/* Transcoder timing limits */
11775 	if (DISPLAY_VER(dev_priv) >= 11) {
11776 		hdisplay_max = 16384;
11777 		vdisplay_max = 8192;
11778 		htotal_max = 16384;
11779 		vtotal_max = 8192;
11780 	} else if (DISPLAY_VER(dev_priv) >= 9 ||
11781 		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
11782 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
11783 		vdisplay_max = 4096;
11784 		htotal_max = 8192;
11785 		vtotal_max = 8192;
11786 	} else if (DISPLAY_VER(dev_priv) >= 3) {
11787 		hdisplay_max = 4096;
11788 		vdisplay_max = 4096;
11789 		htotal_max = 8192;
11790 		vtotal_max = 8192;
11791 	} else {
11792 		hdisplay_max = 2048;
11793 		vdisplay_max = 2048;
11794 		htotal_max = 4096;
11795 		vtotal_max = 4096;
11796 	}
11797 
11798 	if (mode->hdisplay > hdisplay_max ||
11799 	    mode->hsync_start > htotal_max ||
11800 	    mode->hsync_end > htotal_max ||
11801 	    mode->htotal > htotal_max)
11802 		return MODE_H_ILLEGAL;
11803 
11804 	if (mode->vdisplay > vdisplay_max ||
11805 	    mode->vsync_start > vtotal_max ||
11806 	    mode->vsync_end > vtotal_max ||
11807 	    mode->vtotal > vtotal_max)
11808 		return MODE_V_ILLEGAL;
11809 
11810 	if (DISPLAY_VER(dev_priv) >= 5) {
11811 		if (mode->hdisplay < 64 ||
11812 		    mode->htotal - mode->hdisplay < 32)
11813 			return MODE_H_ILLEGAL;
11814 
11815 		if (mode->vtotal - mode->vdisplay < 5)
11816 			return MODE_V_ILLEGAL;
11817 	} else {
11818 		if (mode->htotal - mode->hdisplay < 32)
11819 			return MODE_H_ILLEGAL;
11820 
11821 		if (mode->vtotal - mode->vdisplay < 3)
11822 			return MODE_V_ILLEGAL;
11823 	}
11824 
11825 	return MODE_OK;
11826 }
11827 
11828 enum drm_mode_status
11829 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
11830 				const struct drm_display_mode *mode,
11831 				bool bigjoiner)
11832 {
11833 	int plane_width_max, plane_height_max;
11834 
11835 	/*
11836 	 * intel_mode_valid() should be
11837 	 * sufficient on older platforms.
11838 	 */
11839 	if (DISPLAY_VER(dev_priv) < 9)
11840 		return MODE_OK;
11841 
11842 	/*
11843 	 * Most people will probably want a fullscreen
11844 	 * plane so let's not advertise modes that are
11845 	 * too big for that.
11846 	 */
11847 	if (DISPLAY_VER(dev_priv) >= 11) {
11848 		plane_width_max = 5120 << bigjoiner;
11849 		plane_height_max = 4320;
11850 	} else {
11851 		plane_width_max = 5120;
11852 		plane_height_max = 4096;
11853 	}
11854 
11855 	if (mode->hdisplay > plane_width_max)
11856 		return MODE_H_ILLEGAL;
11857 
11858 	if (mode->vdisplay > plane_height_max)
11859 		return MODE_V_ILLEGAL;
11860 
11861 	return MODE_OK;
11862 }
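
/*
 * Worked example (illustration only): on DISPLAY_VER >= 11 the limit
 * computed above is
 *
 *	plane_width_max = 5120 << bigjoiner;	// 5120, or 10240 joined
 *
 * so a 7680x4320 (8K) mode is rejected with MODE_H_ILLEGAL unless the
 * bigjoiner path is available to split the plane across two pipes.
 */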
11863 
11864 static const struct drm_mode_config_funcs intel_mode_funcs = {
11865 	.fb_create = intel_user_framebuffer_create,
11866 	.get_format_info = intel_get_format_info,
11867 	.output_poll_changed = intel_fbdev_output_poll_changed,
11868 	.mode_valid = intel_mode_valid,
11869 	.atomic_check = intel_atomic_check,
11870 	.atomic_commit = intel_atomic_commit,
11871 	.atomic_state_alloc = intel_atomic_state_alloc,
11872 	.atomic_state_clear = intel_atomic_state_clear,
11873 	.atomic_state_free = intel_atomic_state_free,
11874 };
11875 
11876 /**
11877  * intel_init_display_hooks - initialize the display modesetting hooks
11878  * @dev_priv: device private
11879  */
11880 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
11881 {
11882 	if (!HAS_DISPLAY(dev_priv))
11883 		return;
11884 
11885 	intel_init_cdclk_hooks(dev_priv);
11886 	intel_init_audio_hooks(dev_priv);
11887 
11888 	intel_dpll_init_clock_hook(dev_priv);
11889 
11890 	if (DISPLAY_VER(dev_priv) >= 9) {
11891 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11892 		dev_priv->display.crtc_enable = hsw_crtc_enable;
11893 		dev_priv->display.crtc_disable = hsw_crtc_disable;
11894 	} else if (HAS_DDI(dev_priv)) {
11895 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11896 		dev_priv->display.crtc_enable = hsw_crtc_enable;
11897 		dev_priv->display.crtc_disable = hsw_crtc_disable;
11898 	} else if (HAS_PCH_SPLIT(dev_priv)) {
11899 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
11900 		dev_priv->display.crtc_enable = ilk_crtc_enable;
11901 		dev_priv->display.crtc_disable = ilk_crtc_disable;
11902 	} else if (IS_CHERRYVIEW(dev_priv) ||
11903 		   IS_VALLEYVIEW(dev_priv)) {
11904 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11905 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
11906 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11907 	} else {
11908 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11909 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
11910 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11911 	}
11912 
11913 	intel_fdi_init_hook(dev_priv);
11914 
11915 	if (DISPLAY_VER(dev_priv) >= 9) {
11916 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
11917 		dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
11918 	} else {
11919 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
11920 		dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
11921 	}
11922 
11923 }
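
/*
 * Dispatch sketch (illustration only): the hooks filled in above are
 * called indirectly so one call site serves every platform, e.g.:
 *
 *	dev_priv->display.crtc_enable(state, crtc);
 *	dev_priv->display.get_initial_plane_config(crtc, &plane_config);
 *
 * (the latter is used during readout in intel_modeset_init_nogem()
 * below).
 */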
11924 
11925 void intel_modeset_init_hw(struct drm_i915_private *i915)
11926 {
11927 	struct intel_cdclk_state *cdclk_state;
11928 
11929 	if (!HAS_DISPLAY(i915))
11930 		return;
11931 
11932 	cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
11933 
11934 	intel_update_cdclk(i915);
11935 	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
11936 	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
11937 }
11938 
11939 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
11940 {
11941 	struct drm_plane *plane;
11942 	struct intel_crtc *crtc;
11943 
11944 	for_each_intel_crtc(state->dev, crtc) {
11945 		struct intel_crtc_state *crtc_state;
11946 
11947 		crtc_state = intel_atomic_get_crtc_state(state, crtc);
11948 		if (IS_ERR(crtc_state))
11949 			return PTR_ERR(crtc_state);
11950 
11951 		if (crtc_state->hw.active) {
11952 			/*
11953 			 * Preserve the inherited flag to avoid
11954 			 * taking the full modeset path.
11955 			 */
11956 			crtc_state->inherited = true;
11957 		}
11958 	}
11959 
11960 	drm_for_each_plane(plane, state->dev) {
11961 		struct drm_plane_state *plane_state;
11962 
11963 		plane_state = drm_atomic_get_plane_state(state, plane);
11964 		if (IS_ERR(plane_state))
11965 			return PTR_ERR(plane_state);
11966 	}
11967 
11968 	return 0;
11969 }
11970 
11971 /*
11972  * Calculate what we think the watermarks should be for the state we've read
11973  * out of the hardware and then immediately program those watermarks so that
11974  * we ensure the hardware settings match our internal state.
11975  *
11976  * We can calculate what we think WM's should be by creating a duplicate of the
11977  * current state (which was constructed during hardware readout) and running it
11978  * through the atomic check code to calculate new watermark values in the
11979  * state object.
11980  */
11981 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
11982 {
11983 	struct drm_atomic_state *state;
11984 	struct intel_atomic_state *intel_state;
11985 	struct intel_crtc *crtc;
11986 	struct intel_crtc_state *crtc_state;
11987 	struct drm_modeset_acquire_ctx ctx;
11988 	int ret;
11989 	int i;
11990 
11991 	/* Only supported on platforms that use atomic watermark design */
11992 	if (!dev_priv->display.optimize_watermarks)
11993 		return;
11994 
11995 	state = drm_atomic_state_alloc(&dev_priv->drm);
11996 	if (drm_WARN_ON(&dev_priv->drm, !state))
11997 		return;
11998 
11999 	intel_state = to_intel_atomic_state(state);
12000 
12001 	drm_modeset_acquire_init(&ctx, 0);
12002 
12003 retry:
12004 	state->acquire_ctx = &ctx;
12005 
12006 	/*
12007 	 * Hardware readout is the only time we don't want to calculate
12008 	 * intermediate watermarks (since we don't trust the current
12009 	 * watermarks).
12010 	 */
12011 	if (!HAS_GMCH(dev_priv))
12012 		intel_state->skip_intermediate_wm = true;
12013 
12014 	ret = sanitize_watermarks_add_affected(state);
12015 	if (ret)
12016 		goto fail;
12017 
12018 	ret = intel_atomic_check(&dev_priv->drm, state);
12019 	if (ret)
12020 		goto fail;
12021 
12022 	/* Write calculated watermark values back */
12023 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
12024 		crtc_state->wm.need_postvbl_update = true;
12025 		dev_priv->display.optimize_watermarks(intel_state, crtc);
12026 
12027 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
12028 	}
12029 
12030 fail:
12031 	if (ret == -EDEADLK) {
12032 		drm_atomic_state_clear(state);
12033 		drm_modeset_backoff(&ctx);
12034 		goto retry;
12035 	}
12036 
12037 	/*
12038 	 * If we fail here, it means that the hardware appears to be
12039 	 * programmed in a way that shouldn't be possible, given our
12040 	 * understanding of watermark requirements.  This might mean a
12041 	 * mistake in the hardware readout code or a mistake in the
12042 	 * watermark calculations for a given platform.  Raise a WARN
12043 	 * so that this is noticeable.
12044 	 *
12045 	 * If this actually happens, we'll have to just leave the
12046 	 * BIOS-programmed watermarks untouched and hope for the best.
12047 	 */
12048 	drm_WARN(&dev_priv->drm, ret,
12049 		 "Could not determine valid watermarks for inherited state\n");
12050 
12051 	drm_atomic_state_put(state);
12052 
12053 	drm_modeset_drop_locks(&ctx);
12054 	drm_modeset_acquire_fini(&ctx);
12055 }
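
/*
 * Locking-pattern note (illustration only): the retry loop above is the
 * standard drm deadlock-avoidance dance, repeated by
 * intel_initial_commit() below:
 *
 *	retry:
 *		...acquire crtc/plane states...
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *
 * drm_modeset_backoff() drops the locks held so far and sleeps until
 * the contended lock is free, guaranteeing forward progress.
 */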
12056 
12057 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
12058 {
12059 	if (IS_IRONLAKE(dev_priv)) {
12060 		u32 fdi_pll_clk =
12061 			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
12062 
12063 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
12064 	} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
12065 		dev_priv->fdi_pll_freq = 270000;
12066 	} else {
12067 		return;
12068 	}
12069 
12070 	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
12071 }
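
/*
 * Worked example (illustration only): if the FDI_PLL_FB_CLOCK_MASK
 * field read above were, say, 25 on Ironlake, the formula gives
 *
 *	(25 + 2) * 10000 = 270000 kHz = 270 MHz
 *
 * which matches the fixed 270000 used for Sandybridge/Ivybridge.
 */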
12072 
12073 static int intel_initial_commit(struct drm_device *dev)
12074 {
12075 	struct drm_atomic_state *state = NULL;
12076 	struct drm_modeset_acquire_ctx ctx;
12077 	struct intel_crtc *crtc;
12078 	int ret = 0;
12079 
12080 	state = drm_atomic_state_alloc(dev);
12081 	if (!state)
12082 		return -ENOMEM;
12083 
12084 	drm_modeset_acquire_init(&ctx, 0);
12085 
12086 retry:
12087 	state->acquire_ctx = &ctx;
12088 
12089 	for_each_intel_crtc(dev, crtc) {
12090 		struct intel_crtc_state *crtc_state =
12091 			intel_atomic_get_crtc_state(state, crtc);
12092 
12093 		if (IS_ERR(crtc_state)) {
12094 			ret = PTR_ERR(crtc_state);
12095 			goto out;
12096 		}
12097 
12098 		if (crtc_state->hw.active) {
12099 			struct intel_encoder *encoder;
12100 
12101 			/*
12102 			 * We've not yet detected sink capabilities
12103 			 * (audio,infoframes,etc.) and thus we don't want to
12104 			 * force a full state recomputation yet. We want that to
12105 			 * happen only for the first real commit from userspace.
12106 			 * So preserve the inherited flag for the time being.
12107 			 */
12108 			crtc_state->inherited = true;
12109 
12110 			ret = drm_atomic_add_affected_planes(state, &crtc->base);
12111 			if (ret)
12112 				goto out;
12113 
12114 			/*
12115 			 * FIXME hack to force a LUT update to avoid the
12116 			 * plane update forcing the pipe gamma on without
12117 			 * having a proper LUT loaded. Remove once we
12118 			 * have readout for pipe gamma enable.
12119 			 */
12120 			crtc_state->uapi.color_mgmt_changed = true;
12121 
12122 			for_each_intel_encoder_mask(dev, encoder,
12123 						    crtc_state->uapi.encoder_mask) {
12124 				if (encoder->initial_fastset_check &&
12125 				    !encoder->initial_fastset_check(encoder, crtc_state)) {
12126 					ret = drm_atomic_add_affected_connectors(state,
12127 										 &crtc->base);
12128 					if (ret)
12129 						goto out;
12130 				}
12131 			}
12132 		}
12133 	}
12134 
12135 	ret = drm_atomic_commit(state);
12136 
12137 out:
12138 	if (ret == -EDEADLK) {
12139 		drm_atomic_state_clear(state);
12140 		drm_modeset_backoff(&ctx);
12141 		goto retry;
12142 	}
12143 
12144 	drm_atomic_state_put(state);
12145 
12146 	drm_modeset_drop_locks(&ctx);
12147 	drm_modeset_acquire_fini(&ctx);
12148 
12149 	return ret;
12150 }
12151 
12152 static void intel_mode_config_init(struct drm_i915_private *i915)
12153 {
12154 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
12155 
12156 	drm_mode_config_init(&i915->drm);
12157 	INIT_LIST_HEAD(&i915->global_obj_list);
12158 
12159 	mode_config->min_width = 0;
12160 	mode_config->min_height = 0;
12161 
12162 	mode_config->preferred_depth = 24;
12163 	mode_config->prefer_shadow = 1;
12164 
12165 	mode_config->funcs = &intel_mode_funcs;
12166 
12167 	mode_config->async_page_flip = has_async_flips(i915);
12168 
12169 	/*
12170 	 * Maximum framebuffer dimensions, chosen to match
12171 	 * the maximum render engine surface size on gen4+.
12172 	 */
12173 	if (DISPLAY_VER(i915) >= 7) {
12174 		mode_config->max_width = 16384;
12175 		mode_config->max_height = 16384;
12176 	} else if (DISPLAY_VER(i915) >= 4) {
12177 		mode_config->max_width = 8192;
12178 		mode_config->max_height = 8192;
12179 	} else if (DISPLAY_VER(i915) == 3) {
12180 		mode_config->max_width = 4096;
12181 		mode_config->max_height = 4096;
12182 	} else {
12183 		mode_config->max_width = 2048;
12184 		mode_config->max_height = 2048;
12185 	}
12186 
12187 	if (IS_I845G(i915) || IS_I865G(i915)) {
12188 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
12189 		mode_config->cursor_height = 1023;
12190 	} else if (IS_I830(i915) || IS_I85X(i915) ||
12191 		   IS_I915G(i915) || IS_I915GM(i915)) {
12192 		mode_config->cursor_width = 64;
12193 		mode_config->cursor_height = 64;
12194 	} else {
12195 		mode_config->cursor_width = 256;
12196 		mode_config->cursor_height = 256;
12197 	}
12198 }
12199 
12200 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
12201 {
12202 	intel_atomic_global_obj_cleanup(i915);
12203 	drm_mode_config_cleanup(&i915->drm);
12204 }
12205 
12206 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
12207 {
12208 	if (plane_config->fb) {
12209 		struct drm_framebuffer *fb = &plane_config->fb->base;
12210 
12211 		/* We may only have the stub and not a full framebuffer */
12212 		if (drm_framebuffer_read_refcount(fb))
12213 			drm_framebuffer_put(fb);
12214 		else
12215 			kfree(fb);
12216 	}
12217 
12218 	if (plane_config->vma)
12219 		i915_vma_put(plane_config->vma);
12220 }
12221 
12222 /* part #1: call before irq install */
12223 int intel_modeset_init_noirq(struct drm_i915_private *i915)
12224 {
12225 	int ret;
12226 
12227 	if (i915_inject_probe_failure(i915))
12228 		return -ENODEV;
12229 
12230 	if (HAS_DISPLAY(i915)) {
12231 		ret = drm_vblank_init(&i915->drm,
12232 				      INTEL_NUM_PIPES(i915));
12233 		if (ret)
12234 			return ret;
12235 	}
12236 
12237 	intel_bios_init(i915);
12238 
12239 	ret = intel_vga_register(i915);
12240 	if (ret)
12241 		goto cleanup_bios;
12242 
12243 	/* FIXME: completely on the wrong abstraction layer */
12244 	intel_power_domains_init_hw(i915, false);
12245 
12246 	if (!HAS_DISPLAY(i915))
12247 		return 0;
12248 
12249 	intel_dmc_ucode_init(i915);
12250 
12251 	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
12252 	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
12253 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
12254 
12255 	i915->framestart_delay = 1; /* 1-4 */
12256 
12257 	i915->window2_delay = 0; /* No DSB so no window2 delay */
12258 
12259 	intel_mode_config_init(i915);
12260 
12261 	ret = intel_cdclk_init(i915);
12262 	if (ret)
12263 		goto cleanup_vga_client_pw_domain_dmc;
12264 
12265 	ret = intel_dbuf_init(i915);
12266 	if (ret)
12267 		goto cleanup_vga_client_pw_domain_dmc;
12268 
12269 	ret = intel_bw_init(i915);
12270 	if (ret)
12271 		goto cleanup_vga_client_pw_domain_dmc;
12272 
12273 	init_llist_head(&i915->atomic_helper.free_list);
12274 	INIT_WORK(&i915->atomic_helper.free_work,
12275 		  intel_atomic_helper_free_state_worker);
12276 
12277 	intel_init_quirks(i915);
12278 
12279 	intel_fbc_init(i915);
12280 
12281 	return 0;
12282 
12283 cleanup_vga_client_pw_domain_dmc:
12284 	intel_dmc_ucode_fini(i915);
12285 	intel_power_domains_driver_remove(i915);
12286 	intel_vga_unregister(i915);
12287 cleanup_bios:
12288 	intel_bios_driver_remove(i915);
12289 
12290 	return ret;
12291 }
12292 
12293 /* part #2: call after irq install, but before gem init */
12294 int intel_modeset_init_nogem(struct drm_i915_private *i915)
12295 {
12296 	struct drm_device *dev = &i915->drm;
12297 	enum pipe pipe;
12298 	struct intel_crtc *crtc;
12299 	int ret;
12300 
12301 	if (!HAS_DISPLAY(i915))
12302 		return 0;
12303 
12304 	intel_init_pm(i915);
12305 
12306 	intel_panel_sanitize_ssc(i915);
12307 
12308 	intel_pps_setup(i915);
12309 
12310 	intel_gmbus_setup(i915);
12311 
12312 	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
12313 		    INTEL_NUM_PIPES(i915),
12314 		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
12315 
12316 	for_each_pipe(i915, pipe) {
12317 		ret = intel_crtc_init(i915, pipe);
12318 		if (ret) {
12319 			intel_mode_config_cleanup(i915);
12320 			return ret;
12321 		}
12322 	}
12323 
12324 	intel_plane_possible_crtcs_init(i915);
12325 	intel_shared_dpll_init(dev);
12326 	intel_update_fdi_pll_freq(i915);
12327 
12328 	intel_update_czclk(i915);
12329 	intel_modeset_init_hw(i915);
12330 	intel_dpll_update_ref_clks(i915);
12331 
12332 	intel_hdcp_component_init(i915);
12333 
12334 	if (i915->max_cdclk_freq == 0)
12335 		intel_update_max_cdclk(i915);
12336 
12337 	/*
12338 	 * If the platform has HTI, we need to find out whether it has reserved
12339 	 * any display resources before we create our display outputs.
12340 	 */
12341 	if (INTEL_INFO(i915)->display.has_hti)
12342 		i915->hti_state = intel_de_read(i915, HDPORT_STATE);
12343 
12344 	/* Just disable it once at startup */
12345 	intel_vga_disable(i915);
12346 	intel_setup_outputs(i915);
12347 
12348 	drm_modeset_lock_all(dev);
12349 	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
12350 	drm_modeset_unlock_all(dev);
12351 
12352 	for_each_intel_crtc(dev, crtc) {
12353 		struct intel_initial_plane_config plane_config = {};
12354 
12355 		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
12356 			continue;
12357 
12358 		/*
12359 		 * Note that reserving the BIOS fb up front prevents us
12360 		 * from stuffing other stolen allocations like the ring
12361 		 * on top.  This prevents some ugliness at boot time, and
12362 		 * can even allow for smooth boot transitions if the BIOS
12363 		 * fb is large enough for the active pipe configuration.
12364 		 */
12365 		i915->display.get_initial_plane_config(crtc, &plane_config);
12366 
12367 		/*
12368 		 * If the fb is shared between multiple heads, we'll
12369 		 * just get the first one.
12370 		 */
12371 		intel_find_initial_plane_obj(crtc, &plane_config);
12372 
12373 		plane_config_fini(&plane_config);
12374 	}
12375 
12376 	/*
12377 	 * Make sure hardware watermarks really match the state we read out.
12378 	 * Note that we need to do this after reconstructing the BIOS fb's
12379 	 * since the watermark calculation done here will use pstate->fb.
12380 	 */
12381 	if (!HAS_GMCH(i915))
12382 		sanitize_watermarks(i915);
12383 
12384 	return 0;
12385 }
12386 
12387 /* part #3: call after gem init */
12388 int intel_modeset_init(struct drm_i915_private *i915)
12389 {
12390 	int ret;
12391 
12392 	if (!HAS_DISPLAY(i915))
12393 		return 0;
12394 
12395 	/*
12396 	 * Force all active planes to recompute their states. So that on
12397 	 * mode_setcrtc after probe, all the intel_plane_state variables
12398 	 * are already calculated and there is no assert_plane warnings
12399 	 * during bootup.
12400 	 */
12401 	ret = intel_initial_commit(&i915->drm);
12402 	if (ret)
12403 		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
12404 
12405 	intel_overlay_setup(i915);
12406 
12407 	ret = intel_fbdev_init(&i915->drm);
12408 	if (ret)
12409 		return ret;
12410 
12411 	/* Only enable hotplug handling once the fbdev is fully set up. */
12412 	intel_hpd_init(i915);
12413 	intel_hpd_poll_disable(i915);
12414 
12415 	intel_init_ipc(i915);
12416 
12417 	return 0;
12418 }
12419 
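/*
 * Illustrative sketch, not driver code: the three intel_modeset_init*()
 * phases are sequenced by the probe path roughly as below (condensed;
 * error unwinding omitted, and the part #1 entry point is assumed to be
 * intel_modeset_init_noirq() per the "call before irq install" split).
 * The split exists because part #2 needs interrupts installed and
 * part #3 needs GEM initialized.
 */
#if 0
static int modeset_probe_sequence_sketch(struct drm_i915_private *i915)
{
	int ret;

	ret = intel_modeset_init_noirq(i915);	/* part #1: before irq install */
	if (ret)
		return ret;

	ret = intel_irq_install(i915);
	if (ret)
		return ret;

	ret = intel_modeset_init_nogem(i915);	/* part #2: irqs on, no GEM yet */
	if (ret)
		return ret;

	ret = i915_gem_init(i915);
	if (ret)
		return ret;

	return intel_modeset_init(i915);	/* part #3: after gem init */
}
#endif
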
12420 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12421 {
12422 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12423 	/* 640x480@60Hz, ~25175 kHz */
12424 	struct dpll clock = {
12425 		.m1 = 18,
12426 		.m2 = 7,
12427 		.p1 = 13,
12428 		.p2 = 4,
12429 		.n = 2,
12430 	};
12431 	u32 dpll, fp;
12432 	int i;
12433 
12434 	drm_WARN_ON(&dev_priv->drm,
12435 		    i9xx_calc_dpll_params(48000, &clock) != 25154);
12436 
12437 	drm_dbg_kms(&dev_priv->drm,
12438 		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
12439 		    pipe_name(pipe), clock.vco, clock.dot);
12440 
12441 	fp = i9xx_dpll_compute_fp(&clock);
12442 	dpll = DPLL_DVO_2X_MODE |
12443 		DPLL_VGA_MODE_DIS |
12444 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
12445 		PLL_P2_DIVIDE_BY_4 |
12446 		PLL_REF_INPUT_DREFCLK |
12447 		DPLL_VCO_ENABLE;
12448 
12449 	intel_de_write(dev_priv, FP0(pipe), fp);
12450 	intel_de_write(dev_priv, FP1(pipe), fp);
12451 
12452 	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
12453 	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
12454 	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
12455 	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
12456 	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
12457 	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
12458 	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
12459 
12460 	/*
12461 	 * Apparently we need to have VGA mode enabled prior to changing
12462 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
12463 	 * dividers, even though the register value does change.
12464 	 */
12465 	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
12466 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12467 
12468 	/* Wait for the clocks to stabilize. */
12469 	intel_de_posting_read(dev_priv, DPLL(pipe));
12470 	udelay(150);
12471 
12472 	/* The pixel multiplier can only be updated once the
12473 	 * DPLL is enabled and the clocks are stable.
12474 	 *
12475 	 * So write it again.
12476 	 */
12477 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12478 
12479 	/* We do this three times for luck */
12480 	for (i = 0; i < 3; i++) {
12481 		intel_de_write(dev_priv, DPLL(pipe), dpll);
12482 		intel_de_posting_read(dev_priv, DPLL(pipe));
12483 		udelay(150); /* wait for warmup */
12484 	}
12485 
12486 	intel_de_write(dev_priv, PIPECONF(pipe),
12487 		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
12488 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12489 
12490 	intel_wait_for_pipe_scanline_moving(crtc);
12491 }
12492 
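/*
 * Worked example (illustrative, mirrors i9xx_calc_dpll_params()): the
 * drm_WARN_ON() above checks the fixed 640x480 dividers against the
 * i9xx dot clock formula. With refclk = 48000 kHz:
 *
 *   m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 20 + 9      = 109
 *   vco = refclk * m / (n + 2)    = 48000 * 109 / 4 = 1308000 kHz
 *   dot = vco / (p1 * p2)         = 1308000 / 52    ~= 25154 kHz
 *
 * i.e. the ~25.175 MHz VESA 640x480@60 pixel clock. Note also how the
 * timing registers above pack (active - 1) in the low half and
 * (total - 1) in the high half of each register.
 */
static int __maybe_unused i830_dot_clock_sketch(void)
{
	const struct dpll clock = { .m1 = 18, .m2 = 7, .p1 = 13, .p2 = 4, .n = 2 };
	int m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	int vco = DIV_ROUND_CLOSEST(48000 * m, clock.n + 2);

	return DIV_ROUND_CLOSEST(vco, clock.p1 * clock.p2); /* 25154 */
}
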
12493 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12494 {
12495 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12496 
12497 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
12498 		    pipe_name(pipe));
12499 
12500 	drm_WARN_ON(&dev_priv->drm,
12501 		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
12502 		    DISPLAY_PLANE_ENABLE);
12503 	drm_WARN_ON(&dev_priv->drm,
12504 		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
12505 		    DISPLAY_PLANE_ENABLE);
12506 	drm_WARN_ON(&dev_priv->drm,
12507 		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
12508 		    DISPLAY_PLANE_ENABLE);
12509 	drm_WARN_ON(&dev_priv->drm,
12510 		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
12511 	drm_WARN_ON(&dev_priv->drm,
12512 		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
12513 
12514 	intel_de_write(dev_priv, PIPECONF(pipe), 0);
12515 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12516 
12517 	intel_wait_for_pipe_scanline_stopped(crtc);
12518 
12519 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
12520 	intel_de_posting_read(dev_priv, DPLL(pipe));
12521 }
12522 
12523 static void
12524 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12525 {
12526 	struct intel_crtc *crtc;
12527 
12528 	if (DISPLAY_VER(dev_priv) >= 4)
12529 		return;
12530 
12531 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12532 		struct intel_plane *plane =
12533 			to_intel_plane(crtc->base.primary);
12534 		struct intel_crtc *plane_crtc;
12535 		enum pipe pipe;
12536 
12537 		if (!plane->get_hw_state(plane, &pipe))
12538 			continue;
12539 
12540 		if (pipe == crtc->pipe)
12541 			continue;
12542 
12543 		drm_dbg_kms(&dev_priv->drm,
12544 			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12545 			    plane->base.base.id, plane->base.name);
12546 
12547 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12548 		intel_plane_disable_noatomic(plane_crtc, plane);
12549 	}
12550 }
12551 
12552 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12553 {
12554 	struct drm_device *dev = crtc->base.dev;
12555 	struct intel_encoder *encoder;
12556 
12557 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
12558 		return true;
12559 
12560 	return false;
12561 }
12562 
12563 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12564 {
12565 	struct drm_device *dev = encoder->base.dev;
12566 	struct intel_connector *connector;
12567 
12568 	for_each_connector_on_encoder(dev, &encoder->base, connector)
12569 		return connector;
12570 
12571 	return NULL;
12572 }
12573 
12574 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12575 			      enum pipe pch_transcoder)
12576 {
12577 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12578 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12579 }
12580 
12581 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12582 {
12583 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12584 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12585 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
12586 
12587 	if (DISPLAY_VER(dev_priv) >= 9 ||
12588 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12589 		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
12590 		u32 val;
12591 
12592 		if (transcoder_is_dsi(cpu_transcoder))
12593 			return;
12594 
12595 		val = intel_de_read(dev_priv, reg);
12596 		val &= ~HSW_FRAME_START_DELAY_MASK;
12597 		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12598 		intel_de_write(dev_priv, reg, val);
12599 	} else {
12600 		i915_reg_t reg = PIPECONF(cpu_transcoder);
12601 		u32 val;
12602 
12603 		val = intel_de_read(dev_priv, reg);
12604 		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12605 		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12606 		intel_de_write(dev_priv, reg, val);
12607 	}
12608 
12609 	if (!crtc_state->has_pch_encoder)
12610 		return;
12611 
12612 	if (HAS_PCH_IBX(dev_priv)) {
12613 		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12614 		u32 val;
12615 
12616 		val = intel_de_read(dev_priv, reg);
12617 		val &= ~TRANS_FRAME_START_DELAY_MASK;
12618 		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12619 		intel_de_write(dev_priv, reg, val);
12620 	} else {
12621 		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12622 		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12623 		u32 val;
12624 
12625 		val = intel_de_read(dev_priv, reg);
12626 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12627 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12628 		intel_de_write(dev_priv, reg, val);
12629 	}
12630 }
12631 
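/*
 * Illustrative sketch, not driver code: each branch above is a plain
 * read-modify-write of a frame start delay field. The same operation
 * can be expressed with the intel_de_rmw() helper (used e.g. in
 * intel_early_display_was() below); for the HSW+ transcoder case:
 */
static void __maybe_unused
frame_start_delay_rmw_sketch(struct drm_i915_private *dev_priv,
			     enum transcoder cpu_transcoder)
{
	/* clear the field, then program delay - 1, exactly as above */
	intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
		     HSW_FRAME_START_DELAY_MASK,
		     HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1));
}
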
12632 static void intel_sanitize_crtc(struct intel_crtc *crtc,
12633 				struct drm_modeset_acquire_ctx *ctx)
12634 {
12635 	struct drm_device *dev = crtc->base.dev;
12636 	struct drm_i915_private *dev_priv = to_i915(dev);
12637 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
12638 
12639 	if (crtc_state->hw.active) {
12640 		struct intel_plane *plane;
12641 
12642 		/* Clear any debugging frame start delays left by the BIOS */
12643 		intel_sanitize_frame_start_delay(crtc_state);
12644 
12645 		/* Disable everything but the primary plane */
12646 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
12647 			const struct intel_plane_state *plane_state =
12648 				to_intel_plane_state(plane->base.state);
12649 
12650 			if (plane_state->uapi.visible &&
12651 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
12652 				intel_plane_disable_noatomic(crtc, plane);
12653 		}
12654 
12655 		/*
12656 		 * Disable any background color set by the BIOS, but enable the
12657 		 * gamma and CSC to match how we program our planes.
12658 		 */
12659 		if (DISPLAY_VER(dev_priv) >= 9)
12660 			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
12661 				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
12662 	}
12663 
12664 	/* Adjust the state of the output pipe according to whether we
12665 	 * have active connectors/encoders. */
12666 	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
12667 	    !crtc_state->bigjoiner_slave)
12668 		intel_crtc_disable_noatomic(crtc, ctx);
12669 
12670 	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
12671 		/*
12672 		 * We start out with underrun reporting disabled to avoid races.
12673 		 * For correct bookkeeping mark this on active crtcs.
12674 		 *
12675 	 * Also, on gmch platforms we don't have any hardware bits to
12676 		 * disable the underrun reporting. Which means we need to start
12677 		 * out with underrun reporting disabled also on inactive pipes,
12678 		 * since otherwise we'll complain about the garbage we read when
12679 		 * e.g. coming up after runtime pm.
12680 		 *
12681 		 * No protection against concurrent access is required - at
12682 		 * worst a fifo underrun happens which also sets this to false.
12683 		 */
12684 		crtc->cpu_fifo_underrun_disabled = true;
12685 		/*
12686 		 * We track the PCH transcoder underrun reporting state
12687 		 * within the crtc, with the crtc for pipe A housing the underrun
12688 		 * reporting state for PCH transcoder A, crtc for pipe B housing
12689 		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
12690 		 * and marking underrun reporting as disabled for the non-existing
12691 		 * PCH transcoders B and C would prevent enabling the south
12692 		 * error interrupt (see cpt_can_enable_serr_int()).
12693 		 */
12694 		if (has_pch_trancoder(dev_priv, crtc->pipe))
12695 			crtc->pch_fifo_underrun_disabled = true;
12696 	}
12697 }
12698 
12699 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12700 {
12701 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12702 
12703 	/*
12704 	 * Some SNB BIOSes (e.g. ASUS K53SV) are known to misprogram
12705 	 * the hardware when a high resolution display is plugged in:
12706 	 * the DPLL P divider is zero and the pipe timings are bonkers.
12707 	 * We'll try to disable everything in that case.
12708 	 *
12709 	 * FIXME would be nice to be able to sanitize this state
12710 	 * without several WARNs, but for now let's take the easy
12711 	 * road.
12712 	 */
12713 	return IS_SANDYBRIDGE(dev_priv) &&
12714 		crtc_state->hw.active &&
12715 		crtc_state->shared_dpll &&
12716 		crtc_state->port_clock == 0;
12717 }
12718 
12719 static void intel_sanitize_encoder(struct intel_encoder *encoder)
12720 {
12721 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12722 	struct intel_connector *connector;
12723 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
12724 	struct intel_crtc_state *crtc_state = crtc ?
12725 		to_intel_crtc_state(crtc->base.state) : NULL;
12726 
12727 	/* We need to check both for a crtc link (meaning that the
12728 	 * encoder is active and trying to read from a pipe) and the
12729 	 * pipe itself being active. */
12730 	bool has_active_crtc = crtc_state &&
12731 		crtc_state->hw.active;
12732 
12733 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
12734 		drm_dbg_kms(&dev_priv->drm,
12735 			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
12736 			    pipe_name(crtc->pipe));
12737 		has_active_crtc = false;
12738 	}
12739 
12740 	connector = intel_encoder_find_connector(encoder);
12741 	if (connector && !has_active_crtc) {
12742 		drm_dbg_kms(&dev_priv->drm,
12743 			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12744 			    encoder->base.base.id,
12745 			    encoder->base.name);
12746 
12747 		/* Connector is active, but has no active pipe. This is
12748 		 * fallout from our resume register restoring. Disable
12749 		 * the encoder manually again. */
12750 		if (crtc_state) {
12751 			struct drm_encoder *best_encoder;
12752 
12753 			drm_dbg_kms(&dev_priv->drm,
12754 				    "[ENCODER:%d:%s] manually disabled\n",
12755 				    encoder->base.base.id,
12756 				    encoder->base.name);
12757 
12758 			/* avoid oopsing in case the hooks consult best_encoder */
12759 			best_encoder = connector->base.state->best_encoder;
12760 			connector->base.state->best_encoder = &encoder->base;
12761 
12762 			/* FIXME NULL atomic state passed! */
12763 			if (encoder->disable)
12764 				encoder->disable(NULL, encoder, crtc_state,
12765 						 connector->base.state);
12766 			if (encoder->post_disable)
12767 				encoder->post_disable(NULL, encoder, crtc_state,
12768 						      connector->base.state);
12769 
12770 			connector->base.state->best_encoder = best_encoder;
12771 		}
12772 		encoder->base.crtc = NULL;
12773 
12774 		/* Inconsistent output/port/pipe state happens presumably due to
12775 		 * a bug in one of the get_hw_state functions, or someplace else
12776 		 * in our code, like the register restore mess on resume. Clamp
12777 		 * things to off as a safer default. */
12778 
12779 		connector->base.dpms = DRM_MODE_DPMS_OFF;
12780 		connector->base.encoder = NULL;
12781 	}
12782 
12783 	/* notify opregion of the sanitized encoder state */
12784 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
12785 
12786 	if (HAS_DDI(dev_priv))
12787 		intel_ddi_sanitize_encoder_pll_mapping(encoder);
12788 }
12789 
12790 /* FIXME read out full plane state for all planes */
12791 static void readout_plane_state(struct drm_i915_private *dev_priv)
12792 {
12793 	struct intel_plane *plane;
12794 	struct intel_crtc *crtc;
12795 
12796 	for_each_intel_plane(&dev_priv->drm, plane) {
12797 		struct intel_plane_state *plane_state =
12798 			to_intel_plane_state(plane->base.state);
12799 		struct intel_crtc_state *crtc_state;
12800 		enum pipe pipe = PIPE_A;
12801 		bool visible;
12802 
12803 		visible = plane->get_hw_state(plane, &pipe);
12804 
12805 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12806 		crtc_state = to_intel_crtc_state(crtc->base.state);
12807 
12808 		intel_set_plane_visible(crtc_state, plane_state, visible);
12809 
12810 		drm_dbg_kms(&dev_priv->drm,
12811 			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
12812 			    plane->base.base.id, plane->base.name,
12813 			    enableddisabled(visible), pipe_name(pipe));
12814 	}
12815 
12816 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12817 		struct intel_crtc_state *crtc_state =
12818 			to_intel_crtc_state(crtc->base.state);
12819 
12820 		fixup_plane_bitmasks(crtc_state);
12821 	}
12822 }
12823 
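/*
 * Illustrative sketch, not the real hook: a plane's ->get_hw_state()
 * implementation broadly follows this shape -- take a display power
 * reference, read the plane control register, and report which pipe
 * the plane is bound to. On pre-gen4 hardware the pipe comes from a
 * select field in the register itself, which is what lets
 * intel_sanitize_plane_mapping() above catch a plane attached to the
 * "wrong" pipe. See i9xx_plane_get_hw_state() for the real version.
 */
#if 0
static bool plane_get_hw_state_sketch(struct intel_plane *plane, enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	intel_wakeref_t wakeref;
	bool enabled;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_PIPE(plane->pipe));
	if (!wakeref)
		return false;	/* powered down: report not enabled */

	enabled = intel_de_read(dev_priv, DSPCNTR(plane->i9xx_plane)) &
		DISPLAY_PLANE_ENABLE;
	*pipe = plane->pipe;	/* pre-gen4 would decode the pipe select bits */

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE(plane->pipe), wakeref);

	return enabled;
}
#endif
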
12824 static void intel_modeset_readout_hw_state(struct drm_device *dev)
12825 {
12826 	struct drm_i915_private *dev_priv = to_i915(dev);
12827 	struct intel_cdclk_state *cdclk_state =
12828 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
12829 	struct intel_dbuf_state *dbuf_state =
12830 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
12831 	enum pipe pipe;
12832 	struct intel_crtc *crtc;
12833 	struct intel_encoder *encoder;
12834 	struct intel_connector *connector;
12835 	struct drm_connector_list_iter conn_iter;
12836 	u8 active_pipes = 0;
12837 
12838 	for_each_intel_crtc(dev, crtc) {
12839 		struct intel_crtc_state *crtc_state =
12840 			to_intel_crtc_state(crtc->base.state);
12841 
12842 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
12843 		intel_crtc_free_hw_state(crtc_state);
12844 		intel_crtc_state_reset(crtc_state, crtc);
12845 
12846 		intel_crtc_get_pipe_config(crtc_state);
12847 
12848 		crtc_state->hw.enable = crtc_state->hw.active;
12849 
12850 		crtc->base.enabled = crtc_state->hw.enable;
12851 		crtc->active = crtc_state->hw.active;
12852 
12853 		if (crtc_state->hw.active)
12854 			active_pipes |= BIT(crtc->pipe);
12855 
12856 		drm_dbg_kms(&dev_priv->drm,
12857 			    "[CRTC:%d:%s] hw state readout: %s\n",
12858 			    crtc->base.base.id, crtc->base.name,
12859 			    enableddisabled(crtc_state->hw.active));
12860 	}
12861 
12862 	dev_priv->active_pipes = cdclk_state->active_pipes =
12863 		dbuf_state->active_pipes = active_pipes;
12864 
12865 	readout_plane_state(dev_priv);
12866 
12867 	for_each_intel_encoder(dev, encoder) {
12868 		struct intel_crtc_state *crtc_state = NULL;
12869 
12870 		pipe = 0;
12871 
12872 		if (encoder->get_hw_state(encoder, &pipe)) {
12873 			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12874 			crtc_state = to_intel_crtc_state(crtc->base.state);
12875 
12876 			encoder->base.crtc = &crtc->base;
12877 			intel_encoder_get_config(encoder, crtc_state);
12878 
12879 			/* read out to slave crtc as well for bigjoiner */
12880 			if (crtc_state->bigjoiner) {
12881 				/* the encoder should be linked to the bigjoiner master */
12882 				WARN_ON(crtc_state->bigjoiner_slave);
12883 
12884 				crtc = crtc_state->bigjoiner_linked_crtc;
12885 				crtc_state = to_intel_crtc_state(crtc->base.state);
12886 				intel_encoder_get_config(encoder, crtc_state);
12887 			}
12888 		} else {
12889 			encoder->base.crtc = NULL;
12890 		}
12891 
12892 		if (encoder->sync_state)
12893 			encoder->sync_state(encoder, crtc_state);
12894 
12895 		drm_dbg_kms(&dev_priv->drm,
12896 			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
12897 			    encoder->base.base.id, encoder->base.name,
12898 			    enableddisabled(encoder->base.crtc),
12899 			    pipe_name(pipe));
12900 	}
12901 
12902 	intel_dpll_readout_hw_state(dev_priv);
12903 
12904 	drm_connector_list_iter_begin(dev, &conn_iter);
12905 	for_each_intel_connector_iter(connector, &conn_iter) {
12906 		if (connector->get_hw_state(connector)) {
12907 			struct intel_crtc_state *crtc_state;
12908 			struct intel_crtc *crtc;
12909 
12910 			connector->base.dpms = DRM_MODE_DPMS_ON;
12911 
12912 			encoder = intel_attached_encoder(connector);
12913 			connector->base.encoder = &encoder->base;
12914 
12915 			crtc = to_intel_crtc(encoder->base.crtc);
12916 			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
12917 
12918 			if (crtc_state && crtc_state->hw.active) {
12919 				/*
12920 				 * This has to be done during hardware readout
12921 				 * because anything calling .crtc_disable may
12922 				 * rely on the connector_mask being accurate.
12923 				 */
12924 				crtc_state->uapi.connector_mask |=
12925 					drm_connector_mask(&connector->base);
12926 				crtc_state->uapi.encoder_mask |=
12927 					drm_encoder_mask(&encoder->base);
12928 			}
12929 		} else {
12930 			connector->base.dpms = DRM_MODE_DPMS_OFF;
12931 			connector->base.encoder = NULL;
12932 		}
12933 		drm_dbg_kms(&dev_priv->drm,
12934 			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
12935 			    connector->base.base.id, connector->base.name,
12936 			    enableddisabled(connector->base.encoder));
12937 	}
12938 	drm_connector_list_iter_end(&conn_iter);
12939 
12940 	for_each_intel_crtc(dev, crtc) {
12941 		struct intel_bw_state *bw_state =
12942 			to_intel_bw_state(dev_priv->bw_obj.state);
12943 		struct intel_crtc_state *crtc_state =
12944 			to_intel_crtc_state(crtc->base.state);
12945 		struct intel_plane *plane;
12946 		int min_cdclk = 0;
12947 
12948 		if (crtc_state->bigjoiner_slave)
12949 			continue;
12950 
12951 		if (crtc_state->hw.active) {
12952 			/*
12953 			 * The initial mode needs to be set in order to keep
12954 			 * the atomic core happy. It wants a valid mode if the
12955 			 * crtc's enabled, so we do the above call.
12956 			 *
12957 			 * But we don't set all the derived state fully, hence
12958 			 * set a flag to indicate that a full recalculation is
12959 			 * needed on the next commit.
12960 			 */
12961 			crtc_state->inherited = true;
12962 
12963 			intel_crtc_update_active_timings(crtc_state);
12964 
12965 			intel_crtc_copy_hw_to_uapi_state(crtc_state);
12966 		}
12967 
12968 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
12969 			const struct intel_plane_state *plane_state =
12970 				to_intel_plane_state(plane->base.state);
12971 
12972 			/*
12973 			 * FIXME don't have the fb yet, so can't
12974 			 * use intel_plane_data_rate() :(
12975 			 */
12976 			if (plane_state->uapi.visible)
12977 				crtc_state->data_rate[plane->id] =
12978 					4 * crtc_state->pixel_rate;
12979 			/*
12980 			 * FIXME don't have the fb yet, so can't
12981 			 * use plane->min_cdclk() :(
12982 			 */
12983 			if (plane_state->uapi.visible && plane->min_cdclk) {
12984 				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
12985 					crtc_state->min_cdclk[plane->id] =
12986 						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
12987 				else
12988 					crtc_state->min_cdclk[plane->id] =
12989 						crtc_state->pixel_rate;
12990 			}
12991 			drm_dbg_kms(&dev_priv->drm,
12992 				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
12993 				    plane->base.base.id, plane->base.name,
12994 				    crtc_state->min_cdclk[plane->id]);
12995 		}
12996 
12997 		if (crtc_state->hw.active) {
12998 			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
12999 			if (drm_WARN_ON(dev, min_cdclk < 0))
13000 				min_cdclk = 0;
13001 		}
13002 
13003 		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
13004 		cdclk_state->min_voltage_level[crtc->pipe] =
13005 			crtc_state->min_voltage_level;
13006 
13007 		intel_bw_crtc_update(bw_state, crtc_state);
13008 
13009 		intel_pipe_config_sanity_check(dev_priv, crtc_state);
13010 
13011 		/* discard our incomplete slave state, copy it from master */
13012 		if (crtc_state->bigjoiner && crtc_state->hw.active) {
13013 			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
13014 			struct intel_crtc_state *slave_crtc_state =
13015 				to_intel_crtc_state(slave->base.state);
13016 
13017 			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
13018 			slave->base.mode = crtc->base.mode;
13019 
13020 			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
13021 			cdclk_state->min_voltage_level[slave->pipe] =
13022 				crtc_state->min_voltage_level;
13023 
13024 			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
13025 				const struct intel_plane_state *plane_state =
13026 					to_intel_plane_state(plane->base.state);
13027 
13028 				/*
13029 				 * FIXME don't have the fb yet, so can't
13030 				 * use intel_plane_data_rate() :(
13031 				 */
13032 				if (plane_state->uapi.visible)
13033 					crtc_state->data_rate[plane->id] =
13034 						4 * crtc_state->pixel_rate;
13035 				else
13036 					crtc_state->data_rate[plane->id] = 0;
13037 			}
13038 
13039 			intel_bw_crtc_update(bw_state, slave_crtc_state);
13040 			drm_calc_timestamping_constants(&slave->base,
13041 							&slave_crtc_state->hw.adjusted_mode);
13042 		}
13043 	}
13044 }
13045 
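/*
 * Illustrative sketch, not driver code: spelled out, the worst-case
 * guesses made during readout above (before any fb is known) are:
 */
static u32 __maybe_unused readout_data_rate_guess(const struct intel_crtc_state *crtc_state)
{
	/* assume 4 bytes per pixel, an upper bound for any plane format */
	return 4 * crtc_state->pixel_rate;
}

static u32 __maybe_unused readout_min_cdclk_guess(const struct intel_crtc_state *crtc_state,
						  bool two_pixels_per_clock)
{
	/* double wide pipes and gen10+ push two pixels per cdclk cycle */
	return two_pixels_per_clock ? DIV_ROUND_UP(crtc_state->pixel_rate, 2) :
		crtc_state->pixel_rate;
}
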
13046 static void
13047 get_encoder_power_domains(struct drm_i915_private *dev_priv)
13048 {
13049 	struct intel_encoder *encoder;
13050 
13051 	for_each_intel_encoder(&dev_priv->drm, encoder) {
13052 		struct intel_crtc_state *crtc_state;
13053 
13054 		if (!encoder->get_power_domains)
13055 			continue;
13056 
13057 		/*
13058 		 * MST-primary and inactive encoders don't have a crtc state
13059 		 * and neither of these requires any power domain references.
13060 		 */
13061 		if (!encoder->base.crtc)
13062 			continue;
13063 
13064 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
13065 		encoder->get_power_domains(encoder, crtc_state);
13066 	}
13067 }
13068 
13069 static void intel_early_display_was(struct drm_i915_private *dev_priv)
13070 {
13071 	/*
13072 	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
13073 	 * Also known as Wa_14010480278.
13074 	 */
13075 	if (IS_DISPLAY_VER(dev_priv, 10, 12))
13076 		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
13077 			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
13078 
13079 	if (IS_HASWELL(dev_priv)) {
13080 		/*
13081 		 * WaRsPkgCStateDisplayPMReq:hsw
13082 		 * System hang if this isn't done before disabling all planes!
13083 		 */
13084 		intel_de_write(dev_priv, CHICKEN_PAR1_1,
13085 			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
13086 	}
13087 
13088 	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
13089 		/* Display WA #1142:kbl,cfl,cml */
13090 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
13091 			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
13092 		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
13093 			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
13094 			     KBL_ARB_FILL_SPARE_14);
13095 	}
13096 }
13097 
13098 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
13099 				       enum port port, i915_reg_t hdmi_reg)
13100 {
13101 	u32 val = intel_de_read(dev_priv, hdmi_reg);
13102 
13103 	if (val & SDVO_ENABLE ||
13104 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
13105 		return;
13106 
13107 	drm_dbg_kms(&dev_priv->drm,
13108 		    "Sanitizing transcoder select for HDMI %c\n",
13109 		    port_name(port));
13110 
13111 	val &= ~SDVO_PIPE_SEL_MASK;
13112 	val |= SDVO_PIPE_SEL(PIPE_A);
13113 
13114 	intel_de_write(dev_priv, hdmi_reg, val);
13115 }
13116 
13117 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
13118 				     enum port port, i915_reg_t dp_reg)
13119 {
13120 	u32 val = intel_de_read(dev_priv, dp_reg);
13121 
13122 	if (val & DP_PORT_EN ||
13123 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
13124 		return;
13125 
13126 	drm_dbg_kms(&dev_priv->drm,
13127 		    "Sanitizing transcoder select for DP %c\n",
13128 		    port_name(port));
13129 
13130 	val &= ~DP_PIPE_SEL_MASK;
13131 	val |= DP_PIPE_SEL(PIPE_A);
13132 
13133 	intel_de_write(dev_priv, dp_reg, val);
13134 }
13135 
13136 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
13137 {
13138 	/*
13139 	 * The BIOS may select transcoder B on some of the PCH
13140 	 * ports even if it doesn't enable the port. This would trip
13141 	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
13142 	 * Sanitize the transcoder select bits to prevent that. We
13143 	 * assume that the BIOS never actually enabled the port,
13144 	 * because if it did we'd actually have to toggle the port
13145 	 * on and back off to make the transcoder A select stick
13146 	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
13147 	 * intel_disable_sdvo()).
13148 	 */
13149 	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
13150 	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
13151 	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
13152 
13153 	/* PCH SDVOB multiplex with HDMIB */
13154 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
13155 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
13156 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
13157 }
13158 
13159 /* Scan out the current hw modeset state,
13160  * and sanitize it to match the current state.
13161  */
13162 static void
13163 intel_modeset_setup_hw_state(struct drm_device *dev,
13164 			     struct drm_modeset_acquire_ctx *ctx)
13165 {
13166 	struct drm_i915_private *dev_priv = to_i915(dev);
13167 	struct intel_encoder *encoder;
13168 	struct intel_crtc *crtc;
13169 	intel_wakeref_t wakeref;
13170 
13171 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
13172 
13173 	intel_early_display_was(dev_priv);
13174 	intel_modeset_readout_hw_state(dev);
13175 
13176 	/* HW state is read out, now we need to sanitize this mess. */
13177 	get_encoder_power_domains(dev_priv);
13178 
13179 	if (HAS_PCH_IBX(dev_priv))
13180 		ibx_sanitize_pch_ports(dev_priv);
13181 
13182 	/*
13183 	 * intel_sanitize_plane_mapping() may need to do vblank
13184 	 * waits, so we need vblank interrupts restored beforehand.
13185 	 */
13186 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13187 		struct intel_crtc_state *crtc_state =
13188 			to_intel_crtc_state(crtc->base.state);
13189 
13190 		drm_crtc_vblank_reset(&crtc->base);
13191 
13192 		if (crtc_state->hw.active)
13193 			intel_crtc_vblank_on(crtc_state);
13194 	}
13195 
13196 	intel_sanitize_plane_mapping(dev_priv);
13197 
13198 	for_each_intel_encoder(dev, encoder)
13199 		intel_sanitize_encoder(encoder);
13200 
13201 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13202 		struct intel_crtc_state *crtc_state =
13203 			to_intel_crtc_state(crtc->base.state);
13204 
13205 		intel_sanitize_crtc(crtc, ctx);
13206 		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
13207 	}
13208 
13209 	intel_modeset_update_connector_atomic_state(dev);
13210 
13211 	intel_dpll_sanitize_state(dev_priv);
13212 
13213 	if (IS_G4X(dev_priv)) {
13214 		g4x_wm_get_hw_state(dev_priv);
13215 		g4x_wm_sanitize(dev_priv);
13216 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
13217 		vlv_wm_get_hw_state(dev_priv);
13218 		vlv_wm_sanitize(dev_priv);
13219 	} else if (DISPLAY_VER(dev_priv) >= 9) {
13220 		skl_wm_get_hw_state(dev_priv);
13221 		skl_wm_sanitize(dev_priv);
13222 	} else if (HAS_PCH_SPLIT(dev_priv)) {
13223 		ilk_wm_get_hw_state(dev_priv);
13224 	}
13225 
13226 	for_each_intel_crtc(dev, crtc) {
13227 		struct intel_crtc_state *crtc_state =
13228 			to_intel_crtc_state(crtc->base.state);
13229 		u64 put_domains;
13230 
13231 		put_domains = modeset_get_crtc_power_domains(crtc_state);
13232 		if (drm_WARN_ON(dev, put_domains))
13233 			modeset_put_crtc_power_domains(crtc, put_domains);
13234 	}
13235 
13236 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
13237 }
13238 
13239 void intel_display_resume(struct drm_device *dev)
13240 {
13241 	struct drm_i915_private *dev_priv = to_i915(dev);
13242 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
13243 	struct drm_modeset_acquire_ctx ctx;
13244 	int ret;
13245 
13246 	if (!HAS_DISPLAY(dev_priv))
13247 		return;
13248 
13249 	dev_priv->modeset_restore_state = NULL;
13250 	if (state)
13251 		state->acquire_ctx = &ctx;
13252 
13253 	drm_modeset_acquire_init(&ctx, 0);
13254 
13255 	while (1) {
13256 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
13257 		if (ret != -EDEADLK)
13258 			break;
13259 
13260 		drm_modeset_backoff(&ctx);
13261 	}
13262 
13263 	if (!ret)
13264 		ret = __intel_display_resume(dev, state, &ctx);
13265 
13266 	intel_enable_ipc(dev_priv);
13267 	drm_modeset_drop_locks(&ctx);
13268 	drm_modeset_acquire_fini(&ctx);
13269 
13270 	if (ret)
13271 		drm_err(&dev_priv->drm,
13272 			"Restoring old state failed with %i\n", ret);
13273 	if (state)
13274 		drm_atomic_state_put(state);
13275 }
13276 
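/*
 * Illustrative sketch, not driver code: the while (1) / -EDEADLK /
 * drm_modeset_backoff() loop above is the standard DRM modeset locking
 * dance. drm_modeset_lock.h also provides DRM_MODESET_LOCK_ALL_BEGIN()
 * and DRM_MODESET_LOCK_ALL_END(), which expand to essentially the same
 * retry loop; a minimal use, assuming no special acquire flags:
 */
#if 0
static void modeset_lock_dance_sketch(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
	/* ... modeset state may be touched under all locks here ... */
	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
#endif
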
13277 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
13278 {
13279 	struct intel_connector *connector;
13280 	struct drm_connector_list_iter conn_iter;
13281 
13282 	/* Kill all the work that may have been queued by hpd. */
13283 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
13284 	for_each_intel_connector_iter(connector, &conn_iter) {
13285 		if (connector->modeset_retry_work.func)
13286 			cancel_work_sync(&connector->modeset_retry_work);
13287 		if (connector->hdcp.shim) {
13288 			cancel_delayed_work_sync(&connector->hdcp.check_work);
13289 			cancel_work_sync(&connector->hdcp.prop_work);
13290 		}
13291 	}
13292 	drm_connector_list_iter_end(&conn_iter);
13293 }
13294 
13295 /* part #1: call before irq uninstall */
13296 void intel_modeset_driver_remove(struct drm_i915_private *i915)
13297 {
13298 	if (!HAS_DISPLAY(i915))
13299 		return;
13300 
13301 	flush_workqueue(i915->flip_wq);
13302 	flush_workqueue(i915->modeset_wq);
13303 
13304 	flush_work(&i915->atomic_helper.free_work);
13305 	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
13306 }
13307 
13308 /* part #2: call after irq uninstall */
13309 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
13310 {
13311 	if (!HAS_DISPLAY(i915))
13312 		return;
13313 
13314 	/*
13315 	 * Due to the hpd irq storm handling, the hotplug work can re-arm the
13316 	 * poll handlers. Hence disable polling after hpd handling is shut down.
13317 	 */
13318 	intel_hpd_poll_fini(i915);
13319 
13320 	/*
13321 	 * MST topology needs to be suspended so we don't have any calls to
13322 	 * fbdev after it's finalized. MST will be destroyed later as part of
13323 	 * drm_mode_config_cleanup()
13324 	 */
13325 	intel_dp_mst_suspend(i915);
13326 
13327 	/* poll work can call into fbdev, hence clean that up afterwards */
13328 	intel_fbdev_fini(i915);
13329 
13330 	intel_unregister_dsm_handler();
13331 
13332 	intel_fbc_global_disable(i915);
13333 
13334 	/* flush any delayed tasks or pending work */
13335 	flush_scheduled_work();
13336 
13337 	intel_hdcp_component_fini(i915);
13338 
13339 	intel_mode_config_cleanup(i915);
13340 
13341 	intel_overlay_cleanup(i915);
13342 
13343 	intel_gmbus_teardown(i915);
13344 
13345 	destroy_workqueue(i915->flip_wq);
13346 	destroy_workqueue(i915->modeset_wq);
13347 
13348 	intel_fbc_cleanup_cfb(i915);
13349 }
13350 
13351 /* part #3: call after gem init */
13352 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
13353 {
13354 	intel_dmc_ucode_fini(i915);
13355 
13356 	intel_power_domains_driver_remove(i915);
13357 
13358 	intel_vga_unregister(i915);
13359 
13360 	intel_bios_driver_remove(i915);
13361 }
13362 
13363 void intel_display_driver_register(struct drm_i915_private *i915)
13364 {
13365 	if (!HAS_DISPLAY(i915))
13366 		return;
13367 
13368 	intel_display_debugfs_register(i915);
13369 
13370 	/* Must be done after probing outputs */
13371 	intel_opregion_register(i915);
13372 	acpi_video_register();
13373 
13374 	intel_audio_init(i915);
13375 
13376 	/*
13377 	 * Some ports require correctly set-up hpd registers for
13378 	 * detection to work properly (otherwise they report a ghost
13379 	 * connected status), e.g. VGA on gm45.  Hence we can only set
13380 	 * up the initial fbdev config after hpd irqs are fully
13381 	 * enabled. We do it last so that the async config cannot run
13382 	 * before the connectors are registered.
13383 	 */
13384 	intel_fbdev_initial_config_async(&i915->drm);
13385 
13386 	/*
13387 	 * We need to coordinate the hotplugs with the asynchronous
13388 	 * fbdev configuration, for which we use the
13389 	 * fbdev->async_cookie.
13390 	 */
13391 	drm_kms_helper_poll_init(&i915->drm);
13392 }
13393 
13394 void intel_display_driver_unregister(struct drm_i915_private *i915)
13395 {
13396 	if (!HAS_DISPLAY(i915))
13397 		return;
13398 
13399 	intel_fbdev_unregister(i915);
13400 	intel_audio_deinit(i915);
13401 
13402 	/*
13403 	 * After flushing the fbdev (incl. a late async config which
13404 	 * will have delayed queuing of a hotplug event), then flush
13405 	 * the hotplug events.
13406 	 */
13407 	drm_kms_helper_poll_fini(&i915->drm);
13408 	drm_atomic_helper_shutdown(&i915->drm);
13409 
13410 	acpi_video_unregister();
13411 	intel_opregion_unregister(i915);
13412 }
13413