/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_display_power.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_drrs.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_snps_phy.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "gt/gen8_ppgtt.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_utils.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_dpt.h"
#include "intel_dsb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_modeset_verify.h"
#include "intel_modeset_setup.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @dev_priv: i915 device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 * The normal calculation is:
 *   watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values here).
 *
 * The SR calculation is:
 *   watermark = (trunc(latency/line time)+1) * surface width *
 *     bytes per pixel
 * where
 *   line time = htotal / dotclock
 *   surface width = hdisplay for normal plane and 64 for cursor
 * and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
	if (dev_priv->display.funcs.wm->update_wm)
		dev_priv->display.funcs.wm->update_wm(dev_priv);
}

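/*
 * The helpers below are thin wrappers around the per-platform watermark
 * hooks in display.funcs.wm; each one tolerates a missing hook so that
 * platforms lacking a given stage simply fall through.
 */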
static int intel_compute_pipe_wm(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->compute_pipe_wm)
		return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
	return 0;
}

static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
		return 0;
	if (drm_WARN_ON(&dev_priv->drm,
			!dev_priv->display.funcs.wm->compute_pipe_wm))
		return 0;
	return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
}

static bool intel_initial_watermarks(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->initial_watermarks) {
		dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
		return true;
	}
	return false;
}

static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->atomic_update_watermarks)
		dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
}

static void intel_optimize_watermarks(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->optimize_watermarks)
		dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
}

static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	if (dev_priv->display.funcs.wm->compute_global_watermarks)
		return dev_priv->display.funcs.wm->compute_global_watermarks(state);
	return 0;
}

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

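/*
 * CCK clock readout: the register encodes the clock as a divider from
 * the HPLL reference, i.e. freq = ref_freq * 2 / (divider + 1), which
 * is what the DIV_ROUND_CLOSEST() below computes (in kHz).
 */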
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

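/*
 * A pipe is considered to be in "HDR mode" when all of its active
 * planes, the cursor aside, are HDR-capable ICL+ planes; this gates
 * the Wa_1604331009 cursor clock gating workaround further down.
 */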
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

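/*
 * With bigjoiner, the lowest pipe in bigjoiner_pipes acts as the
 * master; every other pipe in the mask is a slave driven from it.
 */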
static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}

u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_pipes)
		return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe != bigjoiner_master_pipe(crtc_state);
}

bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe == bigjoiner_master_pipe(crtc_state);
}

static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->bigjoiner_pipes);
}

struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;

	line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), str_on_off(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
					    PIPECONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			str_on_off(state), str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

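/*
 * Enable the transcoder/pipe described by the CRTC state. The relevant
 * PLLs (and, when driving the PCH, the FDI PLLs) must already be
 * running; the asserts below spell out those prerequisites.
 */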
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 14)
		intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
	else if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

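/*
 * Size helpers for rotated/remapped framebuffer views: each sums the
 * per-plane footprint (dst_stride * width/height, or the linear size)
 * over all colour planes of the view.
 */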
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

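/*
 * Non-atomic plane disable, typically used when sanitizing hardware
 * state left behind by the BIOS rather than during an atomic commit:
 * it updates the bookkeeping in the current CRTC state and then disarms
 * the plane, honouring the same self-refresh and underrun quirks as the
 * atomic path.
 */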
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static int
__intel_display_resume(struct drm_i915_private *i915,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(i915, ctx);
	intel_vga_redisable(i915);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(i915))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(&i915->drm, ret == -EDEADLK);

	return ret;
}

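/*
 * On platforms where a GPU reset also clobbers the display hardware,
 * the full modeset state has to be saved before the reset and restored
 * afterwards; the prepare/finish pair below brackets the reset.
 */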
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
		intel_has_gpu_reset(to_gt(dev_priv)));
}

void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(to_gt(dev_priv));
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}

void intel_display_finish_reset(struct drm_i915_private *i915)
{
	struct drm_modeset_acquire_ctx *ctx = &i915->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
		return;

	state = fetch_and_zero(&i915->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(i915)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(i915, state, ctx);
		if (ret)
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(i915);
		intel_modeset_init_hw(i915);
		intel_init_clock_gating(i915);
		intel_hpd_init(i915);

		ret = __intel_display_resume(i915, state, ctx);
		if (ret)
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(i915);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&i915->drm.mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *master_crtc;
	int num_encoders = 0;
	int i;

	master_crtc = intel_master_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &master_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(master_crtc->pipe));

	return encoder;
}

static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}

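/*
 * Program the ILK+ PCH panel fitter from the precomputed pch_pfit
 * destination rectangle; IVB/HSW additionally require the pipe to be
 * selected in PF_CTL.
 */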
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

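/*
 * A modeset turns all planes off and back on, so treat it as both a
 * "planes disabling" and a "planes enabling" transition for the
 * workaround bookkeeping in intel_pre/post_plane_update().
 */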
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
		new_crtc_state->active_planes;
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->active_planes &&
		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
}

static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	hsw_ips_post_update(state, crtc);
	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	intel_drrs_activate(new_crtc_state);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

/*
 * intel_connector_primary_encoder - get the primary encoder for a connector
 * @connector: connector for which to return the encoder
 *
 * Returns the primary encoder for a connector. There is a 1:1 mapping from
 * all connectors to their encoder, except for DP-MST connectors which have
 * both a virtual and a primary encoder. These DP-MST primary encoders can be
 * pointed to by as many DP-MST connectors as there are pipes.
 */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
	struct intel_encoder *encoder;

	if (connector->mst_port)
		return &dp_to_dig_port(connector->mst_port)->base;

	encoder = intel_attached_encoder(connector);
	drm_WARN_ON(connector->base.dev, !encoder);

	return encoder;
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}

	if (!state->modeset)
		return;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_prepare)
			continue;

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_prepare(state, encoder, crtc);
	}
}

static void intel_encoders_update_complete(struct intel_atomic_state *state)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	if (!state->modeset)
		return;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_complete)
			continue;

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_complete(state, encoder, crtc);
	}
}

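/*
 * The intel_encoders_*() helpers below walk the connectors in the
 * atomic state and invoke the corresponding optional hook of every
 * encoder feeding this CRTC, in the usual modeset order:
 * pre_pll_enable -> pre_enable -> enable when enabling, and
 * disable -> post_disable -> post_pll_disable when disabling.
 */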
intel_encoders_pre_pll_enable(struct intel_atomic_state * state,struct intel_crtc * crtc)1556 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1557 struct intel_crtc *crtc)
1558 {
1559 const struct intel_crtc_state *crtc_state =
1560 intel_atomic_get_new_crtc_state(state, crtc);
1561 const struct drm_connector_state *conn_state;
1562 struct drm_connector *conn;
1563 int i;
1564
1565 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1566 struct intel_encoder *encoder =
1567 to_intel_encoder(conn_state->best_encoder);
1568
1569 if (conn_state->crtc != &crtc->base)
1570 continue;
1571
1572 if (encoder->pre_pll_enable)
1573 encoder->pre_pll_enable(state, encoder,
1574 crtc_state, conn_state);
1575 }
1576 }
1577
1578 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1579 struct intel_crtc *crtc)
1580 {
1581 const struct intel_crtc_state *crtc_state =
1582 intel_atomic_get_new_crtc_state(state, crtc);
1583 const struct drm_connector_state *conn_state;
1584 struct drm_connector *conn;
1585 int i;
1586
1587 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1588 struct intel_encoder *encoder =
1589 to_intel_encoder(conn_state->best_encoder);
1590
1591 if (conn_state->crtc != &crtc->base)
1592 continue;
1593
1594 if (encoder->pre_enable)
1595 encoder->pre_enable(state, encoder,
1596 crtc_state, conn_state);
1597 }
1598 }
1599
1600 static void intel_encoders_enable(struct intel_atomic_state *state,
1601 struct intel_crtc *crtc)
1602 {
1603 const struct intel_crtc_state *crtc_state =
1604 intel_atomic_get_new_crtc_state(state, crtc);
1605 const struct drm_connector_state *conn_state;
1606 struct drm_connector *conn;
1607 int i;
1608
1609 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1610 struct intel_encoder *encoder =
1611 to_intel_encoder(conn_state->best_encoder);
1612
1613 if (conn_state->crtc != &crtc->base)
1614 continue;
1615
1616 if (encoder->enable)
1617 encoder->enable(state, encoder,
1618 crtc_state, conn_state);
1619 intel_opregion_notify_encoder(encoder, true);
1620 }
1621 }
1622
1623 static void intel_encoders_disable(struct intel_atomic_state *state,
1624 struct intel_crtc *crtc)
1625 {
1626 const struct intel_crtc_state *old_crtc_state =
1627 intel_atomic_get_old_crtc_state(state, crtc);
1628 const struct drm_connector_state *old_conn_state;
1629 struct drm_connector *conn;
1630 int i;
1631
1632 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1633 struct intel_encoder *encoder =
1634 to_intel_encoder(old_conn_state->best_encoder);
1635
1636 if (old_conn_state->crtc != &crtc->base)
1637 continue;
1638
1639 intel_opregion_notify_encoder(encoder, false);
1640 if (encoder->disable)
1641 encoder->disable(state, encoder,
1642 old_crtc_state, old_conn_state);
1643 }
1644 }
1645
1646 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1647 struct intel_crtc *crtc)
1648 {
1649 const struct intel_crtc_state *old_crtc_state =
1650 intel_atomic_get_old_crtc_state(state, crtc);
1651 const struct drm_connector_state *old_conn_state;
1652 struct drm_connector *conn;
1653 int i;
1654
1655 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1656 struct intel_encoder *encoder =
1657 to_intel_encoder(old_conn_state->best_encoder);
1658
1659 if (old_conn_state->crtc != &crtc->base)
1660 continue;
1661
1662 if (encoder->post_disable)
1663 encoder->post_disable(state, encoder,
1664 old_crtc_state, old_conn_state);
1665 }
1666 }
1667
1668 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1669 struct intel_crtc *crtc)
1670 {
1671 const struct intel_crtc_state *old_crtc_state =
1672 intel_atomic_get_old_crtc_state(state, crtc);
1673 const struct drm_connector_state *old_conn_state;
1674 struct drm_connector *conn;
1675 int i;
1676
1677 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1678 struct intel_encoder *encoder =
1679 to_intel_encoder(old_conn_state->best_encoder);
1680
1681 if (old_conn_state->crtc != &crtc->base)
1682 continue;
1683
1684 if (encoder->post_pll_disable)
1685 encoder->post_pll_disable(state, encoder,
1686 old_crtc_state, old_conn_state);
1687 }
1688 }
1689
1690 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1691 struct intel_crtc *crtc)
1692 {
1693 const struct intel_crtc_state *crtc_state =
1694 intel_atomic_get_new_crtc_state(state, crtc);
1695 const struct drm_connector_state *conn_state;
1696 struct drm_connector *conn;
1697 int i;
1698
1699 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1700 struct intel_encoder *encoder =
1701 to_intel_encoder(conn_state->best_encoder);
1702
1703 if (conn_state->crtc != &crtc->base)
1704 continue;
1705
1706 if (encoder->update_pipe)
1707 encoder->update_pipe(state, encoder,
1708 crtc_state, conn_state);
1709 }
1710 }
1711
1712 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1713 {
1714 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1715 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1716
1717 plane->disable_arm(plane, crtc_state);
1718 }
1719
1720 static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1721 {
1722 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1723 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1724
1725 if (crtc_state->has_pch_encoder) {
1726 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1727 &crtc_state->fdi_m_n);
1728 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1729 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1730 &crtc_state->dp_m_n);
1731 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1732 &crtc_state->dp_m2_n2);
1733 }
1734
1735 intel_set_transcoder_timings(crtc_state);
1736
1737 ilk_set_pipeconf(crtc_state);
1738 }
1739
1740 static void ilk_crtc_enable(struct intel_atomic_state *state,
1741 struct intel_crtc *crtc)
1742 {
1743 const struct intel_crtc_state *new_crtc_state =
1744 intel_atomic_get_new_crtc_state(state, crtc);
1745 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1746 enum pipe pipe = crtc->pipe;
1747
1748 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1749 return;
1750
1751 /*
1752 * Sometimes spurious CPU pipe underruns happen during FDI
1753 * training, at least with VGA+HDMI cloning. Suppress them.
1754 *
1755 * On ILK we occasionally get spurious CPU pipe underruns
1756 * between eDP port A enable and vdd enable. PCH port enable
1757 * also seems to result in the occasional CPU pipe underrun.
1758 *
1759 * Spurious PCH underruns also occur during PCH enabling.
1760 */
1761 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1762 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1763
1764 ilk_configure_cpu_transcoder(new_crtc_state);
1765
1766 intel_set_pipe_src_size(new_crtc_state);
1767
1768 crtc->active = true;
1769
1770 intel_encoders_pre_enable(state, crtc);
1771
1772 if (new_crtc_state->has_pch_encoder) {
1773 ilk_pch_pre_enable(state, crtc);
1774 } else {
1775 assert_fdi_tx_disabled(dev_priv, pipe);
1776 assert_fdi_rx_disabled(dev_priv, pipe);
1777 }
1778
1779 ilk_pfit_enable(new_crtc_state);
1780
1781 /*
1782 * On ILK+ the LUT must be loaded before the pipe is running, but
1783 * with clocks enabled.
1784 */
1785 intel_color_load_luts(new_crtc_state);
1786 intel_color_commit_noarm(new_crtc_state);
1787 intel_color_commit_arm(new_crtc_state);
1788 /* update DSPCNTR to configure gamma for pipe bottom color */
1789 intel_disable_primary_plane(new_crtc_state);
1790
1791 intel_initial_watermarks(state, crtc);
1792 intel_enable_transcoder(new_crtc_state);
1793
1794 if (new_crtc_state->has_pch_encoder)
1795 ilk_pch_enable(state, crtc);
1796
1797 intel_crtc_vblank_on(new_crtc_state);
1798
1799 intel_encoders_enable(state, crtc);
1800
1801 if (HAS_PCH_CPT(dev_priv))
1802 cpt_verify_modeset(dev_priv, pipe);
1803
1804 /*
1805 * Must wait for vblank to avoid spurious PCH FIFO underruns.
1806 * And a second vblank wait is needed at least on ILK with
1807 * some interlaced HDMI modes. Let's do the double wait always
1808 * in case there are more corner cases we don't know about.
1809 */
1810 if (new_crtc_state->has_pch_encoder) {
1811 intel_crtc_wait_for_next_vblank(crtc);
1812 intel_crtc_wait_for_next_vblank(crtc);
1813 }
1814 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1815 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1816 }
1817
1818 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1819 enum pipe pipe, bool apply)
1820 {
1821 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1822 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1823
1824 if (apply)
1825 val |= mask;
1826 else
1827 val &= ~mask;
1828
1829 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1830 }
1831
1832 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1833 {
1834 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1835 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1836
1837 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1838 HSW_LINETIME(crtc_state->linetime) |
1839 HSW_IPS_LINETIME(crtc_state->ips_linetime));
1840 }
1841
1842 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1843 {
1844 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1845 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1846 enum transcoder transcoder = crtc_state->cpu_transcoder;
1847 i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
1848 CHICKEN_TRANS(transcoder);
1849 u32 val;
1850
1851 val = intel_de_read(dev_priv, reg);
1852 val &= ~HSW_FRAME_START_DELAY_MASK;
1853 val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
1854 intel_de_write(dev_priv, reg, val);
1855 }
1856
1857 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1858 const struct intel_crtc_state *crtc_state)
1859 {
1860 struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
1861
1862 /*
1863 * Enable sequence steps 1-7 on bigjoiner master
1864 */
1865 if (intel_crtc_is_bigjoiner_slave(crtc_state))
1866 intel_encoders_pre_pll_enable(state, master_crtc);
1867
1868 if (crtc_state->shared_dpll)
1869 intel_enable_shared_dpll(crtc_state);
1870
1871 if (intel_crtc_is_bigjoiner_slave(crtc_state))
1872 intel_encoders_pre_enable(state, master_crtc);
1873 }
1874
1875 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1876 {
1877 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1878 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1879 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1880
1881 if (crtc_state->has_pch_encoder) {
1882 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1883 &crtc_state->fdi_m_n);
1884 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1885 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1886 &crtc_state->dp_m_n);
1887 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1888 &crtc_state->dp_m2_n2);
1889 }
1890
1891 intel_set_transcoder_timings(crtc_state);
1892
1893 if (cpu_transcoder != TRANSCODER_EDP)
1894 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
1895 crtc_state->pixel_multiplier - 1);
1896
1897 hsw_set_frame_start_delay(crtc_state);
1898
1899 hsw_set_transconf(crtc_state);
1900 }
1901
1902 static void hsw_crtc_enable(struct intel_atomic_state *state,
1903 struct intel_crtc *crtc)
1904 {
1905 const struct intel_crtc_state *new_crtc_state =
1906 intel_atomic_get_new_crtc_state(state, crtc);
1907 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1908 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
1909 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1910 bool psl_clkgate_wa;
1911
1912 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1913 return;
1914
1915 if (!new_crtc_state->bigjoiner_pipes) {
1916 intel_encoders_pre_pll_enable(state, crtc);
1917
1918 if (new_crtc_state->shared_dpll)
1919 intel_enable_shared_dpll(new_crtc_state);
1920
1921 intel_encoders_pre_enable(state, crtc);
1922 } else {
1923 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
1924 }
1925
1926 intel_dsc_enable(new_crtc_state);
1927
1928 if (DISPLAY_VER(dev_priv) >= 13)
1929 intel_uncompressed_joiner_enable(new_crtc_state);
1930
1931 intel_set_pipe_src_size(new_crtc_state);
1932 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
1933 bdw_set_pipemisc(new_crtc_state);
1934
1935 if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
1936 !transcoder_is_dsi(cpu_transcoder))
1937 hsw_configure_cpu_transcoder(new_crtc_state);
1938
1939 crtc->active = true;
1940
1941 /* Display WA #1180: WaDisableScalarClockGating: glk */
1942 psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
1943 new_crtc_state->pch_pfit.enabled;
1944 if (psl_clkgate_wa)
1945 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
1946
1947 if (DISPLAY_VER(dev_priv) >= 9)
1948 skl_pfit_enable(new_crtc_state);
1949 else
1950 ilk_pfit_enable(new_crtc_state);
1951
1952 /*
1953 * On ILK+ the LUT must be loaded before the pipe is running, but
1954 * with clocks enabled.
1955 */
1956 intel_color_load_luts(new_crtc_state);
1957 intel_color_commit_noarm(new_crtc_state);
1958 intel_color_commit_arm(new_crtc_state);
1959 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
1960 if (DISPLAY_VER(dev_priv) < 9)
1961 intel_disable_primary_plane(new_crtc_state);
1962
1963 hsw_set_linetime_wm(new_crtc_state);
1964
1965 if (DISPLAY_VER(dev_priv) >= 11)
1966 icl_set_pipe_chicken(new_crtc_state);
1967
1968 intel_initial_watermarks(state, crtc);
1969
1970 if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
1971 intel_crtc_vblank_on(new_crtc_state);
1972
1973 intel_encoders_enable(state, crtc);
1974
1975 if (psl_clkgate_wa) {
1976 intel_crtc_wait_for_next_vblank(crtc);
1977 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
1978 }
1979
1980 /* If we change the relative order between pipe/planes enabling, we need
1981 * to change the workaround. */
1982 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
1983 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
1984 struct intel_crtc *wa_crtc;
1985
1986 wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
1987
1988 intel_crtc_wait_for_next_vblank(wa_crtc);
1989 intel_crtc_wait_for_next_vblank(wa_crtc);
1990 }
1991 }
1992
1993 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
1994 {
1995 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1996 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1997 enum pipe pipe = crtc->pipe;
1998
1999 /* To avoid upsetting the power well on Haswell, only disable the pfit if
2000 * it's in use. The hw state code will make sure we get this right. */
2001 if (!old_crtc_state->pch_pfit.enabled)
2002 return;
2003
2004 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
2005 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
2006 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
2007 }
2008
2009 static void ilk_crtc_disable(struct intel_atomic_state *state,
2010 struct intel_crtc *crtc)
2011 {
2012 const struct intel_crtc_state *old_crtc_state =
2013 intel_atomic_get_old_crtc_state(state, crtc);
2014 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2015 enum pipe pipe = crtc->pipe;
2016
2017 /*
2018 * Sometimes spurious CPU pipe underruns happen when the
2019 * pipe is already disabled, but FDI RX/TX is still enabled.
2020 * Happens at least with VGA+HDMI cloning. Suppress them.
2021 */
2022 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2023 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2024
2025 intel_encoders_disable(state, crtc);
2026
2027 intel_crtc_vblank_off(old_crtc_state);
2028
2029 intel_disable_transcoder(old_crtc_state);
2030
2031 ilk_pfit_disable(old_crtc_state);
2032
2033 if (old_crtc_state->has_pch_encoder)
2034 ilk_pch_disable(state, crtc);
2035
2036 intel_encoders_post_disable(state, crtc);
2037
2038 if (old_crtc_state->has_pch_encoder)
2039 ilk_pch_post_disable(state, crtc);
2040
2041 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2042 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
2043 }
2044
2045 static void hsw_crtc_disable(struct intel_atomic_state *state,
2046 struct intel_crtc *crtc)
2047 {
2048 const struct intel_crtc_state *old_crtc_state =
2049 intel_atomic_get_old_crtc_state(state, crtc);
2050
2051 /*
2052 * FIXME collapse everything to one hook.
2053 * Need care with mst->ddi interactions.
2054 */
2055 if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
2056 intel_encoders_disable(state, crtc);
2057 intel_encoders_post_disable(state, crtc);
2058 }
2059 }
2060
2061 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
2062 {
2063 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2064 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2065
2066 if (!crtc_state->gmch_pfit.control)
2067 return;
2068
2069 /*
2070 * The panel fitter should only be adjusted whilst the pipe is disabled,
2071 * according to register description and PRM.
2072 */
2073 drm_WARN_ON(&dev_priv->drm,
2074 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
2075 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2076
2077 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
2078 crtc_state->gmch_pfit.pgm_ratios);
2079 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
2080
2081 /* Border color in case we don't scale up to the full screen. Black by
2082 * default, change to something else for debugging. */
2083 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
2084 }
2085
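/*
 * Combo PHY coverage implied by the platform checks below: ADL-S has
 * combo PHYs A-E, DG1/RKL A-D, JSL/EHL A-C, and ADL-P plus display
 * versions 11-12 A-B. DG2 is intentionally excluded; see the comment
 * in the function body.
 */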
2086 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2087 {
2088 if (phy == PHY_NONE)
2089 return false;
2090 else if (IS_ALDERLAKE_S(dev_priv))
2091 return phy <= PHY_E;
2092 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2093 return phy <= PHY_D;
2094 else if (IS_JSL_EHL(dev_priv))
2095 return phy <= PHY_C;
2096 else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
2097 return phy <= PHY_B;
2098 else
2099 /*
2100 * DG2 outputs labelled as "combo PHY" in the bspec use
2101 * SNPS PHYs with completely different programming,
2102 * hence we always return false here.
2103 */
2104 return false;
2105 }
2106
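/*
 * Type-C PHY coverage implied by the checks below: ADL-P uses PHYs
 * F-I, TGL D-I and ICL C-F; DG2's "TC1" output is a SNPS PHY, so it
 * reports false here.
 */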
2107 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2108 {
2109 if (IS_DG2(dev_priv))
2110 /* DG2's "TC1" output uses a SNPS PHY */
2111 return false;
2112 else if (IS_ALDERLAKE_P(dev_priv))
2113 return phy >= PHY_F && phy <= PHY_I;
2114 else if (IS_TIGERLAKE(dev_priv))
2115 return phy >= PHY_D && phy <= PHY_I;
2116 else if (IS_ICELAKE(dev_priv))
2117 return phy >= PHY_C && phy <= PHY_F;
2118 else
2119 return false;
2120 }
2121
2122 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2123 {
2124 if (phy == PHY_NONE)
2125 return false;
2126 else if (IS_DG2(dev_priv))
2127 /*
2128 * All four "combo" ports and the TC1 port (PHY E) use
2129 * Synopsys PHYs.
2130 */
2131 return phy <= PHY_E;
2132
2133 return false;
2134 }
2135
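/*
 * Illustrative mappings that fall out of the arithmetic below: on
 * ADL-S PORT_TC1 maps to PHY_B, on DG1/RKL PORT_TC2 maps to PHY_D
 * (PHY_C + 1), and anything not matched falls through to the 1:1
 * PHY_A + (port - PORT_A) mapping.
 */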
2136 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2137 {
2138 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2139 return PHY_D + port - PORT_D_XELPD;
2140 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2141 return PHY_F + port - PORT_TC1;
2142 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2143 return PHY_B + port - PORT_TC1;
2144 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2145 return PHY_C + port - PORT_TC1;
2146 else if (IS_JSL_EHL(i915) && port == PORT_D)
2147 return PHY_A;
2148
2149 return PHY_A + port - PORT_A;
2150 }
2151
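/*
 * Example: with display version >= 12 PORT_TC1 maps straight to
 * TC_PORT_1, while on ICL the TC ports start at PORT_C, so there
 * PORT_C maps to TC_PORT_1. Ports on non-TC PHYs get TC_PORT_NONE.
 */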
2152 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2153 {
2154 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2155 return TC_PORT_NONE;
2156
2157 if (DISPLAY_VER(dev_priv) >= 12)
2158 return TC_PORT_1 + port - PORT_TC1;
2159 else
2160 return TC_PORT_1 + port - PORT_C;
2161 }
2162
2163 enum intel_display_power_domain
2164 intel_aux_power_domain(struct intel_digital_port *dig_port)
2165 {
2166 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
2167
2168 if (intel_tc_port_in_tbt_alt_mode(dig_port))
2169 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
2170
2171 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
2172 }
2173
2174 static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2175 struct intel_power_domain_mask *mask)
2176 {
2177 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2178 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2179 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2180 struct drm_encoder *encoder;
2181 enum pipe pipe = crtc->pipe;
2182
2183 bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
2184
2185 if (!crtc_state->hw.active)
2186 return;
2187
2188 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
2189 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
2190 if (crtc_state->pch_pfit.enabled ||
2191 crtc_state->pch_pfit.force_thru)
2192 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
2193
2194 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2195 crtc_state->uapi.encoder_mask) {
2196 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2197
2198 set_bit(intel_encoder->power_domain, mask->bits);
2199 }
2200
2201 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2202 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
2203
2204 if (crtc_state->shared_dpll)
2205 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
2206
2207 if (crtc_state->dsc.compression_enable)
2208 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
2209 }
2210
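/*
 * The get/put pair below amounts to a set difference over the power
 * domain bitmaps:
 *
 *   new_domains = wanted & ~enabled  (grabbed here, ahead of the update)
 *   old_domains = enabled & ~wanted  (handed back for the caller to
 *                                     drop once the update is done)
 */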
2211 void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2212 struct intel_power_domain_mask *old_domains)
2213 {
2214 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2215 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2216 enum intel_display_power_domain domain;
2217 struct intel_power_domain_mask domains, new_domains;
2218
2219 get_crtc_power_domains(crtc_state, &domains);
2220
2221 bitmap_andnot(new_domains.bits,
2222 domains.bits,
2223 crtc->enabled_power_domains.mask.bits,
2224 POWER_DOMAIN_NUM);
2225 bitmap_andnot(old_domains->bits,
2226 crtc->enabled_power_domains.mask.bits,
2227 domains.bits,
2228 POWER_DOMAIN_NUM);
2229
2230 for_each_power_domain(domain, &new_domains)
2231 intel_display_power_get_in_set(dev_priv,
2232 &crtc->enabled_power_domains,
2233 domain);
2234 }
2235
2236 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2237 struct intel_power_domain_mask *domains)
2238 {
2239 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2240 &crtc->enabled_power_domains,
2241 domains);
2242 }
2243
2244 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
2245 {
2246 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2247 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2248
2249 if (intel_crtc_has_dp_encoder(crtc_state)) {
2250 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
2251 &crtc_state->dp_m_n);
2252 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
2253 &crtc_state->dp_m2_n2);
2254 }
2255
2256 intel_set_transcoder_timings(crtc_state);
2257
2258 i9xx_set_pipeconf(crtc_state);
2259 }
2260
2261 static void valleyview_crtc_enable(struct intel_atomic_state *state,
2262 struct intel_crtc *crtc)
2263 {
2264 const struct intel_crtc_state *new_crtc_state =
2265 intel_atomic_get_new_crtc_state(state, crtc);
2266 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2267 enum pipe pipe = crtc->pipe;
2268
2269 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2270 return;
2271
2272 i9xx_configure_cpu_transcoder(new_crtc_state);
2273
2274 intel_set_pipe_src_size(new_crtc_state);
2275
2276 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2277 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
2278 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
2279 }
2280
2281 crtc->active = true;
2282
2283 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2284
2285 intel_encoders_pre_pll_enable(state, crtc);
2286
2287 if (IS_CHERRYVIEW(dev_priv))
2288 chv_enable_pll(new_crtc_state);
2289 else
2290 vlv_enable_pll(new_crtc_state);
2291
2292 intel_encoders_pre_enable(state, crtc);
2293
2294 i9xx_pfit_enable(new_crtc_state);
2295
2296 intel_color_load_luts(new_crtc_state);
2297 intel_color_commit_noarm(new_crtc_state);
2298 intel_color_commit_arm(new_crtc_state);
2299 /* update DSPCNTR to configure gamma for pipe bottom color */
2300 intel_disable_primary_plane(new_crtc_state);
2301
2302 intel_initial_watermarks(state, crtc);
2303 intel_enable_transcoder(new_crtc_state);
2304
2305 intel_crtc_vblank_on(new_crtc_state);
2306
2307 intel_encoders_enable(state, crtc);
2308 }
2309
2310 static void i9xx_crtc_enable(struct intel_atomic_state *state,
2311 struct intel_crtc *crtc)
2312 {
2313 const struct intel_crtc_state *new_crtc_state =
2314 intel_atomic_get_new_crtc_state(state, crtc);
2315 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2316 enum pipe pipe = crtc->pipe;
2317
2318 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2319 return;
2320
2321 i9xx_configure_cpu_transcoder(new_crtc_state);
2322
2323 intel_set_pipe_src_size(new_crtc_state);
2324
2325 crtc->active = true;
2326
2327 if (DISPLAY_VER(dev_priv) != 2)
2328 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2329
2330 intel_encoders_pre_enable(state, crtc);
2331
2332 i9xx_enable_pll(new_crtc_state);
2333
2334 i9xx_pfit_enable(new_crtc_state);
2335
2336 intel_color_load_luts(new_crtc_state);
2337 intel_color_commit_noarm(new_crtc_state);
2338 intel_color_commit_arm(new_crtc_state);
2339 /* update DSPCNTR to configure gamma for pipe bottom color */
2340 intel_disable_primary_plane(new_crtc_state);
2341
2342 if (!intel_initial_watermarks(state, crtc))
2343 intel_update_watermarks(dev_priv);
2344 intel_enable_transcoder(new_crtc_state);
2345
2346 intel_crtc_vblank_on(new_crtc_state);
2347
2348 intel_encoders_enable(state, crtc);
2349
2350 /* prevents spurious underruns */
2351 if (DISPLAY_VER(dev_priv) == 2)
2352 intel_crtc_wait_for_next_vblank(crtc);
2353 }
2354
2355 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2356 {
2357 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2358 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2359
2360 if (!old_crtc_state->gmch_pfit.control)
2361 return;
2362
2363 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2364
2365 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2366 intel_de_read(dev_priv, PFIT_CONTROL));
2367 intel_de_write(dev_priv, PFIT_CONTROL, 0);
2368 }
2369
2370 static void i9xx_crtc_disable(struct intel_atomic_state *state,
2371 struct intel_crtc *crtc)
2372 {
2373 struct intel_crtc_state *old_crtc_state =
2374 intel_atomic_get_old_crtc_state(state, crtc);
2375 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2376 enum pipe pipe = crtc->pipe;
2377
2378 /*
2379 * On gen2 planes are double buffered but the pipe isn't, so we must
2380 * wait for planes to fully turn off before disabling the pipe.
2381 */
2382 if (DISPLAY_VER(dev_priv) == 2)
2383 intel_crtc_wait_for_next_vblank(crtc);
2384
2385 intel_encoders_disable(state, crtc);
2386
2387 intel_crtc_vblank_off(old_crtc_state);
2388
2389 intel_disable_transcoder(old_crtc_state);
2390
2391 i9xx_pfit_disable(old_crtc_state);
2392
2393 intel_encoders_post_disable(state, crtc);
2394
2395 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2396 if (IS_CHERRYVIEW(dev_priv))
2397 chv_disable_pll(dev_priv, pipe);
2398 else if (IS_VALLEYVIEW(dev_priv))
2399 vlv_disable_pll(dev_priv, pipe);
2400 else
2401 i9xx_disable_pll(old_crtc_state);
2402 }
2403
2404 intel_encoders_post_pll_disable(state, crtc);
2405
2406 if (DISPLAY_VER(dev_priv) != 2)
2407 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2408
2409 if (!dev_priv->display.funcs.wm->initial_watermarks)
2410 intel_update_watermarks(dev_priv);
2411
2412 /* clock the pipe down to 640x480@60 to potentially save power */
2413 if (IS_I830(dev_priv))
2414 i830_enable_pipe(dev_priv, pipe);
2415 }
2416
2418 /*
2419 * Turn all CRTCs off, but do not adjust state.
2420 * This has to be paired with a call to intel_modeset_setup_hw_state.
2421 */
2422 int intel_display_suspend(struct drm_device *dev)
2423 {
2424 struct drm_i915_private *dev_priv = to_i915(dev);
2425 struct drm_atomic_state *state;
2426 int ret;
2427
2428 if (!HAS_DISPLAY(dev_priv))
2429 return 0;
2430
2431 state = drm_atomic_helper_suspend(dev);
2432 ret = PTR_ERR_OR_ZERO(state);
2433 if (ret)
2434 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2435 ret);
2436 else
2437 dev_priv->modeset_restore_state = state;
2438 return ret;
2439 }
2440
2441 void intel_encoder_destroy(struct drm_encoder *encoder)
2442 {
2443 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2444
2445 drm_encoder_cleanup(encoder);
2446 kfree(intel_encoder);
2447 }
2448
2449 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2450 {
2451 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2452
2453 /* GDG double wide on either pipe, otherwise pipe A only */
2454 return DISPLAY_VER(dev_priv) < 4 &&
2455 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2456 }
2457
2458 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2459 {
2460 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2461 struct drm_rect src;
2462
2463 /*
2464 * We only use IF-ID interlacing. If we ever use
2465 * PF-ID we'll need to adjust the pixel_rate here.
2466 */
2467
2468 if (!crtc_state->pch_pfit.enabled)
2469 return pixel_rate;
2470
2471 drm_rect_init(&src, 0, 0,
2472 drm_rect_width(&crtc_state->pipe_src) << 16,
2473 drm_rect_height(&crtc_state->pipe_src) << 16);
2474
2475 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2476 pixel_rate);
2477 }
2478
2479 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2480 const struct drm_display_mode *timings)
2481 {
2482 mode->hdisplay = timings->crtc_hdisplay;
2483 mode->htotal = timings->crtc_htotal;
2484 mode->hsync_start = timings->crtc_hsync_start;
2485 mode->hsync_end = timings->crtc_hsync_end;
2486
2487 mode->vdisplay = timings->crtc_vdisplay;
2488 mode->vtotal = timings->crtc_vtotal;
2489 mode->vsync_start = timings->crtc_vsync_start;
2490 mode->vsync_end = timings->crtc_vsync_end;
2491
2492 mode->flags = timings->flags;
2493 mode->type = DRM_MODE_TYPE_DRIVER;
2494
2495 mode->clock = timings->crtc_clock;
2496
2497 drm_mode_set_name(mode);
2498 }
2499
2500 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2501 {
2502 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2503
2504 if (HAS_GMCH(dev_priv))
2505 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2506 crtc_state->pixel_rate =
2507 crtc_state->hw.pipe_mode.crtc_clock;
2508 else
2509 crtc_state->pixel_rate =
2510 ilk_pipe_pixel_rate(crtc_state);
2511 }
2512
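/*
 * Worked example for the divisions below: a 7680 pixel wide mode
 * split across two bigjoined pipes ends up with a per-pipe
 * crtc_hdisplay of 3840, with the dotclock and the other horizontal
 * timings halved to match.
 */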
2513 static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2514 struct drm_display_mode *mode)
2515 {
2516 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2517
2518 if (num_pipes < 2)
2519 return;
2520
2521 mode->crtc_clock /= num_pipes;
2522 mode->crtc_hdisplay /= num_pipes;
2523 mode->crtc_hblank_start /= num_pipes;
2524 mode->crtc_hblank_end /= num_pipes;
2525 mode->crtc_hsync_start /= num_pipes;
2526 mode->crtc_hsync_end /= num_pipes;
2527 mode->crtc_htotal /= num_pipes;
2528 }
2529
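/*
 * Worked example of the h_full expansion below: with two MSO links,
 * a 1920 pixel wide EDID segment and an 8 pixel overlap, h_full =
 * (1920 - 8) * 2 = 3824, and the dotclock is doubled.
 */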
2530 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2531 struct drm_display_mode *mode)
2532 {
2533 int overlap = crtc_state->splitter.pixel_overlap;
2534 int n = crtc_state->splitter.link_count;
2535
2536 if (!crtc_state->splitter.enable)
2537 return;
2538
2539 /*
2540 * eDP MSO uses segment timings from EDID for transcoder
2541 * timings, but full mode for everything else.
2542 *
2543 * h_full = (h_segment - pixel_overlap) * link_count
2544 */
2545 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2546 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2547 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2548 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2549 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2550 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2551 mode->crtc_clock *= n;
2552 }
2553
2554 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2555 {
2556 struct drm_display_mode *mode = &crtc_state->hw.mode;
2557 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2558 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2559
2560 /*
2561 * Start with the adjusted_mode crtc timings, which
2562 * have been filled with the transcoder timings.
2563 */
2564 drm_mode_copy(pipe_mode, adjusted_mode);
2565
2566 /* Expand MSO per-segment transcoder timings to full */
2567 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2568
2569 /*
2570 * We want the full numbers in adjusted_mode normal timings,
2571 * adjusted_mode crtc timings are left with the raw transcoder
2572 * timings.
2573 */
2574 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2575
2576 /* Populate the "user" mode with full numbers */
2577 drm_mode_copy(mode, pipe_mode);
2578 intel_mode_from_crtc_timings(mode, mode);
2579 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
2580 (intel_bigjoiner_num_pipes(crtc_state) ?: 1);
2581 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
2582
2583 /* Derive per-pipe timings in case bigjoiner is used */
2584 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2585 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2586
2587 intel_crtc_compute_pixel_rate(crtc_state);
2588 }
2589
2590 void intel_encoder_get_config(struct intel_encoder *encoder,
2591 struct intel_crtc_state *crtc_state)
2592 {
2593 encoder->get_config(encoder, crtc_state);
2594
2595 intel_crtc_readout_derived_state(crtc_state);
2596 }
2597
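/*
 * Example for the split below: a 7680x2160 pipe_src driven by two
 * bigjoined pipes is reduced to 3840x2160 per pipe; only the width
 * is divided, the height is untouched.
 */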
2598 static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2599 {
2600 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2601 int width, height;
2602
2603 if (num_pipes < 2)
2604 return;
2605
2606 width = drm_rect_width(&crtc_state->pipe_src);
2607 height = drm_rect_height(&crtc_state->pipe_src);
2608
2609 drm_rect_init(&crtc_state->pipe_src, 0, 0,
2610 width / num_pipes, height);
2611 }
2612
2613 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
2614 {
2615 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2616 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2617
2618 intel_bigjoiner_compute_pipe_src(crtc_state);
2619
2620 /*
2621 * Pipe horizontal size must be even in:
2622 * - DVO ganged mode
2623 * - LVDS dual channel mode
2624 * - Double wide pipe
2625 */
2626 if (drm_rect_width(&crtc_state->pipe_src) & 1) {
2627 if (crtc_state->double_wide) {
2628 drm_dbg_kms(&i915->drm,
2629 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
2630 crtc->base.base.id, crtc->base.name);
2631 return -EINVAL;
2632 }
2633
2634 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
2635 intel_is_dual_link_lvds(i915)) {
2636 drm_dbg_kms(&i915->drm,
2637 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
2638 crtc->base.base.id, crtc->base.name);
2639 return -EINVAL;
2640 }
2641 }
2642
2643 return 0;
2644 }
2645
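/*
 * Illustration of the double wide rule below, with made-up numbers:
 * given a 320000 kHz max cdclk the single wide limit is
 * 320000 * 9 / 10 = 288000 kHz, so a 300000 kHz dotclock on display
 * version < 4 flips the pipe into double wide mode and raises the
 * limit back to max_dotclk_freq.
 */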
2646 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
2647 {
2648 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2649 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2650 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2651 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2652 int clock_limit = i915->max_dotclk_freq;
2653
2654 /*
2655 * Start with the adjusted_mode crtc timings, which
2656 * have been filled with the transcoder timings.
2657 */
2658 drm_mode_copy(pipe_mode, adjusted_mode);
2659
2660 /* Expand MSO per-segment transcoder timings to full */
2661 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2662
2663 /* Derive per-pipe timings in case bigjoiner is used */
2664 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2665 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2666
2667 if (DISPLAY_VER(i915) < 4) {
2668 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
2669
2670 /*
2671 * Enable double wide mode when the dot clock
2672 * is > 90% of the (display) core speed.
2673 */
2674 if (intel_crtc_supports_double_wide(crtc) &&
2675 pipe_mode->crtc_clock > clock_limit) {
2676 clock_limit = i915->max_dotclk_freq;
2677 crtc_state->double_wide = true;
2678 }
2679 }
2680
2681 if (pipe_mode->crtc_clock > clock_limit) {
2682 drm_dbg_kms(&i915->drm,
2683 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
2684 crtc->base.base.id, crtc->base.name,
2685 pipe_mode->crtc_clock, clock_limit,
2686 str_yes_no(crtc_state->double_wide));
2687 return -EINVAL;
2688 }
2689
2690 return 0;
2691 }
2692
2693 static int intel_crtc_compute_config(struct intel_atomic_state *state,
2694 struct intel_crtc *crtc)
2695 {
2696 struct intel_crtc_state *crtc_state =
2697 intel_atomic_get_new_crtc_state(state, crtc);
2698 int ret;
2699
2700 ret = intel_dpll_crtc_compute_clock(state, crtc);
2701 if (ret)
2702 return ret;
2703
2704 ret = intel_crtc_compute_pipe_src(crtc_state);
2705 if (ret)
2706 return ret;
2707
2708 ret = intel_crtc_compute_pipe_mode(crtc_state);
2709 if (ret)
2710 return ret;
2711
2712 intel_crtc_compute_pixel_rate(crtc_state);
2713
2714 if (crtc_state->has_pch_encoder)
2715 return ilk_fdi_compute_config(crtc, crtc_state);
2716
2717 return 0;
2718 }
2719
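/*
 * The loop below halves numerator and denominator together until both
 * fit the hardware M/N fields, keeping the ratio (almost) intact.
 * Assuming the 24 bit DATA_LINK_M_N_MASK of 0xffffff, e.g.
 * 0x2000000/0x8000000 reduces to 0x200000/0x800000 after four shifts.
 */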
2720 static void
2721 intel_reduce_m_n_ratio(u32 *num, u32 *den)
2722 {
2723 while (*num > DATA_LINK_M_N_MASK ||
2724 *den > DATA_LINK_M_N_MASK) {
2725 *num >>= 1;
2726 *den >>= 1;
2727 }
2728 }
2729
2730 static void compute_m_n(u32 *ret_m, u32 *ret_n,
2731 u32 m, u32 n, u32 constant_n)
2732 {
2733 if (constant_n)
2734 *ret_n = constant_n;
2735 else
2736 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2737
2738 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2739 intel_reduce_m_n_ratio(ret_m, ret_n);
2740 }
2741
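/*
 * Worked link M/N example with illustrative numbers: a 148500 kHz
 * pixel clock on a 270000 kHz link and the 0x80000 constant N gives
 * link_m = 148500 * 0x80000 / 270000 = 288358 and link_n = 0x80000 =
 * 524288, i.e. 288358/524288 ~= 0.55 = 148500/270000.
 */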
2742 void
2743 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
2744 int pixel_clock, int link_clock,
2745 struct intel_link_m_n *m_n,
2746 bool fec_enable)
2747 {
2748 u32 data_clock = bits_per_pixel * pixel_clock;
2749
2750 if (fec_enable)
2751 data_clock = intel_dp_mode_to_fec_clock(data_clock);
2752
2753 /*
2754 * Windows/BIOS uses fixed M/N values always. Follow suit.
2755 *
2756 * Several DP dongles in particular also seem to be fussy
2757 * about too large link M/N values. Presumably the 20 bit
2758 * value used by Windows/BIOS is acceptable to everyone.
2759 */
2760 m_n->tu = 64;
2761 compute_m_n(&m_n->data_m, &m_n->data_n,
2762 data_clock, link_clock * nlanes * 8,
2763 0x8000000);
2764
2765 compute_m_n(&m_n->link_m, &m_n->link_n,
2766 pixel_clock, link_clock,
2767 0x80000);
2768 }
2769
2770 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2771 {
2772 /*
2773 * There may be no VBT; and if the BIOS enabled SSC we can
2774 * just keep using it to avoid unnecessary flicker. Whereas if the
2775 * BIOS isn't using it, don't assume it will work even if the VBT
2776 * indicates as much.
2777 */
2778 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2779 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2780 PCH_DREF_CONTROL) &
2781 DREF_SSC1_ENABLE;
2782
2783 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2784 drm_dbg_kms(&dev_priv->drm,
2785 "SSC %s by BIOS, overriding VBT which says %s\n",
2786 str_enabled_disabled(bios_lvds_use_ssc),
2787 str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
2788 dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
2789 }
2790 }
2791 }
2792
2793 void intel_zero_m_n(struct intel_link_m_n *m_n)
2794 {
2795 /* corresponds to 0 register value */
2796 memset(m_n, 0, sizeof(*m_n));
2797 m_n->tu = 1;
2798 }
2799
2800 void intel_set_m_n(struct drm_i915_private *i915,
2801 const struct intel_link_m_n *m_n,
2802 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
2803 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
2804 {
2805 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
2806 intel_de_write(i915, data_n_reg, m_n->data_n);
2807 intel_de_write(i915, link_m_reg, m_n->link_m);
2808 /*
2809 * On BDW+ writing LINK_N arms the double buffered update
2810 * of all the M/N registers, so it must be written last.
2811 */
2812 intel_de_write(i915, link_n_reg, m_n->link_n);
2813 }
2814
2815 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
2816 enum transcoder transcoder)
2817 {
2818 if (IS_HASWELL(dev_priv))
2819 return transcoder == TRANSCODER_EDP;
2820
2821 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
2822 }
2823
2824 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
2825 enum transcoder transcoder,
2826 const struct intel_link_m_n *m_n)
2827 {
2828 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2829 enum pipe pipe = crtc->pipe;
2830
2831 if (DISPLAY_VER(dev_priv) >= 5)
2832 intel_set_m_n(dev_priv, m_n,
2833 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
2834 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
2835 else
2836 intel_set_m_n(dev_priv, m_n,
2837 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
2838 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
2839 }
2840
2841 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
2842 enum transcoder transcoder,
2843 const struct intel_link_m_n *m_n)
2844 {
2845 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2846
2847 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
2848 return;
2849
2850 intel_set_m_n(dev_priv, m_n,
2851 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
2852 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
2853 }
2854
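/*
 * Interlace example for the vsyncshift computation below, with
 * made-up timings: an SDVO mode with crtc_htotal = 2200 gets
 * vsyncshift = (2200 - 1) / 2 = 1099, while other outputs use
 * hsync_start - htotal / 2, wrapped back into [0, htotal) when
 * negative.
 */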
2855 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
2856 {
2857 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2858 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2859 enum pipe pipe = crtc->pipe;
2860 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2861 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2862 u32 crtc_vtotal, crtc_vblank_end;
2863 int vsyncshift = 0;
2864
2865 /* We need to be careful not to change the adjusted mode, for otherwise
2866 * the hw state checker will get angry at the mismatch. */
2867 crtc_vtotal = adjusted_mode->crtc_vtotal;
2868 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
2869
2870 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
2871 /* the chip adds 2 halflines automatically */
2872 crtc_vtotal -= 1;
2873 crtc_vblank_end -= 1;
2874
2875 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
2876 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
2877 else
2878 vsyncshift = adjusted_mode->crtc_hsync_start -
2879 adjusted_mode->crtc_htotal / 2;
2880 if (vsyncshift < 0)
2881 vsyncshift += adjusted_mode->crtc_htotal;
2882 }
2883
2884 if (DISPLAY_VER(dev_priv) > 3)
2885 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
2886 vsyncshift);
2887
2888 intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
2889 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
2890 intel_de_write(dev_priv, HBLANK(cpu_transcoder),
2891 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
2892 intel_de_write(dev_priv, HSYNC(cpu_transcoder),
2893 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
2894
2895 intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
2896 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
2897 intel_de_write(dev_priv, VBLANK(cpu_transcoder),
2898 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
2899 intel_de_write(dev_priv, VSYNC(cpu_transcoder),
2900 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
2901
2902 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
2903 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
2904 * documented on the DDI_FUNC_CTL register description, EDP Input Select
2905 * bits. */
2906 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
2907 (pipe == PIPE_B || pipe == PIPE_C))
2908 intel_de_write(dev_priv, VTOTAL(pipe),
2909 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2911 }
2912
2913 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
2914 {
2915 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2916 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2917 int width = drm_rect_width(&crtc_state->pipe_src);
2918 int height = drm_rect_height(&crtc_state->pipe_src);
2919 enum pipe pipe = crtc->pipe;
2920
2921 /* pipesrc controls the size that is scaled from, which should
2922 * always be the user's requested size.
2923 */
2924 intel_de_write(dev_priv, PIPESRC(pipe),
2925 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
2926 }
2927
2928 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
2929 {
2930 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2931 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2932
2933 if (DISPLAY_VER(dev_priv) == 2)
2934 return false;
2935
2936 if (DISPLAY_VER(dev_priv) >= 9 ||
2937 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2938 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
2939 else
2940 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
2941 }
2942
2943 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
2944 struct intel_crtc_state *pipe_config)
2945 {
2946 struct drm_device *dev = crtc->base.dev;
2947 struct drm_i915_private *dev_priv = to_i915(dev);
2948 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
2949 u32 tmp;
2950
2951 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
2952 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
2953 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
2954
2955 if (!transcoder_is_dsi(cpu_transcoder)) {
2956 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
2957 pipe_config->hw.adjusted_mode.crtc_hblank_start =
2958 (tmp & 0xffff) + 1;
2959 pipe_config->hw.adjusted_mode.crtc_hblank_end =
2960 ((tmp >> 16) & 0xffff) + 1;
2961 }
2962 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
2963 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
2964 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
2965
2966 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
2967 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
2968 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
2969
2970 if (!transcoder_is_dsi(cpu_transcoder)) {
2971 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
2972 pipe_config->hw.adjusted_mode.crtc_vblank_start =
2973 (tmp & 0xffff) + 1;
2974 pipe_config->hw.adjusted_mode.crtc_vblank_end =
2975 ((tmp >> 16) & 0xffff) + 1;
2976 }
2977 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
2978 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
2979 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
2980
2981 if (intel_pipe_is_interlaced(pipe_config)) {
2982 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
2983 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
2984 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
2985 }
2986 }
2987
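/*
 * Example for the translation below: with a two pipe bigjoiner and a
 * 3840 pixel wide per-pipe source, the slave pipe (master_pipe + 1)
 * has its pipe_src translated to x = 3840 so the two halves tile
 * side by side.
 */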
2988 static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
2989 {
2990 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2991 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2992 enum pipe master_pipe, pipe = crtc->pipe;
2993 int width;
2994
2995 if (num_pipes < 2)
2996 return;
2997
2998 master_pipe = bigjoiner_master_pipe(crtc_state);
2999 width = drm_rect_width(&crtc_state->pipe_src);
3000
3001 drm_rect_translate_to(&crtc_state->pipe_src,
3002 (pipe - master_pipe) * width, 0);
3003 }
3004
3005 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3006 struct intel_crtc_state *pipe_config)
3007 {
3008 struct drm_device *dev = crtc->base.dev;
3009 struct drm_i915_private *dev_priv = to_i915(dev);
3010 u32 tmp;
3011
3012 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3013
3014 drm_rect_init(&pipe_config->pipe_src, 0, 0,
3015 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
3016 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
3017
3018 intel_bigjoiner_adjust_pipe_src(pipe_config);
3019 }
3020
3021 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
3022 {
3023 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3024 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3025 u32 pipeconf = 0;
3026
3027 /*
3028 * - We keep both pipes enabled on 830
3029 * - During modeset the pipe is still disabled and must remain so
3030 * - During fastset the pipe is already enabled and must remain so
3031 */
3032 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
3033 pipeconf |= PIPECONF_ENABLE;
3034
3035 if (crtc_state->double_wide)
3036 pipeconf |= PIPECONF_DOUBLE_WIDE;
3037
3038 /* only g4x and later have fancy bpc/dither controls */
3039 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3040 IS_CHERRYVIEW(dev_priv)) {
3041 /* Bspec claims that we can't use dithering for 30bpp pipes. */
3042 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
3043 pipeconf |= PIPECONF_DITHER_EN |
3044 PIPECONF_DITHER_TYPE_SP;
3045
3046 switch (crtc_state->pipe_bpp) {
3047 default:
3048 /* Case prevented by intel_choose_pipe_bpp_dither. */
3049 MISSING_CASE(crtc_state->pipe_bpp);
3050 fallthrough;
3051 case 18:
3052 pipeconf |= PIPECONF_BPC_6;
3053 break;
3054 case 24:
3055 pipeconf |= PIPECONF_BPC_8;
3056 break;
3057 case 30:
3058 pipeconf |= PIPECONF_BPC_10;
3059 break;
3060 }
3061 }
3062
3063 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
3064 if (DISPLAY_VER(dev_priv) < 4 ||
3065 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3066 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3067 else
3068 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
3069 } else {
3070 pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
3071 }
3072
3073 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3074 crtc_state->limited_color_range)
3075 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
3076
3077 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3078
3079 pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
3080
3081 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
3082 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
3083 }
3084
3085 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3086 {
3087 if (IS_I830(dev_priv))
3088 return false;
3089
3090 return DISPLAY_VER(dev_priv) >= 4 ||
3091 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
3092 }
3093
3094 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
3095 {
3096 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3097 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3098 u32 tmp;
3099
3100 if (!i9xx_has_pfit(dev_priv))
3101 return;
3102
3103 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
3104 if (!(tmp & PFIT_ENABLE))
3105 return;
3106
3107 /* Check whether the pfit is attached to our pipe. */
3108 if (DISPLAY_VER(dev_priv) < 4) {
3109 if (crtc->pipe != PIPE_B)
3110 return;
3111 } else {
3112 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3113 return;
3114 }
3115
3116 crtc_state->gmch_pfit.control = tmp;
3117 crtc_state->gmch_pfit.pgm_ratios =
3118 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
3119 }
3120
vlv_crtc_clock_get(struct intel_crtc * crtc,struct intel_crtc_state * pipe_config)3121 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
3122 struct intel_crtc_state *pipe_config)
3123 {
3124 struct drm_device *dev = crtc->base.dev;
3125 struct drm_i915_private *dev_priv = to_i915(dev);
3126 enum pipe pipe = crtc->pipe;
3127 struct dpll clock;
3128 u32 mdiv;
3129 int refclk = 100000;
3130
3131 /* In case of DSI, DPLL will not be used */
3132 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3133 return;
3134
3135 vlv_dpio_get(dev_priv);
3136 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
3137 vlv_dpio_put(dev_priv);
3138
3139 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
3140 clock.m2 = mdiv & DPIO_M2DIV_MASK;
3141 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
3142 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
3143 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
3144
3145 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
3146 }
3147
chv_crtc_clock_get(struct intel_crtc * crtc,struct intel_crtc_state * pipe_config)3148 static void chv_crtc_clock_get(struct intel_crtc *crtc,
3149 struct intel_crtc_state *pipe_config)
3150 {
3151 struct drm_device *dev = crtc->base.dev;
3152 struct drm_i915_private *dev_priv = to_i915(dev);
3153 enum pipe pipe = crtc->pipe;
3154 enum dpio_channel port = vlv_pipe_to_channel(pipe);
3155 struct dpll clock;
3156 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
3157 int refclk = 100000;
3158
3159 /* In case of DSI, DPLL will not be used */
3160 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3161 return;
3162
3163 vlv_dpio_get(dev_priv);
3164 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
3165 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
3166 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
3167 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
3168 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
3169 vlv_dpio_put(dev_priv);
3170
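	/*
	 * M2 is an 8.22 fixed point value: the integer part comes from
	 * PLL_DW0 and, when the fractional divider is enabled, the low
	 * 22 bits of PLL_DW2 supply the fraction.
	 */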
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}

static enum intel_output_format
bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	if (tmp & PIPEMISC_YUV420_ENABLE) {
		/* We support 4:2:0 in full blend mode only */
		drm_WARN_ON(&dev_priv->drm,
			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);

		return INTEL_OUTPUT_FORMAT_YCBCR420;
	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	} else {
		return INTEL_OUTPUT_FORMAT_RGB;
	}
}

static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 tmp;

	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	if (tmp & DISP_PIPE_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	if (!HAS_GMCH(dev_priv) &&
	    tmp & DISP_PIPE_CSC_ENABLE)
		crtc_state->csc_enable = true;
}

static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_BPC_6:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_BPC_8:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_BPC_10:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			MISSING_CASE(tmp);
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);

	pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= PIPECONF_ENABLE;

	switch (crtc_state->pipe_bpp) {
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		MISSING_CASE(crtc_state->pipe_bpp);
		fallthrough;
	case 18:
		val |= PIPECONF_BPC_6;
		break;
	case 24:
		val |= PIPECONF_BPC_8;
		break;
	case 30:
		val |= PIPECONF_BPC_10;
		break;
	case 36:
		val |= PIPECONF_BPC_12;
		break;
	}

	if (crtc_state->dither)
		val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACE_IF_ID_ILK;
	else
		val |= PIPECONF_INTERLACE_PF_PD_ILK;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
	val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}

static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= PIPECONF_ENABLE;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACE_IF_ID_ILK;
	else
		val |= PIPECONF_INTERLACE_PF_PD_ILK;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}

static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_BPC_6;
		break;
	case 24:
		val |= PIPEMISC_BPC_8;
		break;
	case 30:
		val |= PIPEMISC_BPC_10;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPEMISC_BPC_12_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}

int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_BPC_MASK) {
	case PIPEMISC_BPC_6:
		return 18;
	case PIPEMISC_BPC_8:
		return 24;
	case PIPEMISC_BPC_10:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPEMISC_BPC_12_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}

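/*
 * Worked example (illustrative numbers): a 270000 kHz dotclock at
 * 24 bpp needs 270000 * 24 * 21 / 20 = 6804000 kbps including the 5%
 * margin below, so on a 270000 kHz link that is
 * DIV_ROUND_UP(6804000, 270000 * 8) = 4 lanes.
 */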
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

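/*
 * Note that the TU size field shares a register with the data M value,
 * which is why data_m_reg gets read a second time below.
 */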
void intel_get_m_n(struct drm_i915_private *i915,
		   struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
}

void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
	else
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_get_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}

static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
				  u32 pos, u32 size)
{
	drm_rect_init(&crtc_state->pch_pfit.dst,
		      pos >> 16, pos & 0xffff,
		      size >> 16, size & 0xffff);
}

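/*
 * A scaler is doing panel fitter (pipe scaling) duty only when it is
 * enabled and its plane select field is zero; a non-zero plane select
 * means the scaler is bound to a plane instead, hence the combined
 * mask check below.
 */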
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}

static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiate them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}

static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);

	pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;

	pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static u8 bigjoiner_pipes(struct drm_i915_private *i915)
{
	u8 pipes;

	if (DISPLAY_VER(i915) >= 12)
		pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
	else if (DISPLAY_VER(i915) >= 11)
		pipes = BIT(PIPE_B) | BIT(PIPE_C);
	else
		pipes = 0;

	return pipes & RUNTIME_INFO(i915)->pipe_mask;
}

static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}

static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
				    u8 *master_pipes, u8 *slave_pipes)
{
	struct intel_crtc *crtc;

	*master_pipes = 0;
	*slave_pipes = 0;

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
					 bigjoiner_pipes(dev_priv)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				*master_pipes |= BIT(pipe);
			else
				*slave_pipes |= BIT(pipe);
		}

		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				*master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				*slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 *master_pipes, *slave_pipes);
}

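/*
 * Example (illustrative): with master_pipes = BIT(PIPE_A) and
 * slave_pipes = BIT(PIPE_B), asking for PIPE_B masks off everything
 * from PIPE_B upwards, leaving only BIT(PIPE_A), and fls() then
 * returns PIPE_A as the master.
 */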
static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	if ((slave_pipes & BIT(pipe)) == 0)
		return pipe;

	/* ignore everything above our pipe */
	master_pipes &= ~GENMASK(7, pipe);

	/* highest remaining bit should be our master pipe */
	return fls(master_pipes) - 1;
}

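/*
 * Example (illustrative): with master_pipes = BIT(PIPE_A) and
 * slave_pipes = BIT(PIPE_B), the master for PIPE_B is PIPE_A, the
 * synthetic BIT(7) becomes the "next" master, and the returned slave
 * mask is slave_pipes & GENMASK(6, PIPE_A) = BIT(PIPE_B).
 */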
static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	enum pipe master_pipe, next_master_pipe;

	master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);

	if ((master_pipes & BIT(master_pipe)) == 0)
		return 0;

	/* ignore our master pipe and everything below it */
	master_pipes &= ~GENMASK(master_pipe, 0);
	/* make sure a high bit is set for the ffs() */
	master_pipes |= BIT(7);
	/* lowest remaining bit should be the next master pipe */
	next_master_pipe = ffs(master_pipes) - 1;

	return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
}

static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
{
	u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);

	if (DISPLAY_VER(i915) >= 11)
		panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	return panel_transcoder_mask;
}

static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 master_pipes, slave_pipes;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always-on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
	if (slave_pipes & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder)
			get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}

static bool has_edp_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & BIT(TRANSCODER_EDP);
}

static bool has_dsi_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
				      BIT(TRANSCODER_DSI_1));
}

static bool has_pipe_transcoders(u8 enabled_transcoders)
{
	return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
				       BIT(TRANSCODER_DSI_0) |
				       BIT(TRANSCODER_DSI_1));
}

static void assert_enabled_transcoders(struct drm_i915_private *i915,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	drm_WARN_ON(&i915->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	drm_WARN_ON(&i915->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}

static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}

static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u8 master_pipes, slave_pipes;
	enum pipe pipe = crtc->pipe;

	enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);

	if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
		return;

	crtc_state->bigjoiner_pipes =
		BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
		get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
}

static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	intel_dsc_get_config(pipe_config);
	intel_bigjoiner_get_config(pipe_config);

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	hsw_ips_get_config(pipe_config);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
				    MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
				    CHICKEN_TRANS(pipe_config->cpu_transcoder));

		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
	} else {
		/* no idea if this is correct */
		pipe_config->framestart_delay = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}

bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
		return false;

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);

	return true;
}

/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}

int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct intel_crtc *possible_crtc;
	struct intel_crtc *crtc = NULL;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = to_intel_crtc(connector->state->crtc);

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_intel_crtc(dev, possible_crtc) {
		if (!(encoder->base.possible_crtcs &
		      drm_crtc_mask(&possible_crtc->base)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->base.state->enable) {
			drm_modeset_unlock(&possible_crtc->base.mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, &crtc->base);
	if (ret)
		goto fail;

	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_crtc_wait_for_next_vblank(crtc);

	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	if (ret == -EDEADLK)
		return ret;

	return false;
}

void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		drm_dbg_kms(&i915->drm,
			    "Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}

static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->display.vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev_priv))
		return 120000;
	else if (DISPLAY_VER(dev_priv) != 2)
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
			 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}

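/*
 * Illustrative example: with link_freq = 270000 (kHz), link_m = 22222
 * and link_n = 32768, the dot clock below works out to
 * DIV_ROUND_UP_ULL(22222 * 270000, 32768) = 183104 kHz.
 */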
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link M/N based dot clock is simpler:
	 * dot_clock = (link_m * link_clock) / link_n
	 */

	if (!m_n->link_n)
		return 0;

	return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
				m_n->link_n);
}

int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
{
	int dotclock;

	if (intel_crtc_has_dp_encoder(pipe_config))
		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
						    &pipe_config->dp_m_n);
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
		dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
					     pipe_config->pipe_bpp);
	else
		dotclock = pipe_config->port_clock;

	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
	    !intel_crtc_has_dp_encoder(pipe_config))
		dotclock *= 2;

	if (pipe_config->pixel_multiplier)
		dotclock /= pipe_config->pixel_multiplier;

	return dotclock;
}

/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_crtc_for_pipe(dev_priv, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	if (!intel_crtc_get_pipe_config(crtc_state)) {
		kfree(crtc_state);
		kfree(mode);
		return NULL;
	}

	intel_encoder_get_config(encoder, crtc_state);

	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

	kfree(crtc_state);

	return mode;
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}

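/*
 * On icl+ planar YUV scanout uses two hardware planes: the plane
 * userspace asked for, plus an otherwise-unused Y plane the driver
 * links to it internally. The loops below first tear down any stale
 * links and then pick a free Y plane for each NV12-class plane.
 */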
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
			crtc_state->data_rate[plane->id] = 0;
			crtc_state->rel_data_rate[plane->id] = 0;
		}

		plane_state->planar_slave = false;
	}

	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		crtc_state->data_rate[linked->id] =
			crtc_state->data_rate_y[plane->id];
		crtc_state->rel_data_rate[linked->id] =
			crtc_state->rel_data_rate_y[plane->id];
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}

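/*
 * The double negation below collapses the c8_planes bitmasks to
 * booleans, so this only triggers when C8 planes go from none to some
 * or vice versa, not on every C8 plane change.
 */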
static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}

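/*
 * The line time watermarks below are in 1/8 us units: one line takes
 * crtc_htotal * 1000 / clock-in-kHz microseconds, and the extra
 * factor of 8 keeps three fractional bits.
 */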
hsw_linetime_wm(const struct intel_crtc_state * crtc_state)4746 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
4747 {
4748 const struct drm_display_mode *pipe_mode =
4749 &crtc_state->hw.pipe_mode;
4750 int linetime_wm;
4751
4752 if (!crtc_state->hw.enable)
4753 return 0;
4754
4755 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4756 pipe_mode->crtc_clock);
4757
4758 return min(linetime_wm, 0x1ff);
4759 }
4760
hsw_ips_linetime_wm(const struct intel_crtc_state * crtc_state,const struct intel_cdclk_state * cdclk_state)4761 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
4762 const struct intel_cdclk_state *cdclk_state)
4763 {
4764 const struct drm_display_mode *pipe_mode =
4765 &crtc_state->hw.pipe_mode;
4766 int linetime_wm;
4767
4768 if (!crtc_state->hw.enable)
4769 return 0;
4770
4771 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4772 cdclk_state->logical.cdclk);
4773
4774 return min(linetime_wm, 0x1ff);
4775 }
4776
skl_linetime_wm(const struct intel_crtc_state * crtc_state)4777 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
4778 {
4779 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4780 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4781 const struct drm_display_mode *pipe_mode =
4782 &crtc_state->hw.pipe_mode;
4783 int linetime_wm;
4784
4785 if (!crtc_state->hw.enable)
4786 return 0;
4787
4788 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
4789 crtc_state->pixel_rate);
4790
4791 /* Display WA #1135: BXT:ALL GLK:ALL */
4792 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4793 skl_watermark_ipc_enabled(dev_priv))
4794 linetime_wm /= 2;
4795
4796 return min(linetime_wm, 0x1ff);
4797 }
4798
4799 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
4800 struct intel_crtc *crtc)
4801 {
4802 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4803 struct intel_crtc_state *crtc_state =
4804 intel_atomic_get_new_crtc_state(state, crtc);
4805 const struct intel_cdclk_state *cdclk_state;
4806
4807 if (DISPLAY_VER(dev_priv) >= 9)
4808 crtc_state->linetime = skl_linetime_wm(crtc_state);
4809 else
4810 crtc_state->linetime = hsw_linetime_wm(crtc_state);
4811
4812 if (!hsw_crtc_supports_ips(crtc))
4813 return 0;
4814
4815 cdclk_state = intel_atomic_get_cdclk_state(state);
4816 if (IS_ERR(cdclk_state))
4817 return PTR_ERR(cdclk_state);
4818
4819 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
4820 cdclk_state);
4821
4822 return 0;
4823 }
4824
4825 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
4826 struct intel_crtc *crtc)
4827 {
4828 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4829 struct intel_crtc_state *crtc_state =
4830 intel_atomic_get_new_crtc_state(state, crtc);
4831 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
4832 int ret;
4833
4834 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
4835 mode_changed && !crtc_state->hw.active)
4836 crtc_state->update_wm_post = true;
4837
4838 if (mode_changed) {
4839 ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
4840 if (ret)
4841 return ret;
4842 }
4843
4844 /*
4845 * May need to update pipe gamma enable bits
4846 * when C8 planes are getting enabled/disabled.
4847 */
4848 if (c8_planes_changed(crtc_state))
4849 crtc_state->uapi.color_mgmt_changed = true;
4850
4851 if (mode_changed || crtc_state->update_pipe ||
4852 crtc_state->uapi.color_mgmt_changed) {
4853 ret = intel_color_check(crtc_state);
4854 if (ret)
4855 return ret;
4856 }
4857
4858 ret = intel_compute_pipe_wm(state, crtc);
4859 if (ret) {
4860 drm_dbg_kms(&dev_priv->drm,
4861 "Target pipe watermarks are invalid\n");
4862 return ret;
4863 }
4864
4865 /*
4866 * Calculate 'intermediate' watermarks that satisfy both the
4867 * old state and the new state. We can program these
4868 * immediately.
4869 */
4870 ret = intel_compute_intermediate_wm(state, crtc);
4871 if (ret) {
4872 drm_dbg_kms(&dev_priv->drm,
4873 "No valid intermediate pipe watermarks are possible\n");
4874 return ret;
4875 }
4876
4877 if (DISPLAY_VER(dev_priv) >= 9) {
4878 if (mode_changed || crtc_state->update_pipe) {
4879 ret = skl_update_scaler_crtc(crtc_state);
4880 if (ret)
4881 return ret;
4882 }
4883
4884 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
4885 if (ret)
4886 return ret;
4887 }
4888
4889 if (HAS_IPS(dev_priv)) {
4890 ret = hsw_ips_compute_config(state, crtc);
4891 if (ret)
4892 return ret;
4893 }
4894
4895 if (DISPLAY_VER(dev_priv) >= 9 ||
4896 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
4897 ret = hsw_compute_linetime_wm(state, crtc);
4898 if (ret)
4899 return ret;
4900
4901 }
4902
4903 ret = intel_psr2_sel_fetch_update(state, crtc);
4904 if (ret)
4905 return ret;
4906
4907 return 0;
4908 }
4909
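/*
 * Example of the clamping below (hypothetical values): a connector with
 * max_bpc = 10 maps to bpp = 10 * 3 = 30, so a platform baseline
 * pipe_bpp of 36 (12 bpc) would be lowered to 30 for this crtc.
 */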
4910 static int
4911 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
4912 struct intel_crtc_state *crtc_state)
4913 {
4914 struct drm_connector *connector = conn_state->connector;
4915 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4916 const struct drm_display_info *info = &connector->display_info;
4917 int bpp;
4918
4919 switch (conn_state->max_bpc) {
4920 case 6 ... 7:
4921 bpp = 6 * 3;
4922 break;
4923 case 8 ... 9:
4924 bpp = 8 * 3;
4925 break;
4926 case 10 ... 11:
4927 bpp = 10 * 3;
4928 break;
4929 case 12 ... 16:
4930 bpp = 12 * 3;
4931 break;
4932 default:
4933 MISSING_CASE(conn_state->max_bpc);
4934 return -EINVAL;
4935 }
4936
4937 if (bpp < crtc_state->pipe_bpp) {
4938 drm_dbg_kms(&i915->drm,
4939 "[CONNECTOR:%d:%s] Limiting display bpp to %d "
4940 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
4941 connector->base.id, connector->name,
4942 bpp, 3 * info->bpc,
4943 3 * conn_state->max_requested_bpc,
4944 crtc_state->pipe_bpp);
4945
4946 crtc_state->pipe_bpp = bpp;
4947 }
4948
4949 return 0;
4950 }
4951
4952 static int
4953 compute_baseline_pipe_bpp(struct intel_atomic_state *state,
4954 struct intel_crtc *crtc)
4955 {
4956 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4957 struct intel_crtc_state *crtc_state =
4958 intel_atomic_get_new_crtc_state(state, crtc);
4959 struct drm_connector *connector;
4960 struct drm_connector_state *connector_state;
4961 int bpp, i;
4962
4963 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4964 IS_CHERRYVIEW(dev_priv)))
4965 bpp = 10*3;
4966 else if (DISPLAY_VER(dev_priv) >= 5)
4967 bpp = 12*3;
4968 else
4969 bpp = 8*3;
4970
4971 crtc_state->pipe_bpp = bpp;
4972
4973 /* Clamp display bpp to connector max bpp */
4974 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4975 int ret;
4976
4977 if (connector_state->crtc != &crtc->base)
4978 continue;
4979
4980 ret = compute_sink_pipe_bpp(connector_state, crtc_state);
4981 if (ret)
4982 return ret;
4983 }
4984
4985 return 0;
4986 }
4987
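/*
 * Example conflict (illustrative): an SST DP/HDMI connector and an MST
 * stream both routed to port B set BIT(PORT_B) in used_ports and
 * used_mst_ports respectively; the overlapping masks make the check
 * below return false.
 */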
4988 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
4989 {
4990 struct drm_device *dev = state->base.dev;
4991 struct drm_connector *connector;
4992 struct drm_connector_list_iter conn_iter;
4993 unsigned int used_ports = 0;
4994 unsigned int used_mst_ports = 0;
4995 bool ret = true;
4996
4997 /*
4998 * We're going to peek into connector->state,
4999 * hence connection_mutex must be held.
5000 */
5001 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
5002
5003 /*
5004 * Walk the connector list instead of the encoder
5005 * list to detect the problem on ddi platforms
5006 * where there's just one encoder per digital port.
5007 */
5008 drm_connector_list_iter_begin(dev, &conn_iter);
5009 drm_for_each_connector_iter(connector, &conn_iter) {
5010 struct drm_connector_state *connector_state;
5011 struct intel_encoder *encoder;
5012
5013 connector_state =
5014 drm_atomic_get_new_connector_state(&state->base,
5015 connector);
5016 if (!connector_state)
5017 connector_state = connector->state;
5018
5019 if (!connector_state->best_encoder)
5020 continue;
5021
5022 encoder = to_intel_encoder(connector_state->best_encoder);
5023
5024 drm_WARN_ON(dev, !connector_state->crtc);
5025
5026 switch (encoder->type) {
5027 case INTEL_OUTPUT_DDI:
5028 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
5029 break;
5030 fallthrough;
5031 case INTEL_OUTPUT_DP:
5032 case INTEL_OUTPUT_HDMI:
5033 case INTEL_OUTPUT_EDP:
5034 /* the same port mustn't appear more than once */
5035 if (used_ports & BIT(encoder->port))
5036 ret = false;
5037
5038 used_ports |= BIT(encoder->port);
5039 break;
5040 case INTEL_OUTPUT_DP_MST:
5041 used_mst_ports |=
5042 1 << encoder->port;
5043 break;
5044 default:
5045 break;
5046 }
5047 }
5048 drm_connector_list_iter_end(&conn_iter);
5049
5050 /* can't mix MST and SST/HDMI on the same port */
5051 if (used_ports & used_mst_ports)
5052 return false;
5053
5054 return ret;
5055 }
5056
5057 static void
5058 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
5059 struct intel_crtc *crtc)
5060 {
5061 struct intel_crtc_state *crtc_state =
5062 intel_atomic_get_new_crtc_state(state, crtc);
5063
5064 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
5065
5066 drm_property_replace_blob(&crtc_state->hw.degamma_lut,
5067 crtc_state->uapi.degamma_lut);
5068 drm_property_replace_blob(&crtc_state->hw.gamma_lut,
5069 crtc_state->uapi.gamma_lut);
5070 drm_property_replace_blob(&crtc_state->hw.ctm,
5071 crtc_state->uapi.ctm);
5072 }
5073
5074 static void
5075 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
5076 struct intel_crtc *crtc)
5077 {
5078 struct intel_crtc_state *crtc_state =
5079 intel_atomic_get_new_crtc_state(state, crtc);
5080
5081 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
5082
5083 crtc_state->hw.enable = crtc_state->uapi.enable;
5084 crtc_state->hw.active = crtc_state->uapi.active;
5085 drm_mode_copy(&crtc_state->hw.mode,
5086 &crtc_state->uapi.mode);
5087 drm_mode_copy(&crtc_state->hw.adjusted_mode,
5088 &crtc_state->uapi.adjusted_mode);
5089 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
5090
5091 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
5092 }
5093
5094 static void
5095 copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
5096 struct intel_crtc *slave_crtc)
5097 {
5098 struct intel_crtc_state *slave_crtc_state =
5099 intel_atomic_get_new_crtc_state(state, slave_crtc);
5100 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
5101 const struct intel_crtc_state *master_crtc_state =
5102 intel_atomic_get_new_crtc_state(state, master_crtc);
5103
5104 drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
5105 master_crtc_state->hw.degamma_lut);
5106 drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
5107 master_crtc_state->hw.gamma_lut);
5108 drm_property_replace_blob(&slave_crtc_state->hw.ctm,
5109 master_crtc_state->hw.ctm);
5110
5111 slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
5112 }
5113
5114 static int
5115 copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
5116 struct intel_crtc *slave_crtc)
5117 {
5118 struct intel_crtc_state *slave_crtc_state =
5119 intel_atomic_get_new_crtc_state(state, slave_crtc);
5120 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
5121 const struct intel_crtc_state *master_crtc_state =
5122 intel_atomic_get_new_crtc_state(state, master_crtc);
5123 struct intel_crtc_state *saved_state;
5124
5125 WARN_ON(master_crtc_state->bigjoiner_pipes !=
5126 slave_crtc_state->bigjoiner_pipes);
5127
5128 saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
5129 if (!saved_state)
5130 return -ENOMEM;
5131
5132 /* preserve some things from the slave's original crtc state */
5133 saved_state->uapi = slave_crtc_state->uapi;
5134 saved_state->scaler_state = slave_crtc_state->scaler_state;
5135 saved_state->shared_dpll = slave_crtc_state->shared_dpll;
5136 saved_state->crc_enabled = slave_crtc_state->crc_enabled;
5137
5138 intel_crtc_free_hw_state(slave_crtc_state);
5139 memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
5140 kfree(saved_state);
5141
5142 /* Re-init hw state */
5143 memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
5144 slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
5145 slave_crtc_state->hw.active = master_crtc_state->hw.active;
5146 drm_mode_copy(&slave_crtc_state->hw.mode,
5147 &master_crtc_state->hw.mode);
5148 drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
5149 &master_crtc_state->hw.pipe_mode);
5150 drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
5151 &master_crtc_state->hw.adjusted_mode);
5152 slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
5153
5154 copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
5155
5156 slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
5157 slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
5158 slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;
5159
5160 WARN_ON(master_crtc_state->bigjoiner_pipes !=
5161 slave_crtc_state->bigjoiner_pipes);
5162
5163 return 0;
5164 }
5165
5166 static int
5167 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
5168 struct intel_crtc *crtc)
5169 {
5170 struct intel_crtc_state *crtc_state =
5171 intel_atomic_get_new_crtc_state(state, crtc);
5172 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5173 struct intel_crtc_state *saved_state;
5174
5175 saved_state = intel_crtc_state_alloc(crtc);
5176 if (!saved_state)
5177 return -ENOMEM;
5178
5179 /* free the old crtc_state->hw members */
5180 intel_crtc_free_hw_state(crtc_state);
5181
5182 /* FIXME: before the switch to atomic started, a new pipe_config was
5183 * kzalloc'd. Code that depends on any field being zero should be
5184 * fixed, so that the crtc_state can be safely duplicated. For now,
5185 * only fields that are known to not cause problems are preserved. */
5186
5187 saved_state->uapi = crtc_state->uapi;
5188 saved_state->inherited = crtc_state->inherited;
5189 saved_state->scaler_state = crtc_state->scaler_state;
5190 saved_state->shared_dpll = crtc_state->shared_dpll;
5191 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5192 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
5193 sizeof(saved_state->icl_port_dplls));
5194 saved_state->crc_enabled = crtc_state->crc_enabled;
5195 if (IS_G4X(dev_priv) ||
5196 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5197 saved_state->wm = crtc_state->wm;
5198
5199 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
5200 kfree(saved_state);
5201
5202 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
5203
5204 return 0;
5205 }
5206
5207 static int
5208 intel_modeset_pipe_config(struct intel_atomic_state *state,
5209 struct intel_crtc *crtc)
5210 {
5211 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5212 struct intel_crtc_state *crtc_state =
5213 intel_atomic_get_new_crtc_state(state, crtc);
5214 struct drm_connector *connector;
5215 struct drm_connector_state *connector_state;
5216 int pipe_src_w, pipe_src_h;
5217 int base_bpp, ret, i;
5218 bool retry = true;
5219
5220 crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;
5221
5222 crtc_state->framestart_delay = 1;
5223
5224 /*
5225 * Sanitize sync polarity flags based on requested ones. If neither
5226 * positive nor negative polarity is requested, treat this as meaning
5227 * negative polarity.
5228 */
5229 if (!(crtc_state->hw.adjusted_mode.flags &
5230 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
5231 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
5232
5233 if (!(crtc_state->hw.adjusted_mode.flags &
5234 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
5235 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
5236
5237 ret = compute_baseline_pipe_bpp(state, crtc);
5238 if (ret)
5239 return ret;
5240
5241 base_bpp = crtc_state->pipe_bpp;
5242
5243 /*
5244 * Determine the real pipe dimensions. Note that stereo modes can
5245 * increase the actual pipe size due to the frame doubling and
5246 * insertion of additional space for blanks between the frames. This
5247 * is stored in the crtc timings. We use the requested mode to do this
5248 * computation to clearly distinguish it from the adjusted mode, which
5249 * can be changed by the connectors in the below retry loop.
5250 */
5251 drm_mode_get_hv_timing(&crtc_state->hw.mode,
5252 &pipe_src_w, &pipe_src_h);
5253 drm_rect_init(&crtc_state->pipe_src, 0, 0,
5254 pipe_src_w, pipe_src_h);
5255
5256 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5257 struct intel_encoder *encoder =
5258 to_intel_encoder(connector_state->best_encoder);
5259
5260 if (connector_state->crtc != &crtc->base)
5261 continue;
5262
5263 if (!check_single_encoder_cloning(state, crtc, encoder)) {
5264 drm_dbg_kms(&i915->drm,
5265 "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
5266 encoder->base.base.id, encoder->base.name);
5267 return -EINVAL;
5268 }
5269
5270 /*
5271 * Determine output_types before calling the .compute_config()
5272 * hooks so that the hooks can use this information safely.
5273 */
5274 if (encoder->compute_output_type)
5275 crtc_state->output_types |=
5276 BIT(encoder->compute_output_type(encoder, crtc_state,
5277 connector_state));
5278 else
5279 crtc_state->output_types |= BIT(encoder->type);
5280 }
5281
5282 encoder_retry:
5283 /* Ensure the port clock defaults are reset when retrying. */
5284 crtc_state->port_clock = 0;
5285 crtc_state->pixel_multiplier = 1;
5286
5287 /* Fill in default crtc timings, allow encoders to overwrite them. */
5288 drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
5289 CRTC_STEREO_DOUBLE);
5290
5291 /* Pass our mode to the connectors and the CRTC to give them a chance to
5292 * adjust it according to limitations or connector properties, and also
5293 * a chance to reject the mode entirely.
5294 */
5295 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5296 struct intel_encoder *encoder =
5297 to_intel_encoder(connector_state->best_encoder);
5298
5299 if (connector_state->crtc != &crtc->base)
5300 continue;
5301
5302 ret = encoder->compute_config(encoder, crtc_state,
5303 connector_state);
5304 if (ret == -EDEADLK)
5305 return ret;
5306 if (ret < 0) {
5307 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
5308 encoder->base.base.id, encoder->base.name, ret);
5309 return ret;
5310 }
5311 }
5312
5313 /* Set default port clock if not overwritten by the encoder. Needs to be
5314 * done afterwards in case the encoder adjusts the mode. */
5315 if (!crtc_state->port_clock)
5316 crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
5317 * crtc_state->pixel_multiplier;
5318
5319 ret = intel_crtc_compute_config(state, crtc);
5320 if (ret == -EDEADLK)
5321 return ret;
5322 if (ret == -EAGAIN) {
5323 if (drm_WARN(&i915->drm, !retry,
5324 "[CRTC:%d:%s] loop in pipe configuration computation\n",
5325 crtc->base.base.id, crtc->base.name))
5326 return -EINVAL;
5327
5328 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n",
5329 crtc->base.base.id, crtc->base.name);
5330 retry = false;
5331 goto encoder_retry;
5332 }
5333 if (ret < 0) {
5334 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
5335 crtc->base.base.id, crtc->base.name, ret);
5336 return ret;
5337 }
5338
5339 /* Dithering seems to not pass-through bits correctly when it should, so
5340 * only enable it on 6bpc panels and when it's not a compliance
5341 * test requesting a 6bpc video pattern.
5342 */
5343 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
5344 !crtc_state->dither_force_disable;
5345 drm_dbg_kms(&i915->drm,
5346 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
5347 crtc->base.base.id, crtc->base.name,
5348 base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
5349
5350 return 0;
5351 }
5352
5353 static int
5354 intel_modeset_pipe_config_late(struct intel_atomic_state *state,
5355 struct intel_crtc *crtc)
5356 {
5357 struct intel_crtc_state *crtc_state =
5358 intel_atomic_get_new_crtc_state(state, crtc);
5359 struct drm_connector_state *conn_state;
5360 struct drm_connector *connector;
5361 int i;
5362
5363 intel_bigjoiner_adjust_pipe_src(crtc_state);
5364
5365 for_each_new_connector_in_state(&state->base, connector,
5366 conn_state, i) {
5367 struct intel_encoder *encoder =
5368 to_intel_encoder(conn_state->best_encoder);
5369 int ret;
5370
5371 if (conn_state->crtc != &crtc->base ||
5372 !encoder->compute_config_late)
5373 continue;
5374
5375 ret = encoder->compute_config_late(encoder, crtc_state,
5376 conn_state);
5377 if (ret)
5378 return ret;
5379 }
5380
5381 return 0;
5382 }
5383
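/*
 * The check below accepts a difference of up to (clock1 + clock2) / 20,
 * i.e. roughly 10% of either clock. Worked example (hypothetical
 * values): clock1 = 100000, clock2 = 108000 gives diff = 8000 against a
 * threshold of 10400, so the clocks fuzzily match; clock2 = 112000
 * gives diff = 12000 > 10600 and no match.
 */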
5384 bool intel_fuzzy_clock_check(int clock1, int clock2)
5385 {
5386 int diff;
5387
5388 if (clock1 == clock2)
5389 return true;
5390
5391 if (!clock1 || !clock2)
5392 return false;
5393
5394 diff = abs(clock1 - clock2);
5395
5396 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
5397 return true;
5398
5399 return false;
5400 }
5401
5402 static bool
5403 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
5404 const struct intel_link_m_n *m2_n2)
5405 {
5406 return m_n->tu == m2_n2->tu &&
5407 m_n->data_m == m2_n2->data_m &&
5408 m_n->data_n == m2_n2->data_n &&
5409 m_n->link_m == m2_n2->link_m &&
5410 m_n->link_n == m2_n2->link_n;
5411 }
5412
5413 static bool
5414 intel_compare_infoframe(const union hdmi_infoframe *a,
5415 const union hdmi_infoframe *b)
5416 {
5417 return memcmp(a, b, sizeof(*a)) == 0;
5418 }
5419
5420 static bool
5421 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
5422 const struct drm_dp_vsc_sdp *b)
5423 {
5424 return memcmp(a, b, sizeof(*a)) == 0;
5425 }
5426
5427 static void
5428 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
5429 bool fastset, const char *name,
5430 const union hdmi_infoframe *a,
5431 const union hdmi_infoframe *b)
5432 {
5433 if (fastset) {
5434 if (!drm_debug_enabled(DRM_UT_KMS))
5435 return;
5436
5437 drm_dbg_kms(&dev_priv->drm,
5438 "fastset mismatch in %s infoframe\n", name);
5439 drm_dbg_kms(&dev_priv->drm, "expected:\n");
5440 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
5441 drm_dbg_kms(&dev_priv->drm, "found:\n");
5442 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
5443 } else {
5444 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
5445 drm_err(&dev_priv->drm, "expected:\n");
5446 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
5447 drm_err(&dev_priv->drm, "found:\n");
5448 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
5449 }
5450 }
5451
5452 static void
5453 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
5454 bool fastset, const char *name,
5455 const struct drm_dp_vsc_sdp *a,
5456 const struct drm_dp_vsc_sdp *b)
5457 {
5458 if (fastset) {
5459 if (!drm_debug_enabled(DRM_UT_KMS))
5460 return;
5461
5462 drm_dbg_kms(&dev_priv->drm,
5463 "fastset mismatch in %s dp sdp\n", name);
5464 drm_dbg_kms(&dev_priv->drm, "expected:\n");
5465 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
5466 drm_dbg_kms(&dev_priv->drm, "found:\n");
5467 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
5468 } else {
5469 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
5470 drm_err(&dev_priv->drm, "expected:\n");
5471 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
5472 drm_err(&dev_priv->drm, "found:\n");
5473 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
5474 }
5475 }
5476
5477 static void __printf(4, 5)
5478 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
5479 const char *name, const char *format, ...)
5480 {
5481 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5482 struct va_format vaf;
5483 va_list args;
5484
5485 va_start(args, format);
5486 vaf.fmt = format;
5487 vaf.va = &args;
5488
5489 if (fastset)
5490 drm_dbg_kms(&i915->drm,
5491 "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
5492 crtc->base.base.id, crtc->base.name, name, &vaf);
5493 else
5494 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
5495 crtc->base.base.id, crtc->base.name, name, &vaf);
5496
5497 va_end(args);
5498 }
5499
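/*
 * The i915.fastboot module parameter overrides the per-platform
 * defaults below: -1 (auto) keeps the platform default, while booting
 * with e.g. i915.fastboot=1 forces fastboot on even on platforms where
 * it is disabled by default.
 */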
5500 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
5501 {
5502 if (dev_priv->params.fastboot != -1)
5503 return dev_priv->params.fastboot;
5504
5505 /* Enable fastboot by default on Skylake and newer */
5506 if (DISPLAY_VER(dev_priv) >= 9)
5507 return true;
5508
5509 /* Enable fastboot by default on VLV and CHV */
5510 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5511 return true;
5512
5513 /* Disabled by default on all others */
5514 return false;
5515 }
5516
5517 bool
5518 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
5519 const struct intel_crtc_state *pipe_config,
5520 bool fastset)
5521 {
5522 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
5523 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5524 bool ret = true;
5525 u32 bp_gamma = 0;
5526 bool fixup_inherited = fastset &&
5527 current_config->inherited && !pipe_config->inherited;
5528
5529 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
5530 drm_dbg_kms(&dev_priv->drm,
5531 "initial modeset and fastboot not set\n");
5532 ret = false;
5533 }
5534
5535 #define PIPE_CONF_CHECK_X(name) do { \
5536 if (current_config->name != pipe_config->name) { \
5537 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5538 "(expected 0x%08x, found 0x%08x)", \
5539 current_config->name, \
5540 pipe_config->name); \
5541 ret = false; \
5542 } \
5543 } while (0)
5544
5545 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
5546 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
5547 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5548 "(expected 0x%08x, found 0x%08x)", \
5549 current_config->name & (mask), \
5550 pipe_config->name & (mask)); \
5551 ret = false; \
5552 } \
5553 } while (0)
5554
5555 #define PIPE_CONF_CHECK_I(name) do { \
5556 if (current_config->name != pipe_config->name) { \
5557 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5558 "(expected %i, found %i)", \
5559 current_config->name, \
5560 pipe_config->name); \
5561 ret = false; \
5562 } \
5563 } while (0)
5564
5565 #define PIPE_CONF_CHECK_BOOL(name) do { \
5566 if (current_config->name != pipe_config->name) { \
5567 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5568 "(expected %s, found %s)", \
5569 str_yes_no(current_config->name), \
5570 str_yes_no(pipe_config->name)); \
5571 ret = false; \
5572 } \
5573 } while (0)
5574
5575 /*
5576 * Checks state where we only read out the enabling, but not the entire
5577 * state itself (like full infoframes or ELD for audio). These states
5578 * require a full modeset on bootup to fix up.
5579 */
5580 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
5581 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
5582 PIPE_CONF_CHECK_BOOL(name); \
5583 } else { \
5584 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5585 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
5586 str_yes_no(current_config->name), \
5587 str_yes_no(pipe_config->name)); \
5588 ret = false; \
5589 } \
5590 } while (0)
5591
5592 #define PIPE_CONF_CHECK_P(name) do { \
5593 if (current_config->name != pipe_config->name) { \
5594 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5595 "(expected %p, found %p)", \
5596 current_config->name, \
5597 pipe_config->name); \
5598 ret = false; \
5599 } \
5600 } while (0)
5601
5602 #define PIPE_CONF_CHECK_M_N(name) do { \
5603 if (!intel_compare_link_m_n(&current_config->name, \
5604 &pipe_config->name)) { \
5605 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5606 "(expected tu %i data %i/%i link %i/%i, " \
5607 "found tu %i, data %i/%i link %i/%i)", \
5608 current_config->name.tu, \
5609 current_config->name.data_m, \
5610 current_config->name.data_n, \
5611 current_config->name.link_m, \
5612 current_config->name.link_n, \
5613 pipe_config->name.tu, \
5614 pipe_config->name.data_m, \
5615 pipe_config->name.data_n, \
5616 pipe_config->name.link_m, \
5617 pipe_config->name.link_n); \
5618 ret = false; \
5619 } \
5620 } while (0)
5621
5622 #define PIPE_CONF_CHECK_TIMINGS(name) do { \
5623 PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
5624 PIPE_CONF_CHECK_I(name.crtc_htotal); \
5625 PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
5626 PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
5627 PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
5628 PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
5629 PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
5630 PIPE_CONF_CHECK_I(name.crtc_vtotal); \
5631 PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
5632 PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
5633 PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
5634 PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
5635 } while (0)
5636
5637 #define PIPE_CONF_CHECK_RECT(name) do { \
5638 PIPE_CONF_CHECK_I(name.x1); \
5639 PIPE_CONF_CHECK_I(name.x2); \
5640 PIPE_CONF_CHECK_I(name.y1); \
5641 PIPE_CONF_CHECK_I(name.y2); \
5642 } while (0)
5643
5644 /* This is required for BDW+ where there is only one set of registers for
5645 * switching between high and low RR.
5646 * This macro can be used whenever a comparison has to be made between one
5647 * hw state and multiple sw state variables.
5648 */
5649 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
5650 if (!intel_compare_link_m_n(&current_config->name, \
5651 &pipe_config->name) && \
5652 !intel_compare_link_m_n(&current_config->alt_name, \
5653 &pipe_config->name)) { \
5654 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5655 "(expected tu %i data %i/%i link %i/%i, " \
5656 "or tu %i data %i/%i link %i/%i, " \
5657 "found tu %i, data %i/%i link %i/%i)", \
5658 current_config->name.tu, \
5659 current_config->name.data_m, \
5660 current_config->name.data_n, \
5661 current_config->name.link_m, \
5662 current_config->name.link_n, \
5663 current_config->alt_name.tu, \
5664 current_config->alt_name.data_m, \
5665 current_config->alt_name.data_n, \
5666 current_config->alt_name.link_m, \
5667 current_config->alt_name.link_n, \
5668 pipe_config->name.tu, \
5669 pipe_config->name.data_m, \
5670 pipe_config->name.data_n, \
5671 pipe_config->name.link_m, \
5672 pipe_config->name.link_n); \
5673 ret = false; \
5674 } \
5675 } while (0)
5676
5677 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
5678 if ((current_config->name ^ pipe_config->name) & (mask)) { \
5679 pipe_config_mismatch(fastset, crtc, __stringify(name), \
5680 "(%x) (expected %i, found %i)", \
5681 (mask), \
5682 current_config->name & (mask), \
5683 pipe_config->name & (mask)); \
5684 ret = false; \
5685 } \
5686 } while (0)
5687
5688 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
5689 if (!intel_compare_infoframe(&current_config->infoframes.name, \
5690 &pipe_config->infoframes.name)) { \
5691 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
5692 &current_config->infoframes.name, \
5693 &pipe_config->infoframes.name); \
5694 ret = false; \
5695 } \
5696 } while (0)
5697
5698 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
5699 if (!current_config->has_psr && !pipe_config->has_psr && \
5700 !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
5701 &pipe_config->infoframes.name)) { \
5702 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
5703 &current_config->infoframes.name, \
5704 &pipe_config->infoframes.name); \
5705 ret = false; \
5706 } \
5707 } while (0)
5708
5709 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
5710 if (current_config->name1 != pipe_config->name1) { \
5711 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
5712 "(expected %i, found %i, won't compare lut values)", \
5713 current_config->name1, \
5714 pipe_config->name1); \
5715 ret = false;\
5716 } else { \
5717 if (!intel_color_lut_equal(current_config->name2, \
5718 pipe_config->name2, pipe_config->name1, \
5719 bit_precision)) { \
5720 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
5721 "hw_state doesn't match sw_state"); \
5722 ret = false; \
5723 } \
5724 } \
5725 } while (0)
5726
5727 #define PIPE_CONF_QUIRK(quirk) \
5728 ((current_config->quirks | pipe_config->quirks) & (quirk))
5729
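/*
 * A sketch of how the checkers above are used: PIPE_CONF_CHECK_I(lane_count)
 * expands to roughly
 *
 * if (current_config->lane_count != pipe_config->lane_count) {
 * pipe_config_mismatch(fastset, crtc, "lane_count",
 * "(expected %i, found %i)", ...);
 * ret = false;
 * }
 */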
5730 PIPE_CONF_CHECK_I(hw.enable);
5731 PIPE_CONF_CHECK_I(hw.active);
5732
5733 PIPE_CONF_CHECK_I(cpu_transcoder);
5734 PIPE_CONF_CHECK_I(mst_master_transcoder);
5735
5736 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
5737 PIPE_CONF_CHECK_I(fdi_lanes);
5738 PIPE_CONF_CHECK_M_N(fdi_m_n);
5739
5740 PIPE_CONF_CHECK_I(lane_count);
5741 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
5742
5743 if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
5744 if (!fastset || !pipe_config->seamless_m_n)
5745 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
5746 } else {
5747 PIPE_CONF_CHECK_M_N(dp_m_n);
5748 PIPE_CONF_CHECK_M_N(dp_m2_n2);
5749 }
5750
5751 PIPE_CONF_CHECK_X(output_types);
5752
5753 PIPE_CONF_CHECK_I(framestart_delay);
5754 PIPE_CONF_CHECK_I(msa_timing_delay);
5755
5756 PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
5757 PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);
5758
5759 PIPE_CONF_CHECK_I(pixel_multiplier);
5760
5761 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5762 DRM_MODE_FLAG_INTERLACE);
5763
5764 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
5765 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5766 DRM_MODE_FLAG_PHSYNC);
5767 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5768 DRM_MODE_FLAG_NHSYNC);
5769 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5770 DRM_MODE_FLAG_PVSYNC);
5771 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5772 DRM_MODE_FLAG_NVSYNC);
5773 }
5774
5775 PIPE_CONF_CHECK_I(output_format);
5776 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
5777 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
5778 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5779 PIPE_CONF_CHECK_BOOL(limited_color_range);
5780
5781 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
5782 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
5783 PIPE_CONF_CHECK_BOOL(has_infoframe);
5784 PIPE_CONF_CHECK_BOOL(fec_enable);
5785
5786 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
5787
5788 PIPE_CONF_CHECK_X(gmch_pfit.control);
5789 /* pfit ratios are autocomputed by the hw on gen4+ */
5790 if (DISPLAY_VER(dev_priv) < 4)
5791 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
5792 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
5793
5794 /*
5795 * Changing the EDP transcoder input mux
5796 * (A_ONOFF vs. A_ON) requires a full modeset.
5797 */
5798 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
5799
5800 if (!fastset) {
5801 PIPE_CONF_CHECK_RECT(pipe_src);
5802
5803 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
5804 PIPE_CONF_CHECK_RECT(pch_pfit.dst);
5805
5806 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
5807 PIPE_CONF_CHECK_I(pixel_rate);
5808
5809 PIPE_CONF_CHECK_X(gamma_mode);
5810 if (IS_CHERRYVIEW(dev_priv))
5811 PIPE_CONF_CHECK_X(cgm_mode);
5812 else
5813 PIPE_CONF_CHECK_X(csc_mode);
5814 PIPE_CONF_CHECK_BOOL(gamma_enable);
5815 PIPE_CONF_CHECK_BOOL(csc_enable);
5816
5817 PIPE_CONF_CHECK_I(linetime);
5818 PIPE_CONF_CHECK_I(ips_linetime);
5819
5820 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
5821 if (bp_gamma)
5822 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
5823
5824 if (current_config->active_planes) {
5825 PIPE_CONF_CHECK_BOOL(has_psr);
5826 PIPE_CONF_CHECK_BOOL(has_psr2);
5827 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
5828 PIPE_CONF_CHECK_I(dc3co_exitline);
5829 }
5830 }
5831
5832 PIPE_CONF_CHECK_BOOL(double_wide);
5833
5834 if (dev_priv->display.dpll.mgr) {
5835 PIPE_CONF_CHECK_P(shared_dpll);
5836
5837 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
5838 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
5839 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
5840 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5841 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
5842 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
5843 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
5844 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
5845 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
5846 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
5847 PIPE_CONF_CHECK_X(dpll_hw_state.div0);
5848 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
5849 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
5850 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
5851 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
5852 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
5853 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
5854 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
5855 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
5856 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
5857 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
5858 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
5859 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
5860 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
5861 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
5862 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
5863 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
5864 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
5865 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
5866 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
5867 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
5868 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
5869 }
5870
5871 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
5872 PIPE_CONF_CHECK_X(dsi_pll.div);
5873
5874 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
5875 PIPE_CONF_CHECK_I(pipe_bpp);
5876
5877 if (!fastset || !pipe_config->seamless_m_n) {
5878 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
5879 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
5880 }
5881 PIPE_CONF_CHECK_I(port_clock);
5882
5883 PIPE_CONF_CHECK_I(min_voltage_level);
5884
5885 if (current_config->has_psr || pipe_config->has_psr)
5886 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
5887 ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
5888 else
5889 PIPE_CONF_CHECK_X(infoframes.enable);
5890
5891 PIPE_CONF_CHECK_X(infoframes.gcp);
5892 PIPE_CONF_CHECK_INFOFRAME(avi);
5893 PIPE_CONF_CHECK_INFOFRAME(spd);
5894 PIPE_CONF_CHECK_INFOFRAME(hdmi);
5895 PIPE_CONF_CHECK_INFOFRAME(drm);
5896 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
5897
5898 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
5899 PIPE_CONF_CHECK_I(master_transcoder);
5900 PIPE_CONF_CHECK_X(bigjoiner_pipes);
5901
5902 PIPE_CONF_CHECK_I(dsc.compression_enable);
5903 PIPE_CONF_CHECK_I(dsc.dsc_split);
5904 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
5905
5906 PIPE_CONF_CHECK_BOOL(splitter.enable);
5907 PIPE_CONF_CHECK_I(splitter.link_count);
5908 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
5909
5910 PIPE_CONF_CHECK_BOOL(vrr.enable);
5911 PIPE_CONF_CHECK_I(vrr.vmin);
5912 PIPE_CONF_CHECK_I(vrr.vmax);
5913 PIPE_CONF_CHECK_I(vrr.flipline);
5914 PIPE_CONF_CHECK_I(vrr.pipeline_full);
5915 PIPE_CONF_CHECK_I(vrr.guardband);
5916
5917 #undef PIPE_CONF_CHECK_X
5918 #undef PIPE_CONF_CHECK_I
5919 #undef PIPE_CONF_CHECK_BOOL
5920 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
5921 #undef PIPE_CONF_CHECK_P
5922 #undef PIPE_CONF_CHECK_FLAGS
5923 #undef PIPE_CONF_CHECK_COLOR_LUT
5924 #undef PIPE_CONF_CHECK_TIMINGS
5925 #undef PIPE_CONF_CHECK_RECT
5926 #undef PIPE_CONF_QUIRK
5927
5928 return ret;
5929 }
5930
5931 static void
5932 intel_verify_planes(struct intel_atomic_state *state)
5933 {
5934 struct intel_plane *plane;
5935 const struct intel_plane_state *plane_state;
5936 int i;
5937
5938 for_each_new_intel_plane_in_state(state, plane,
5939 plane_state, i)
5940 assert_plane(plane, plane_state->planar_slave ||
5941 plane_state->uapi.visible);
5942 }
5943
5944 int intel_modeset_all_pipes(struct intel_atomic_state *state)
5945 {
5946 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5947 struct intel_crtc *crtc;
5948
5949 /*
5950 * Add all pipes to the state, and force
5951 * a modeset on all the active ones.
5952 */
5953 for_each_intel_crtc(&dev_priv->drm, crtc) {
5954 struct intel_crtc_state *crtc_state;
5955 int ret;
5956
5957 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5958 if (IS_ERR(crtc_state))
5959 return PTR_ERR(crtc_state);
5960
5961 if (!crtc_state->hw.active ||
5962 drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
5963 continue;
5964
5965 crtc_state->uapi.mode_changed = true;
5966
5967 ret = drm_atomic_add_affected_connectors(&state->base,
5968 &crtc->base);
5969 if (ret)
5970 return ret;
5971
5972 ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
5973 if (ret)
5974 return ret;
5975
5976 ret = intel_atomic_add_affected_planes(state, crtc);
5977 if (ret)
5978 return ret;
5979
5980 crtc_state->update_planes |= crtc_state->active_planes;
5981 }
5982
5983 return 0;
5984 }
5985
5986 void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
5987 {
5988 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5989 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5990 struct drm_display_mode adjusted_mode;
5991
5992 drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
5993
5994 if (crtc_state->vrr.enable) {
5995 adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
5996 adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
5997 adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
5998 crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
5999 }
6000
6001 drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
6002
6003 crtc->mode_flags = crtc_state->mode_flags;
6004
6005 /*
6006 * The scanline counter increments at the leading edge of hsync.
6007 *
6008 * On most platforms it starts counting from vtotal-1 on the
6009 * first active line. That means the scanline counter value is
6010 * always one less than what we would expect. I.e. just after
6011 * start of vblank, which also occurs at start of hsync (on the
6012 * last active line), the scanline counter will read vblank_start-1.
6013 *
6014 * On gen2 the scanline counter starts counting from 1 instead
6015 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
6016 * to keep the value positive), instead of adding one.
6017 *
6018 * On HSW+ the behaviour of the scanline counter depends on the output
6019 * type. For DP ports it behaves like most other platforms, but on HDMI
6020 * there's an extra 1 line difference. So we need to add two instead of
6021 * one to the value.
6022 *
6023 * On VLV/CHV DSI the scanline counter would appear to increment
6024 * approx. 1/3 of a scanline before start of vblank. Unfortunately
6025 * that means we can't tell whether we're in vblank or not while
6026 * we're on that particular line. We must still set scanline_offset
6027 * to 1 so that the vblank timestamps come out correct when we query
6028 * the scanline counter from within the vblank interrupt handler.
6029 * However if queried just before the start of vblank we'll get an
6030 * answer that's slightly in the future.
6031 */
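/*
 * Example offsets (hypothetical progressive mode with vtotal = 525):
 * gen2 uses scanline_offset = 524 (vtotal - 1), HSW+ HDMI outputs use
 * 2, and everything else uses 1, matching the rules above.
 */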
6032 if (DISPLAY_VER(dev_priv) == 2) {
6033 int vtotal;
6034
6035 vtotal = adjusted_mode.crtc_vtotal;
6036 if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6037 vtotal /= 2;
6038
6039 crtc->scanline_offset = vtotal - 1;
6040 } else if (HAS_DDI(dev_priv) &&
6041 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
6042 crtc->scanline_offset = 2;
6043 } else {
6044 crtc->scanline_offset = 1;
6045 }
6046 }
6047
6048 /*
6049 * This implements the workaround described in the "notes" section of the mode
6050 * set sequence documentation. When going from no pipes or single pipe to
6051 * multiple pipes, and planes are enabled after the pipe, we need to wait at
6052 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
6053 */
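/*
 * Example scenario (illustrative): with only pipe A running, a modeset
 * that enables pipes B and C makes B the first modesetted crtc, so B's
 * state records pipe A as hsw_workaround_pipe and plane enabling on B
 * waits for two vblanks on A.
 */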
6054 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
6055 {
6056 struct intel_crtc_state *crtc_state;
6057 struct intel_crtc *crtc;
6058 struct intel_crtc_state *first_crtc_state = NULL;
6059 struct intel_crtc_state *other_crtc_state = NULL;
6060 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
6061 int i;
6062
6063 /* look at all crtcs that are going to be enabled during the modeset */
6064 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6065 if (!crtc_state->hw.active ||
6066 !intel_crtc_needs_modeset(crtc_state))
6067 continue;
6068
6069 if (first_crtc_state) {
6070 other_crtc_state = crtc_state;
6071 break;
6072 } else {
6073 first_crtc_state = crtc_state;
6074 first_pipe = crtc->pipe;
6075 }
6076 }
6077
6078 /* No workaround needed? */
6079 if (!first_crtc_state)
6080 return 0;
6081
6082 /* w/a possibly needed, check how many crtcs are already enabled. */
6083 for_each_intel_crtc(state->base.dev, crtc) {
6084 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6085 if (IS_ERR(crtc_state))
6086 return PTR_ERR(crtc_state);
6087
6088 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
6089
6090 if (!crtc_state->hw.active ||
6091 intel_crtc_needs_modeset(crtc_state))
6092 continue;
6093
6094 /* 2 or more enabled crtcs means no need for w/a */
6095 if (enabled_pipe != INVALID_PIPE)
6096 return 0;
6097
6098 enabled_pipe = crtc->pipe;
6099 }
6100
6101 if (enabled_pipe != INVALID_PIPE)
6102 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
6103 else if (other_crtc_state)
6104 other_crtc_state->hsw_workaround_pipe = first_pipe;
6105
6106 return 0;
6107 }
6108
6109 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
6110 u8 active_pipes)
6111 {
6112 const struct intel_crtc_state *crtc_state;
6113 struct intel_crtc *crtc;
6114 int i;
6115
6116 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6117 if (crtc_state->hw.active)
6118 active_pipes |= BIT(crtc->pipe);
6119 else
6120 active_pipes &= ~BIT(crtc->pipe);
6121 }
6122
6123 return active_pipes;
6124 }
6125
6126 static int intel_modeset_checks(struct intel_atomic_state *state)
6127 {
6128 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6129
6130 state->modeset = true;
6131
6132 if (IS_HASWELL(dev_priv))
6133 return hsw_mode_set_planes_workaround(state);
6134
6135 return 0;
6136 }
6137
6138 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
6139 struct intel_crtc_state *new_crtc_state)
6140 {
6141 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
6142 return;
6143
6144 new_crtc_state->uapi.mode_changed = false;
6145 new_crtc_state->update_pipe = true;
6146 }
6147
6148 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
6149 struct intel_crtc *crtc,
6150 u8 plane_ids_mask)
6151 {
6152 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6153 struct intel_plane *plane;
6154
6155 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6156 struct intel_plane_state *plane_state;
6157
6158 if ((plane_ids_mask & BIT(plane->id)) == 0)
6159 continue;
6160
6161 plane_state = intel_atomic_get_plane_state(state, plane);
6162 if (IS_ERR(plane_state))
6163 return PTR_ERR(plane_state);
6164 }
6165
6166 return 0;
6167 }
6168
6169 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
6170 struct intel_crtc *crtc)
6171 {
6172 const struct intel_crtc_state *old_crtc_state =
6173 intel_atomic_get_old_crtc_state(state, crtc);
6174 const struct intel_crtc_state *new_crtc_state =
6175 intel_atomic_get_new_crtc_state(state, crtc);
6176
6177 return intel_crtc_add_planes_to_state(state, crtc,
6178 old_crtc_state->enabled_planes |
6179 new_crtc_state->enabled_planes);
6180 }
6181
6182 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
6183 {
6184 /* See {hsw,vlv,ivb}_plane_ratio() */
6185 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
6186 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
6187 IS_IVYBRIDGE(dev_priv);
6188 }
6189
6190 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
6191 struct intel_crtc *crtc,
6192 struct intel_crtc *other)
6193 {
6194 const struct intel_plane_state *plane_state;
6195 struct intel_plane *plane;
6196 u8 plane_ids = 0;
6197 int i;
6198
6199 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6200 if (plane->pipe == crtc->pipe)
6201 plane_ids |= BIT(plane->id);
6202 }
6203
6204 return intel_crtc_add_planes_to_state(state, other, plane_ids);
6205 }
6206
6207 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
6208 {
6209 struct drm_i915_private *i915 = to_i915(state->base.dev);
6210 const struct intel_crtc_state *crtc_state;
6211 struct intel_crtc *crtc;
6212 int i;
6213
6214 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6215 struct intel_crtc *other;
6216
6217 for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
6218 crtc_state->bigjoiner_pipes) {
6219 int ret;
6220
6221 if (crtc == other)
6222 continue;
6223
6224 ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
6225 if (ret)
6226 return ret;
6227 }
6228 }
6229
6230 return 0;
6231 }
6232
6233 static int intel_atomic_check_planes(struct intel_atomic_state *state)
6234 {
6235 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6236 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6237 struct intel_plane_state *plane_state;
6238 struct intel_plane *plane;
6239 struct intel_crtc *crtc;
6240 int i, ret;
6241
6242 ret = icl_add_linked_planes(state);
6243 if (ret)
6244 return ret;
6245
6246 ret = intel_bigjoiner_add_affected_planes(state);
6247 if (ret)
6248 return ret;
6249
6250 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6251 ret = intel_plane_atomic_check(state, plane);
6252 if (ret) {
6253 drm_dbg_atomic(&dev_priv->drm,
6254 "[PLANE:%d:%s] atomic driver check failed\n",
6255 plane->base.base.id, plane->base.name);
6256 return ret;
6257 }
6258 }
6259
6260 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6261 new_crtc_state, i) {
6262 u8 old_active_planes, new_active_planes;
6263
6264 ret = icl_check_nv12_planes(new_crtc_state);
6265 if (ret)
6266 return ret;
6267
6268 /*
6269 * On some platforms the number of active planes affects
6270 * the planes' minimum cdclk calculation. Add such planes
6271 * to the state before we compute the minimum cdclk.
6272 */
6273 if (!active_planes_affects_min_cdclk(dev_priv))
6274 continue;
6275
6276 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
6277 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
6278
6279 if (hweight8(old_active_planes) == hweight8(new_active_planes))
6280 continue;
6281
6282 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
6283 if (ret)
6284 return ret;
6285 }
6286
6287 return 0;
6288 }
6289
6290 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
6291 {
6292 struct intel_crtc_state *crtc_state;
6293 struct intel_crtc *crtc;
6294 int i;
6295
6296 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6297 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6298 int ret;
6299
6300 ret = intel_crtc_atomic_check(state, crtc);
6301 if (ret) {
6302 drm_dbg_atomic(&i915->drm,
6303 "[CRTC:%d:%s] atomic driver check failed\n",
6304 crtc->base.base.id, crtc->base.name);
6305 return ret;
6306 }
6307 }
6308
6309 return 0;
6310 }
6311
6312 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
6313 u8 transcoders)
6314 {
6315 const struct intel_crtc_state *new_crtc_state;
6316 struct intel_crtc *crtc;
6317 int i;
6318
6319 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6320 if (new_crtc_state->hw.enable &&
6321 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
6322 intel_crtc_needs_modeset(new_crtc_state))
6323 return true;
6324 }
6325
6326 return false;
6327 }
6328
6329 static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
6330 u8 pipes)
6331 {
6332 const struct intel_crtc_state *new_crtc_state;
6333 struct intel_crtc *crtc;
6334 int i;
6335
6336 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6337 if (new_crtc_state->hw.enable &&
6338 pipes & BIT(crtc->pipe) &&
6339 intel_crtc_needs_modeset(new_crtc_state))
6340 return true;
6341 }
6342
6343 return false;
6344 }
6345
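/*
 * Example mask layout (hypothetical): a bigjoiner config joining pipes
 * A and B stores BIT(PIPE_A) | BIT(PIPE_B) in bigjoiner_pipes on both
 * crtc states, with pipe A acting as the master; the sanity checks
 * below depend on the master owning the lowest pipe in that mask.
 */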
6346 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
6347 struct intel_crtc *master_crtc)
6348 {
6349 struct drm_i915_private *i915 = to_i915(state->base.dev);
6350 struct intel_crtc_state *master_crtc_state =
6351 intel_atomic_get_new_crtc_state(state, master_crtc);
6352 struct intel_crtc *slave_crtc;
6353
6354 if (!master_crtc_state->bigjoiner_pipes)
6355 return 0;
6356
6357 /* sanity check */
6358 if (drm_WARN_ON(&i915->drm,
6359 master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
6360 return -EINVAL;
6361
6362 if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
6363 drm_dbg_kms(&i915->drm,
6364 "[CRTC:%d:%s] Cannot act as big joiner master "
6365 "(need 0x%x as pipes, only 0x%x possible)\n",
6366 master_crtc->base.base.id, master_crtc->base.name,
6367 master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
6368 return -EINVAL;
6369 }
6370
6371 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
6372 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
6373 struct intel_crtc_state *slave_crtc_state;
6374 int ret;
6375
6376 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
6377 if (IS_ERR(slave_crtc_state))
6378 return PTR_ERR(slave_crtc_state);
6379
6380 /* master being enabled, slave was already configured? */
6381 if (slave_crtc_state->uapi.enable) {
6382 drm_dbg_kms(&i915->drm,
6383 "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
6384 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
6385 slave_crtc->base.base.id, slave_crtc->base.name,
6386 master_crtc->base.base.id, master_crtc->base.name);
6387 return -EINVAL;
6388 }
6389
6390 /*
6391 * The state copy logic assumes the master crtc gets processed
6392 * before the slave crtc during the main compute_config loop.
6393 * This works because the crtcs are created in pipe order,
6394 * and the hardware requires master pipe < slave pipe as well.
6395 * Should that change we need to rethink the logic.
6396 */
6397 if (WARN_ON(drm_crtc_index(&master_crtc->base) >
6398 drm_crtc_index(&slave_crtc->base)))
6399 return -EINVAL;
6400
6401 drm_dbg_kms(&i915->drm,
6402 "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
6403 slave_crtc->base.base.id, slave_crtc->base.name,
6404 master_crtc->base.base.id, master_crtc->base.name);
6405
6406 slave_crtc_state->bigjoiner_pipes =
6407 master_crtc_state->bigjoiner_pipes;
6408
6409 ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
6410 if (ret)
6411 return ret;
6412 }
6413
6414 return 0;
6415 }
6416
6417 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
6418 struct intel_crtc *master_crtc)
6419 {
6420 struct drm_i915_private *i915 = to_i915(state->base.dev);
6421 struct intel_crtc_state *master_crtc_state =
6422 intel_atomic_get_new_crtc_state(state, master_crtc);
6423 struct intel_crtc *slave_crtc;
6424
6425 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
6426 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
6427 struct intel_crtc_state *slave_crtc_state =
6428 intel_atomic_get_new_crtc_state(state, slave_crtc);
6429
6430 slave_crtc_state->bigjoiner_pipes = 0;
6431
6432 intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
6433 }
6434
6435 master_crtc_state->bigjoiner_pipes = 0;
6436 }
6437
6438 /**
6439 * DOC: asynchronous flip implementation
6440 *
6441 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
6442 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
6443 * Correspondingly, support is currently added for primary plane only.
6444 *
6445 * Async flip can only change the plane surface address, so anything else
6446 * changing is rejected by the intel_async_flip_check_hw() function.
6447 * Once this check passes, the flip done interrupt is enabled using
6448 * the intel_crtc_enable_flip_done() function.
6449 *
6450 * As soon as the surface address register is written, the flip done interrupt is
6451 * generated and the requested events are sent to userspace from the interrupt
6452 * handler itself. The timestamp and sequence sent with the flip done event
6453 * correspond to the last vblank and have no relation to the actual time when
6454 * the flip done event was sent.
6455 */
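/*
 * Illustrative userspace sketch (not part of this driver): an async flip
 * is requested through the legacy page flip IOCTL roughly as follows,
 * assuming fd, crtc_id, fb_id and user_data come from the usual KMS setup:
 *
 *	drmModePageFlip(fd, crtc_id, fb_id,
 *			DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
 *			user_data);
 *
 * The flip done event is then delivered on fd and can be consumed with
 * drmHandleEvent().
 */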
6456 static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
6457 struct intel_crtc *crtc)
6458 {
6459 struct drm_i915_private *i915 = to_i915(state->base.dev);
6460 const struct intel_crtc_state *new_crtc_state =
6461 intel_atomic_get_new_crtc_state(state, crtc);
6462 const struct intel_plane_state *old_plane_state;
6463 struct intel_plane_state *new_plane_state;
6464 struct intel_plane *plane;
6465 int i;
6466
6467 if (!new_crtc_state->uapi.async_flip)
6468 return 0;
6469
6470 if (!new_crtc_state->uapi.active) {
6471 drm_dbg_kms(&i915->drm,
6472 "[CRTC:%d:%s] not active\n",
6473 crtc->base.base.id, crtc->base.name);
6474 return -EINVAL;
6475 }
6476
6477 if (intel_crtc_needs_modeset(new_crtc_state)) {
6478 drm_dbg_kms(&i915->drm,
6479 "[CRTC:%d:%s] modeset required\n",
6480 crtc->base.base.id, crtc->base.name);
6481 return -EINVAL;
6482 }
6483
6484 /*
6485 * FIXME: Bigjoiner+async flip is busted currently.
6486 * Remove this check once the issues are fixed.
6487 */
6488 if (new_crtc_state->bigjoiner_pipes) {
6489 drm_dbg_kms(&i915->drm,
6490 "[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
6491 crtc->base.base.id, crtc->base.name);
6492 return -EINVAL;
6493 }
6494
6495 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6496 new_plane_state, i) {
6497 if (plane->pipe != crtc->pipe)
6498 continue;
6499
6500 /*
6501 * TODO: Async flip is only supported through the page flip IOCTL
6502 * as of now, so support is currently added for the primary plane only.
6503 * Support for other planes on platforms that support
6504 * it (vlv/chv and icl+) should be added when async flip is
6505 * enabled in the atomic IOCTL path.
6506 */
6507 if (!plane->async_flip) {
6508 drm_dbg_kms(&i915->drm,
6509 "[PLANE:%d:%s] async flip not supported\n",
6510 plane->base.base.id, plane->base.name);
6511 return -EINVAL;
6512 }
6513
6514 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
6515 drm_dbg_kms(&i915->drm,
6516 "[PLANE:%d:%s] no old or new framebuffer\n",
6517 plane->base.base.id, plane->base.name);
6518 return -EINVAL;
6519 }
6520 }
6521
6522 return 0;
6523 }
6524
6525 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
6526 {
6527 struct drm_i915_private *i915 = to_i915(state->base.dev);
6528 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6529 const struct intel_plane_state *new_plane_state, *old_plane_state;
6530 struct intel_plane *plane;
6531 int i;
6532
6533 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6534 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6535
6536 if (!new_crtc_state->uapi.async_flip)
6537 return 0;
6538
6539 if (!new_crtc_state->hw.active) {
6540 drm_dbg_kms(&i915->drm,
6541 "[CRTC:%d:%s] not active\n",
6542 crtc->base.base.id, crtc->base.name);
6543 return -EINVAL;
6544 }
6545
6546 if (intel_crtc_needs_modeset(new_crtc_state)) {
6547 drm_dbg_kms(&i915->drm,
6548 "[CRTC:%d:%s] modeset required\n",
6549 crtc->base.base.id, crtc->base.name);
6550 return -EINVAL;
6551 }
6552
6553 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
6554 drm_dbg_kms(&i915->drm,
6555 "[CRTC:%d:%s] Active planes cannot be in async flip\n",
6556 crtc->base.base.id, crtc->base.name);
6557 return -EINVAL;
6558 }
6559
6560 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6561 new_plane_state, i) {
6562 if (plane->pipe != crtc->pipe)
6563 continue;
6564
6565 /*
6566 * Only async flip capable planes should be in the state
6567 * if we're really about to ask the hardware to perform
6568 * an async flip. We should never get this far otherwise.
6569 */
6570 if (drm_WARN_ON(&i915->drm,
6571 new_crtc_state->do_async_flip && !plane->async_flip))
6572 return -EINVAL;
6573
6574 /*
6575 * Only check async flip capable planes; other planes
6576 * may be involved in the initial commit due to
6577 * the wm0/ddb optimization.
6578 *
6579 * TODO: maybe we should track which planes were actually
6580 * requested to do the async flip...
6581 */
6582 if (!plane->async_flip)
6583 continue;
6584
6585 /*
6586 * FIXME: This check is kept generic for all platforms.
6587 * Need to verify this for all gen9 platforms to enable
6588 * this selectively if required.
6589 */
6590 switch (new_plane_state->hw.fb->modifier) {
6591 case I915_FORMAT_MOD_X_TILED:
6592 case I915_FORMAT_MOD_Y_TILED:
6593 case I915_FORMAT_MOD_Yf_TILED:
6594 case I915_FORMAT_MOD_4_TILED:
6595 break;
6596 default:
6597 drm_dbg_kms(&i915->drm,
6598 "[PLANE:%d:%s] Modifier does not support async flips\n",
6599 plane->base.base.id, plane->base.name);
6600 return -EINVAL;
6601 }
6602
6603 if (new_plane_state->hw.fb->format->num_planes > 1) {
6604 drm_dbg_kms(&i915->drm,
6605 "[PLANE:%d:%s] Planar formats do not support async flips\n",
6606 plane->base.base.id, plane->base.name);
6607 return -EINVAL;
6608 }
6609
6610 if (old_plane_state->view.color_plane[0].mapping_stride !=
6611 new_plane_state->view.color_plane[0].mapping_stride) {
6612 drm_dbg_kms(&i915->drm,
6613 "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
6614 plane->base.base.id, plane->base.name);
6615 return -EINVAL;
6616 }
6617
6618 if (old_plane_state->hw.fb->modifier !=
6619 new_plane_state->hw.fb->modifier) {
6620 drm_dbg_kms(&i915->drm,
6621 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
6622 plane->base.base.id, plane->base.name);
6623 return -EINVAL;
6624 }
6625
6626 if (old_plane_state->hw.fb->format !=
6627 new_plane_state->hw.fb->format) {
6628 drm_dbg_kms(&i915->drm,
6629 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
6630 plane->base.base.id, plane->base.name);
6631 return -EINVAL;
6632 }
6633
6634 if (old_plane_state->hw.rotation !=
6635 new_plane_state->hw.rotation) {
6636 drm_dbg_kms(&i915->drm,
6637 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
6638 plane->base.base.id, plane->base.name);
6639 return -EINVAL;
6640 }
6641
6642 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
6643 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
6644 drm_dbg_kms(&i915->drm,
6645 "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
6646 plane->base.base.id, plane->base.name);
6647 return -EINVAL;
6648 }
6649
6650 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
6651 drm_dbg_kms(&i915->drm,
6652 "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
6653 plane->base.base.id, plane->base.name);
6654 return -EINVAL;
6655 }
6656
6657 if (old_plane_state->hw.pixel_blend_mode !=
6658 new_plane_state->hw.pixel_blend_mode) {
6659 drm_dbg_kms(&i915->drm,
6660 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
6661 plane->base.base.id, plane->base.name);
6662 return -EINVAL;
6663 }
6664
6665 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
6666 drm_dbg_kms(&i915->drm,
6667 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
6668 plane->base.base.id, plane->base.name);
6669 return -EINVAL;
6670 }
6671
6672 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
6673 drm_dbg_kms(&i915->drm,
6674 "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
6675 plane->base.base.id, plane->base.name);
6676 return -EINVAL;
6677 }
6678
6679 /* plane decryption is allowed to change only in synchronous flips */
6680 if (old_plane_state->decrypt != new_plane_state->decrypt) {
6681 drm_dbg_kms(&i915->drm,
6682 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
6683 plane->base.base.id, plane->base.name);
6684 return -EINVAL;
6685 }
6686 }
6687
6688 return 0;
6689 }
6690
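/*
 * Make sure every pipe that participates in a bigjoiner configuration is
 * pulled into the state, and force a full modeset on all members of the
 * set whenever any one of them already needs one.
 */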
6691 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
6692 {
6693 struct drm_i915_private *i915 = to_i915(state->base.dev);
6694 struct intel_crtc_state *crtc_state;
6695 struct intel_crtc *crtc;
6696 u8 affected_pipes = 0;
6697 u8 modeset_pipes = 0;
6698 int i;
6699
6700 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6701 affected_pipes |= crtc_state->bigjoiner_pipes;
6702 if (intel_crtc_needs_modeset(crtc_state))
6703 modeset_pipes |= crtc_state->bigjoiner_pipes;
6704 }
6705
6706 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
6707 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6708 if (IS_ERR(crtc_state))
6709 return PTR_ERR(crtc_state);
6710 }
6711
6712 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
6713 int ret;
6714
6715 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6716
6717 crtc_state->uapi.mode_changed = true;
6718
6719 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
6720 if (ret)
6721 return ret;
6722
6723 ret = intel_atomic_add_affected_planes(state, crtc);
6724 if (ret)
6725 return ret;
6726 }
6727
6728 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6729 /* Kill old bigjoiner link, we may re-establish afterwards */
6730 if (intel_crtc_needs_modeset(crtc_state) &&
6731 intel_crtc_is_bigjoiner_master(crtc_state))
6732 kill_bigjoiner_slave(state, crtc);
6733 }
6734
6735 return 0;
6736 }
6737
6738 /**
6739 * intel_atomic_check - validate state object
6740 * @dev: drm device
6741 * @_state: state to validate
6742 */
6743 static int intel_atomic_check(struct drm_device *dev,
6744 struct drm_atomic_state *_state)
6745 {
6746 struct drm_i915_private *dev_priv = to_i915(dev);
6747 struct intel_atomic_state *state = to_intel_atomic_state(_state);
6748 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6749 struct intel_crtc *crtc;
6750 int ret, i;
6751 bool any_ms = false;
6752
6753 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6754 new_crtc_state, i) {
6755 if (new_crtc_state->inherited != old_crtc_state->inherited)
6756 new_crtc_state->uapi.mode_changed = true;
6757
6758 if (new_crtc_state->uapi.scaling_filter !=
6759 old_crtc_state->uapi.scaling_filter)
6760 new_crtc_state->uapi.mode_changed = true;
6761 }
6762
6763 intel_vrr_check_modeset(state);
6764
6765 ret = drm_atomic_helper_check_modeset(dev, &state->base);
6766 if (ret)
6767 goto fail;
6768
6769 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6770 ret = intel_async_flip_check_uapi(state, crtc);
6771 if (ret)
6772 return ret;
6773 }
6774
6775 ret = intel_bigjoiner_add_affected_crtcs(state);
6776 if (ret)
6777 goto fail;
6778
6779 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6780 new_crtc_state, i) {
6781 if (!intel_crtc_needs_modeset(new_crtc_state)) {
6782 if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
6783 copy_bigjoiner_crtc_state_nomodeset(state, crtc);
6784 else
6785 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
6786 continue;
6787 }
6788
6789 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
6790 drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
6791 continue;
6792 }
6793
6794 ret = intel_crtc_prepare_cleared_state(state, crtc);
6795 if (ret)
6796 goto fail;
6797
6798 if (!new_crtc_state->hw.enable)
6799 continue;
6800
6801 ret = intel_modeset_pipe_config(state, crtc);
6802 if (ret)
6803 goto fail;
6804
6805 ret = intel_atomic_check_bigjoiner(state, crtc);
6806 if (ret)
6807 goto fail;
6808 }
6809
6810 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6811 new_crtc_state, i) {
6812 if (!intel_crtc_needs_modeset(new_crtc_state))
6813 continue;
6814
6815 if (new_crtc_state->hw.enable) {
6816 ret = intel_modeset_pipe_config_late(state, crtc);
6817 if (ret)
6818 goto fail;
6819 }
6820
6821 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
6822 }
6823
6824 /*
6825 * Check if fastset is allowed by external dependencies like other
6826 * pipes and transcoders.
6827 *
6828 * Right now it only forces a full modeset when the MST master
6829 * transcoder did not change but the pipe of the master transcoder
6830 * needs a full modeset, in which case all slaves also need a full
6831 * modeset. Likewise for port synced crtcs: if one of the synced
6832 * crtcs needs a full modeset, all other synced crtcs are forced
6833 * to do a full modeset as well.
6834 */
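/*
 * Example (hypothetical topology): if crtcs A and B drive two MST streams
 * whose master transcoder belongs to A, and A's pipe needs a full modeset,
 * the loop below flags B for a full modeset too, even though B's own state
 * would otherwise have allowed a fastset.
 */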
6835 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6836 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
6837 continue;
6838
6839 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
6840 enum transcoder master = new_crtc_state->mst_master_transcoder;
6841
6842 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
6843 new_crtc_state->uapi.mode_changed = true;
6844 new_crtc_state->update_pipe = false;
6845 }
6846 }
6847
6848 if (is_trans_port_sync_mode(new_crtc_state)) {
6849 u8 trans = new_crtc_state->sync_mode_slaves_mask;
6850
6851 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
6852 trans |= BIT(new_crtc_state->master_transcoder);
6853
6854 if (intel_cpu_transcoders_need_modeset(state, trans)) {
6855 new_crtc_state->uapi.mode_changed = true;
6856 new_crtc_state->update_pipe = false;
6857 }
6858 }
6859
6860 if (new_crtc_state->bigjoiner_pipes) {
6861 if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
6862 new_crtc_state->uapi.mode_changed = true;
6863 new_crtc_state->update_pipe = false;
6864 }
6865 }
6866 }
6867
6868 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6869 new_crtc_state, i) {
6870 if (!intel_crtc_needs_modeset(new_crtc_state))
6871 continue;
6872
6873 any_ms = true;
6874
6875 intel_release_shared_dplls(state, crtc);
6876 }
6877
6878 if (any_ms && !check_digital_port_conflicts(state)) {
6879 drm_dbg_kms(&dev_priv->drm,
6880 "rejecting conflicting digital port configuration\n");
6881 ret = -EINVAL;
6882 goto fail;
6883 }
6884
6885 ret = drm_dp_mst_atomic_check(&state->base);
6886 if (ret)
6887 goto fail;
6888
6889 ret = intel_atomic_check_planes(state);
6890 if (ret)
6891 goto fail;
6892
6893 ret = intel_compute_global_watermarks(state);
6894 if (ret)
6895 goto fail;
6896
6897 ret = intel_bw_atomic_check(state);
6898 if (ret)
6899 goto fail;
6900
6901 ret = intel_cdclk_atomic_check(state, &any_ms);
6902 if (ret)
6903 goto fail;
6904
6905 if (intel_any_crtc_needs_modeset(state))
6906 any_ms = true;
6907
6908 if (any_ms) {
6909 ret = intel_modeset_checks(state);
6910 if (ret)
6911 goto fail;
6912
6913 ret = intel_modeset_calc_cdclk(state);
6914 if (ret)
6915 return ret;
6916 }
6917
6918 ret = intel_atomic_check_crtcs(state);
6919 if (ret)
6920 goto fail;
6921
6922 ret = intel_fbc_atomic_check(state);
6923 if (ret)
6924 goto fail;
6925
6926 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6927 new_crtc_state, i) {
6928 ret = intel_async_flip_check_hw(state, crtc);
6929 if (ret)
6930 goto fail;
6931
6932 if (!intel_crtc_needs_modeset(new_crtc_state) &&
6933 !new_crtc_state->update_pipe)
6934 continue;
6935
6936 intel_crtc_state_dump(new_crtc_state, state,
6937 intel_crtc_needs_modeset(new_crtc_state) ?
6938 "modeset" : "fastset");
6939 }
6940
6941 return 0;
6942
6943 fail:
6944 if (ret == -EDEADLK)
6945 return ret;
6946
6947 /*
6948 * FIXME would probably be nice to know which crtc specifically
6949 * caused the failure, in cases where we can pinpoint it.
6950 */
6951 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6952 new_crtc_state, i)
6953 intel_crtc_state_dump(new_crtc_state, state, "failed");
6954
6955 return ret;
6956 }
6957
6958 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
6959 {
6960 struct intel_crtc_state *crtc_state;
6961 struct intel_crtc *crtc;
6962 int i, ret;
6963
6964 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
6965 if (ret < 0)
6966 return ret;
6967
6968 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6969 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6970
6971 if (mode_changed || crtc_state->update_pipe ||
6972 crtc_state->uapi.color_mgmt_changed) {
6973 intel_dsb_prepare(crtc_state);
6974 }
6975 }
6976
6977 return 0;
6978 }
6979
6980 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
6981 struct intel_crtc_state *crtc_state)
6982 {
6983 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6984
6985 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
6986 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
6987
6988 if (crtc_state->has_pch_encoder) {
6989 enum pipe pch_transcoder =
6990 intel_crtc_pch_transcoder(crtc);
6991
6992 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
6993 }
6994 }
6995
6996 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
6997 const struct intel_crtc_state *new_crtc_state)
6998 {
6999 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7000 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7001
7002 /*
7003 * Update pipe size and adjust fitter if needed: the reason for this is
7004 * that in compute_mode_changes we check the native mode (not the pfit
7005 * mode) to see if we can flip rather than do a full mode set. In the
7006 * fastboot case, we'll flip, but if we don't update the pipesrc and
7007 * pfit state, we'll end up with a big fb scanned out into the wrong
7008 * sized surface.
7009 */
7010 intel_set_pipe_src_size(new_crtc_state);
7011
7012 /* on skylake this is done by detaching scalers */
7013 if (DISPLAY_VER(dev_priv) >= 9) {
7014 if (new_crtc_state->pch_pfit.enabled)
7015 skl_pfit_enable(new_crtc_state);
7016 } else if (HAS_PCH_SPLIT(dev_priv)) {
7017 if (new_crtc_state->pch_pfit.enabled)
7018 ilk_pfit_enable(new_crtc_state);
7019 else if (old_crtc_state->pch_pfit.enabled)
7020 ilk_pfit_disable(old_crtc_state);
7021 }
7022
7023 /*
7024 * The register is supposedly single buffered so perhaps
7025 * not 100% correct to do this here. But SKL+ calculate
7026 * this based on the adjusted pixel rate, so pfit changes do
7027 * affect it and so it must be updated for fastsets.
7028 * HSW/BDW only really need this here for fastboot, after
7029 * that the value should not change without a full modeset.
7030 */
7031 if (DISPLAY_VER(dev_priv) >= 9 ||
7032 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
7033 hsw_set_linetime_wm(new_crtc_state);
7034
7035 if (new_crtc_state->seamless_m_n)
7036 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
7037 &new_crtc_state->dp_m_n);
7038 }
7039
7040 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
7041 struct intel_crtc *crtc)
7042 {
7043 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7044 const struct intel_crtc_state *old_crtc_state =
7045 intel_atomic_get_old_crtc_state(state, crtc);
7046 const struct intel_crtc_state *new_crtc_state =
7047 intel_atomic_get_new_crtc_state(state, crtc);
7048 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7049
7050 /*
7051 * During modesets, the pipe configuration was programmed as the
7052 * CRTC was enabled.
7053 */
7054 if (!modeset) {
7055 if (new_crtc_state->uapi.color_mgmt_changed ||
7056 new_crtc_state->update_pipe)
7057 intel_color_commit_arm(new_crtc_state);
7058
7059 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
7060 bdw_set_pipemisc(new_crtc_state);
7061
7062 if (new_crtc_state->update_pipe)
7063 intel_pipe_fastset(old_crtc_state, new_crtc_state);
7064 }
7065
7066 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
7067
7068 intel_atomic_update_watermarks(state, crtc);
7069 }
7070
7071 static void commit_pipe_post_planes(struct intel_atomic_state *state,
7072 struct intel_crtc *crtc)
7073 {
7074 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7075 const struct intel_crtc_state *new_crtc_state =
7076 intel_atomic_get_new_crtc_state(state, crtc);
7077
7078 /*
7079 * Disable the scaler(s) after the plane(s) so that we don't
7080 * get a catastrophic underrun even if the two operations
7081 * end up happening in two different frames.
7082 */
7083 if (DISPLAY_VER(dev_priv) >= 9 &&
7084 !intel_crtc_needs_modeset(new_crtc_state))
7085 skl_detach_scalers(new_crtc_state);
7086 }
7087
7088 static void intel_enable_crtc(struct intel_atomic_state *state,
7089 struct intel_crtc *crtc)
7090 {
7091 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7092 const struct intel_crtc_state *new_crtc_state =
7093 intel_atomic_get_new_crtc_state(state, crtc);
7094
7095 if (!intel_crtc_needs_modeset(new_crtc_state))
7096 return;
7097
7098 intel_crtc_update_active_timings(new_crtc_state);
7099
7100 dev_priv->display.funcs.display->crtc_enable(state, crtc);
7101
7102 if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
7103 return;
7104
7105 /* vblanks work again, re-enable pipe CRC. */
7106 intel_crtc_enable_pipe_crc(crtc);
7107 }
7108
7109 static void intel_update_crtc(struct intel_atomic_state *state,
7110 struct intel_crtc *crtc)
7111 {
7112 struct drm_i915_private *i915 = to_i915(state->base.dev);
7113 const struct intel_crtc_state *old_crtc_state =
7114 intel_atomic_get_old_crtc_state(state, crtc);
7115 struct intel_crtc_state *new_crtc_state =
7116 intel_atomic_get_new_crtc_state(state, crtc);
7117 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7118
7119 if (!modeset) {
7120 if (new_crtc_state->preload_luts &&
7121 (new_crtc_state->uapi.color_mgmt_changed ||
7122 new_crtc_state->update_pipe))
7123 intel_color_load_luts(new_crtc_state);
7124
7125 intel_pre_plane_update(state, crtc);
7126
7127 if (new_crtc_state->update_pipe)
7128 intel_encoders_update_pipe(state, crtc);
7129
7130 if (DISPLAY_VER(i915) >= 11 &&
7131 new_crtc_state->update_pipe)
7132 icl_set_pipe_chicken(new_crtc_state);
7133 }
7134
7135 intel_fbc_update(state, crtc);
7136
7137 if (!modeset &&
7138 (new_crtc_state->uapi.color_mgmt_changed ||
7139 new_crtc_state->update_pipe))
7140 intel_color_commit_noarm(new_crtc_state);
7141
7142 intel_crtc_planes_update_noarm(state, crtc);
7143
7144 /* Perform vblank evasion around commit operation */
7145 intel_pipe_update_start(new_crtc_state);
7146
7147 commit_pipe_pre_planes(state, crtc);
7148
7149 intel_crtc_planes_update_arm(state, crtc);
7150
7151 commit_pipe_post_planes(state, crtc);
7152
7153 intel_pipe_update_end(new_crtc_state);
7154
7155 /*
7156 * We usually enable FIFO underrun interrupts as part of the
7157 * CRTC enable sequence during modesets. But when we inherit a
7158 * valid pipe configuration from the BIOS we need to take care
7159 * of enabling them on the CRTC's first fastset.
7160 */
7161 if (new_crtc_state->update_pipe && !modeset &&
7162 old_crtc_state->inherited)
7163 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
7164 }
7165
7166 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
7167 struct intel_crtc_state *old_crtc_state,
7168 struct intel_crtc_state *new_crtc_state,
7169 struct intel_crtc *crtc)
7170 {
7171 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7172
7173 /*
7174 * We need to disable pipe CRC before disabling the pipe,
7175 * or we race against vblank off.
7176 */
7177 intel_crtc_disable_pipe_crc(crtc);
7178
7179 dev_priv->display.funcs.display->crtc_disable(state, crtc);
7180 crtc->active = false;
7181 intel_fbc_disable(crtc);
7182 intel_disable_shared_dpll(old_crtc_state);
7183
7184 /* FIXME unify this for all platforms */
7185 if (!new_crtc_state->hw.active &&
7186 !HAS_GMCH(dev_priv))
7187 intel_initial_watermarks(state, crtc);
7188 }
7189
7190 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
7191 {
7192 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7193 struct intel_crtc *crtc;
7194 u32 handled = 0;
7195 int i;
7196
7197 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7198 new_crtc_state, i) {
7199 if (!intel_crtc_needs_modeset(new_crtc_state))
7200 continue;
7201
7202 if (!old_crtc_state->hw.active)
7203 continue;
7204
7205 intel_pre_plane_update(state, crtc);
7206 intel_crtc_disable_planes(state, crtc);
7207 }
7208
7209 /* Only disable port sync, MST and bigjoiner slaves */
7210 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7211 new_crtc_state, i) {
7212 if (!intel_crtc_needs_modeset(new_crtc_state))
7213 continue;
7214
7215 if (!old_crtc_state->hw.active)
7216 continue;
7217
7218 /* In case of Transcoder port Sync, master and slave CRTCs can be
7219 * assigned in any order, and we need to make sure that
7220 * slave CRTCs are disabled first and the master CRTC after, since
7221 * slave vblanks are masked until the master's vblank arrives.
7222 */
7223 if (!is_trans_port_sync_slave(old_crtc_state) &&
7224 !intel_dp_mst_is_slave_trans(old_crtc_state) &&
7225 !intel_crtc_is_bigjoiner_slave(old_crtc_state))
7226 continue;
7227
7228 intel_old_crtc_state_disables(state, old_crtc_state,
7229 new_crtc_state, crtc);
7230 handled |= BIT(crtc->pipe);
7231 }
7232
7233 /* Disable everything else left on */
7234 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7235 new_crtc_state, i) {
7236 if (!intel_crtc_needs_modeset(new_crtc_state) ||
7237 (handled & BIT(crtc->pipe)))
7238 continue;
7239
7240 if (!old_crtc_state->hw.active)
7241 continue;
7242
7243 intel_old_crtc_state_disables(state, old_crtc_state,
7244 new_crtc_state, crtc);
7245 }
7246 }
7247
7248 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
7249 {
7250 struct intel_crtc_state *new_crtc_state;
7251 struct intel_crtc *crtc;
7252 int i;
7253
7254 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7255 if (!new_crtc_state->hw.active)
7256 continue;
7257
7258 intel_enable_crtc(state, crtc);
7259 intel_update_crtc(state, crtc);
7260 }
7261 }
7262
7263 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
7264 {
7265 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7266 struct intel_crtc *crtc;
7267 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7268 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
7269 u8 update_pipes = 0, modeset_pipes = 0;
7270 int i;
7271
7272 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7273 enum pipe pipe = crtc->pipe;
7274
7275 if (!new_crtc_state->hw.active)
7276 continue;
7277
7278 /* ignore allocations for crtcs that have been turned off. */
7279 if (!intel_crtc_needs_modeset(new_crtc_state)) {
7280 entries[pipe] = old_crtc_state->wm.skl.ddb;
7281 update_pipes |= BIT(pipe);
7282 } else {
7283 modeset_pipes |= BIT(pipe);
7284 }
7285 }
7286
7287 /*
7288 * Whenever the number of active pipes changes, we need to make sure we
7289 * update the pipes in the right order so that their ddb allocations
7290 * never overlap with each other between CRTC updates. Otherwise we'll
7291 * cause pipe underruns and other bad stuff.
7292 *
7293 * So first let's enable all pipes that do not need a full modeset, as
7294 * those don't have any external dependency.
7295 */
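/*
 * Worked example (hypothetical DDB values): going from A=[0,512) B=[512,1024)
 * to A=[0,341) B=[341,682) C=[682,1024), pipe A shrinks into space it already
 * owns, so it is updated first; after a vblank its old allocation is free,
 * pipe B can move into [341,682), and only then can pipe C be enabled in
 * [682,1024) by the modeset loops further down.
 */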
7296 while (update_pipes) {
7297 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7298 new_crtc_state, i) {
7299 enum pipe pipe = crtc->pipe;
7300
7301 if ((update_pipes & BIT(pipe)) == 0)
7302 continue;
7303
7304 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
7305 entries, I915_MAX_PIPES, pipe))
7306 continue;
7307
7308 entries[pipe] = new_crtc_state->wm.skl.ddb;
7309 update_pipes &= ~BIT(pipe);
7310
7311 intel_update_crtc(state, crtc);
7312
7313 /*
7314 * If this is an already active pipe, its DDB changed,
7315 * and this isn't the last pipe that needs updating,
7316 * then we need to wait for a vblank to pass for the
7317 * new ddb allocation to take effect.
7318 */
7319 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
7320 &old_crtc_state->wm.skl.ddb) &&
7321 (update_pipes | modeset_pipes))
7322 intel_crtc_wait_for_next_vblank(crtc);
7323 }
7324 }
7325
7326 update_pipes = modeset_pipes;
7327
7328 /*
7329 * Enable all pipes that need a modeset and do not depend on other
7330 * pipes
7331 */
7332 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7333 enum pipe pipe = crtc->pipe;
7334
7335 if ((modeset_pipes & BIT(pipe)) == 0)
7336 continue;
7337
7338 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
7339 is_trans_port_sync_master(new_crtc_state) ||
7340 intel_crtc_is_bigjoiner_master(new_crtc_state))
7341 continue;
7342
7343 modeset_pipes &= ~BIT(pipe);
7344
7345 intel_enable_crtc(state, crtc);
7346 }
7347
7348 /*
7349 * Then we enable all remaining pipes that depend on other
7350 * pipes: MST slaves, port sync masters and big joiner masters
7351 */
7352 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7353 enum pipe pipe = crtc->pipe;
7354
7355 if ((modeset_pipes & BIT(pipe)) == 0)
7356 continue;
7357
7358 modeset_pipes &= ~BIT(pipe);
7359
7360 intel_enable_crtc(state, crtc);
7361 }
7362
7363 /*
7364 * Finally we do the plane updates/etc. for all pipes that got enabled.
7365 */
7366 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7367 enum pipe pipe = crtc->pipe;
7368
7369 if ((update_pipes & BIT(pipe)) == 0)
7370 continue;
7371
7372 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
7373 entries, I915_MAX_PIPES, pipe));
7374
7375 entries[pipe] = new_crtc_state->wm.skl.ddb;
7376 update_pipes &= ~BIT(pipe);
7377
7378 intel_update_crtc(state, crtc);
7379 }
7380
7381 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
7382 drm_WARN_ON(&dev_priv->drm, update_pipes);
7383 }
7384
7385 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
7386 {
7387 struct intel_atomic_state *state, *next;
7388 struct llist_node *freed;
7389
7390 freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
7391 llist_for_each_entry_safe(state, next, freed, freed)
7392 drm_atomic_state_put(&state->base);
7393 }
7394
7395 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
7396 {
7397 struct drm_i915_private *dev_priv =
7398 container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
7399
7400 intel_atomic_helper_free_state(dev_priv);
7401 }
7402
7403 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
7404 {
7405 struct wait_queue_entry wait_fence, wait_reset;
7406 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
7407
7408 init_wait_entry(&wait_fence, 0);
7409 init_wait_entry(&wait_reset, 0);
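/*
 * Wait for the commit's dependencies to signal, but also wake up if a GPU
 * reset that needs to force a modeset is pending, so that the reset path
 * is never blocked behind this commit.
 */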
7410 for (;;) {
7411 prepare_to_wait(&intel_state->commit_ready.wait,
7412 &wait_fence, TASK_UNINTERRUPTIBLE);
7413 prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
7414 I915_RESET_MODESET),
7415 &wait_reset, TASK_UNINTERRUPTIBLE);
7416
7418 if (i915_sw_fence_done(&intel_state->commit_ready) ||
7419 test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
7420 break;
7421
7422 schedule();
7423 }
7424 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
7425 finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
7426 I915_RESET_MODESET),
7427 &wait_reset);
7428 }
7429
7430 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
7431 {
7432 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7433 struct intel_crtc *crtc;
7434 int i;
7435
7436 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7437 new_crtc_state, i)
7438 intel_dsb_cleanup(old_crtc_state);
7439 }
7440
7441 static void intel_atomic_cleanup_work(struct work_struct *work)
7442 {
7443 struct intel_atomic_state *state =
7444 container_of(work, struct intel_atomic_state, base.commit_work);
7445 struct drm_i915_private *i915 = to_i915(state->base.dev);
7446
7447 intel_cleanup_dsbs(state);
7448 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
7449 drm_atomic_helper_commit_cleanup_done(&state->base);
7450 drm_atomic_state_put(&state->base);
7451
7452 intel_atomic_helper_free_state(i915);
7453 }
7454
7455 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
7456 {
7457 struct drm_i915_private *i915 = to_i915(state->base.dev);
7458 struct intel_plane *plane;
7459 struct intel_plane_state *plane_state;
7460 int i;
7461
7462 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7463 struct drm_framebuffer *fb = plane_state->hw.fb;
7464 int cc_plane;
7465 int ret;
7466
7467 if (!fb)
7468 continue;
7469
7470 cc_plane = intel_fb_rc_ccs_cc_plane(fb);
7471 if (cc_plane < 0)
7472 continue;
7473
7474 /*
7475 * The layout of the fast clear color value expected by HW
7476 * (the DRM ABI requires this value to be located in the fb at
7477 * offset 0 of the cc plane, which is plane #2 on previous generations
7478 * or plane #1 for flat CCS):
7479 * - 4 x 4 bytes of per-channel values
7480 * (in surface type specific float/int format provided by the fb user)
7481 * - 8 bytes native color value used by the display
7482 * (converted/written by GPU during a fast clear operation using the
7483 * above per-channel values)
7484 *
7485 * The commit's FB prepare hook already ensured that the FB obj is pinned and the
7486 * caller made sure that the object is synced wrt. the related color clear value
7487 * GPU write on it.
7488 */
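/*
 * Layout sketch (illustrative, per the ABI described above): bytes 0-15 of
 * the cc plane hold the four 4-byte per-channel values, bytes 16-23 hold
 * the 8-byte native color value, which is why the read below starts at
 * offset 16 and fetches sizeof(plane_state->ccval) == 8 bytes.
 */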
7489 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
7490 fb->offsets[cc_plane] + 16,
7491 &plane_state->ccval,
7492 sizeof(plane_state->ccval));
7493 /* The above could only fail if the FB obj has an unexpected backing store type. */
7494 drm_WARN_ON(&i915->drm, ret);
7495 }
7496 }
7497
7498 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
7499 {
7500 struct drm_device *dev = state->base.dev;
7501 struct drm_i915_private *dev_priv = to_i915(dev);
7502 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7503 struct intel_crtc *crtc;
7504 struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
7505 intel_wakeref_t wakeref = 0;
7506 int i;
7507
7508 intel_atomic_commit_fence_wait(state);
7509
7510 drm_atomic_helper_wait_for_dependencies(&state->base);
7511 drm_dp_mst_atomic_wait_for_dependencies(&state->base);
7512
7513 if (state->modeset)
7514 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
7515
7516 intel_atomic_prepare_plane_clear_colors(state);
7517
7518 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7519 new_crtc_state, i) {
7520 if (intel_crtc_needs_modeset(new_crtc_state) ||
7521 new_crtc_state->update_pipe) {
7522 intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
7523 }
7524 }
7525
7526 intel_commit_modeset_disables(state);
7527
7528 /* FIXME: Eventually get rid of our crtc->config pointer */
7529 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7530 crtc->config = new_crtc_state;
7531
7532 if (state->modeset) {
7533 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
7534
7535 intel_set_cdclk_pre_plane_update(state);
7536
7537 intel_modeset_verify_disabled(dev_priv, state);
7538 }
7539
7540 intel_sagv_pre_plane_update(state);
7541
7542 /* Complete the events for pipes that have now been disabled */
7543 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7544 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7545
7546 /* Complete events for the now disabled pipes here. */
7547 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
7548 spin_lock_irq(&dev->event_lock);
7549 drm_crtc_send_vblank_event(&crtc->base,
7550 new_crtc_state->uapi.event);
7551 spin_unlock_irq(&dev->event_lock);
7552
7553 new_crtc_state->uapi.event = NULL;
7554 }
7555 }
7556
7557 intel_encoders_update_prepare(state);
7558
7559 intel_dbuf_pre_plane_update(state);
7560 intel_mbus_dbox_update(state);
7561
7562 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7563 if (new_crtc_state->do_async_flip)
7564 intel_crtc_enable_flip_done(state, crtc);
7565 }
7566
7567 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
7568 dev_priv->display.funcs.display->commit_modeset_enables(state);
7569
7570 intel_encoders_update_complete(state);
7571
7572 if (state->modeset)
7573 intel_set_cdclk_post_plane_update(state);
7574
7575 intel_wait_for_vblank_workers(state);
7576
7577 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
7578 * already, but still need the state for the delayed optimization. To
7579 * fix this:
7580 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
7581 * - schedule that vblank worker _before_ calling hw_done
7582 * - at the start of commit_tail, cancel it _synchronously_
7583 * - switch over to the vblank wait helper in the core after that since
7584 * we don't need our special handling any more.
7585 */
7586 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
7587
7588 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7589 if (new_crtc_state->do_async_flip)
7590 intel_crtc_disable_flip_done(state, crtc);
7591 }
7592
7593 /*
7594 * Now that the vblank has passed, we can go ahead and program the
7595 * optimal watermarks on platforms that need two-step watermark
7596 * programming.
7597 *
7598 * TODO: Move this (and other cleanup) to an async worker eventually.
7599 */
7600 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7601 new_crtc_state, i) {
7602 /*
7603 * Gen2 reports pipe underruns whenever all planes are disabled.
7604 * So re-enable underrun reporting after some planes get enabled.
7605 *
7606 * We do this before .optimize_watermarks() so that we have a
7607 * chance of catching underruns with the intermediate watermarks
7608 * vs. the new plane configuration.
7609 */
7610 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
7611 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
7612
7613 intel_optimize_watermarks(state, crtc);
7614 }
7615
7616 intel_dbuf_post_plane_update(state);
7617 intel_psr_post_plane_update(state);
7618
7619 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7620 intel_post_plane_update(state, crtc);
7621
7622 intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
7623
7624 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
7625
7626 /*
7627 * DSB cleanup is done in cleanup_work, aligned with framebuffer
7628 * cleanup. So copy and reset the dsb structure to sync with
7629 * commit_done, and do the dsb cleanup later in cleanup_work.
7630 */
7631 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
7632 }
7633
7634 /* Underruns don't always raise interrupts, so check manually */
7635 intel_check_cpu_fifo_underruns(dev_priv);
7636 intel_check_pch_fifo_underruns(dev_priv);
7637
7638 if (state->modeset)
7639 intel_verify_planes(state);
7640
7641 intel_sagv_post_plane_update(state);
7642
7643 drm_atomic_helper_commit_hw_done(&state->base);
7644
7645 if (state->modeset) {
7646 /* As one of the primary mmio accessors, KMS has a high
7647 * likelihood of triggering bugs in unclaimed access. After we
7648 * finish modesetting, see if an error has been flagged, and if
7649 * so enable debugging for the next modeset - and hope we catch
7650 * the culprit.
7651 */
7652 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
7653 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
7654 }
7655 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7656
7657 /*
7658 * Defer the cleanup of the old state to a separate worker so as not
7659 * to impede the current task (userspace for blocking modesets) that
7660 * is executed inline. For out-of-line asynchronous modesets/flips,
7661 * deferring to a new worker seems overkill, but we would place a
7662 * schedule point (cond_resched()) here anyway to keep latencies
7663 * down.
7664 */
7665 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
7666 queue_work(system_highpri_wq, &state->base.commit_work);
7667 }
7668
7669 static void intel_atomic_commit_work(struct work_struct *work)
7670 {
7671 struct intel_atomic_state *state =
7672 container_of(work, struct intel_atomic_state, base.commit_work);
7673
7674 intel_atomic_commit_tail(state);
7675 }
7676
7677 static int
7678 intel_atomic_commit_ready(struct i915_sw_fence *fence,
7679 enum i915_sw_fence_notify notify)
7680 {
7681 struct intel_atomic_state *state =
7682 container_of(fence, struct intel_atomic_state, commit_ready);
7683
7684 switch (notify) {
7685 case FENCE_COMPLETE:
7686 /* we do blocking waits in the worker, nothing to do here */
7687 break;
7688 case FENCE_FREE:
7689 {
7690 struct intel_atomic_helper *helper =
7691 &to_i915(state->base.dev)->display.atomic_helper;
7692
7693 if (llist_add(&state->freed, &helper->free_list))
7694 schedule_work(&helper->free_work);
7695 break;
7696 }
7697 }
7698
7699 return NOTIFY_DONE;
7700 }
7701
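/* Move frontbuffer tracking from each plane's old fb to its new fb */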
7702 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
7703 {
7704 struct intel_plane_state *old_plane_state, *new_plane_state;
7705 struct intel_plane *plane;
7706 int i;
7707
7708 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7709 new_plane_state, i)
7710 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
7711 to_intel_frontbuffer(new_plane_state->hw.fb),
7712 plane->frontbuffer_bit);
7713 }
7714
7715 static int intel_atomic_commit(struct drm_device *dev,
7716 struct drm_atomic_state *_state,
7717 bool nonblock)
7718 {
7719 struct intel_atomic_state *state = to_intel_atomic_state(_state);
7720 struct drm_i915_private *dev_priv = to_i915(dev);
7721 int ret = 0;
7722
7723 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
7724
7725 drm_atomic_state_get(&state->base);
7726 i915_sw_fence_init(&state->commit_ready,
7727 intel_atomic_commit_ready);
7728
7729 /*
7730 * The intel_legacy_cursor_update() fast path takes care
7731 * of avoiding the vblank waits for simple cursor
7732 * movement and flips. For cursor on/off and size changes,
7733 * we want to perform the vblank waits so that watermark
7734 * updates happen during the correct frames. Gen9+ have
7735 * double buffered watermarks and so shouldn't need this.
7736 *
7737 * Unset state->legacy_cursor_update before the call to
7738 * drm_atomic_helper_setup_commit() because otherwise
7739 * drm_atomic_helper_wait_for_flip_done() is a noop and
7740 * we get FIFO underruns because we didn't wait
7741 * for vblank.
7742 *
7743 * FIXME doing watermarks and fb cleanup from a vblank worker
7744 * (assuming we had any) would solve these problems.
7745 */
7746 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
7747 struct intel_crtc_state *new_crtc_state;
7748 struct intel_crtc *crtc;
7749 int i;
7750
7751 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7752 if (new_crtc_state->wm.need_postvbl_update ||
7753 new_crtc_state->update_wm_post)
7754 state->base.legacy_cursor_update = false;
7755 }
7756
7757 ret = intel_atomic_prepare_commit(state);
7758 if (ret) {
7759 drm_dbg_atomic(&dev_priv->drm,
7760 "Preparing state failed with %i\n", ret);
7761 i915_sw_fence_commit(&state->commit_ready);
7762 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7763 return ret;
7764 }
7765
7766 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
7767 if (!ret)
7768 ret = drm_atomic_helper_swap_state(&state->base, true);
7769 if (!ret)
7770 intel_atomic_swap_global_state(state);
7771
7772 if (ret) {
7773 struct intel_crtc_state *new_crtc_state;
7774 struct intel_crtc *crtc;
7775 int i;
7776
7777 i915_sw_fence_commit(&state->commit_ready);
7778
7779 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7780 intel_dsb_cleanup(new_crtc_state);
7781
7782 drm_atomic_helper_cleanup_planes(dev, &state->base);
7783 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7784 return ret;
7785 }
7786 intel_shared_dpll_swap_state(state);
7787 intel_atomic_track_fbs(state);
7788
7789 drm_atomic_state_get(&state->base);
7790 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
7791
7792 i915_sw_fence_commit(&state->commit_ready);
7793 if (nonblock && state->modeset) {
7794 queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
7795 } else if (nonblock) {
7796 queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
7797 } else {
7798 if (state->modeset)
7799 flush_workqueue(dev_priv->display.wq.modeset);
7800 intel_atomic_commit_tail(state);
7801 }
7802
7803 return 0;
7804 }
7805
7806 /**
7807 * intel_plane_destroy - destroy a plane
7808 * @plane: plane to destroy
7809 *
7810 * Common destruction function for all types of planes (primary, cursor,
7811 * sprite).
7812 */
7813 void intel_plane_destroy(struct drm_plane *plane)
7814 {
7815 drm_plane_cleanup(plane);
7816 kfree(to_intel_plane(plane));
7817 }
7818
7819 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
7820 {
7821 struct intel_plane *plane;
7822
7823 for_each_intel_plane(&dev_priv->drm, plane) {
7824 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
7825 plane->pipe);
7826
7827 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
7828 }
7829 }
7830
7832 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
7833 struct drm_file *file)
7834 {
7835 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7836 struct drm_crtc *drmmode_crtc;
7837 struct intel_crtc *crtc;
7838
7839 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
7840 if (!drmmode_crtc)
7841 return -ENOENT;
7842
7843 crtc = to_intel_crtc(drmmode_crtc);
7844 pipe_from_crtc_id->pipe = crtc->pipe;
7845
7846 return 0;
7847 }
7848
7849 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
7850 {
7851 struct drm_device *dev = encoder->base.dev;
7852 struct intel_encoder *source_encoder;
7853 u32 possible_clones = 0;
7854
7855 for_each_intel_encoder(dev, source_encoder) {
7856 if (encoders_cloneable(encoder, source_encoder))
7857 possible_clones |= drm_encoder_mask(&source_encoder->base);
7858 }
7859
7860 return possible_clones;
7861 }
7862
7863 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
7864 {
7865 struct drm_device *dev = encoder->base.dev;
7866 struct intel_crtc *crtc;
7867 u32 possible_crtcs = 0;
7868
7869 for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
7870 possible_crtcs |= drm_crtc_mask(&crtc->base);
7871
7872 return possible_crtcs;
7873 }
7874
7875 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
7876 {
7877 if (!IS_MOBILE(dev_priv))
7878 return false;
7879
7880 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
7881 return false;
7882
7883 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
7884 return false;
7885
7886 return true;
7887 }
7888
7889 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
7890 {
7891 if (DISPLAY_VER(dev_priv) >= 9)
7892 return false;
7893
7894 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
7895 return false;
7896
7897 if (HAS_PCH_LPT_H(dev_priv) &&
7898 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
7899 return false;
7900
7901 /* DDI E can't be used if DDI A requires 4 lanes */
7902 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
7903 return false;
7904
7905 if (!dev_priv->display.vbt.int_crt_support)
7906 return false;
7907
7908 return true;
7909 }
7910
7911 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
7912 {
7913 struct intel_encoder *encoder;
7914 bool dpd_is_edp = false;
7915
7916 intel_pps_unlock_regs_wa(dev_priv);
7917
7918 if (!HAS_DISPLAY(dev_priv))
7919 return;
7920
7921 if (IS_DG2(dev_priv)) {
7922 intel_ddi_init(dev_priv, PORT_A);
7923 intel_ddi_init(dev_priv, PORT_B);
7924 intel_ddi_init(dev_priv, PORT_C);
7925 intel_ddi_init(dev_priv, PORT_D_XELPD);
7926 intel_ddi_init(dev_priv, PORT_TC1);
7927 } else if (IS_ALDERLAKE_P(dev_priv)) {
7928 intel_ddi_init(dev_priv, PORT_A);
7929 intel_ddi_init(dev_priv, PORT_B);
7930 intel_ddi_init(dev_priv, PORT_TC1);
7931 intel_ddi_init(dev_priv, PORT_TC2);
7932 intel_ddi_init(dev_priv, PORT_TC3);
7933 intel_ddi_init(dev_priv, PORT_TC4);
7934 icl_dsi_init(dev_priv);
7935 } else if (IS_ALDERLAKE_S(dev_priv)) {
7936 intel_ddi_init(dev_priv, PORT_A);
7937 intel_ddi_init(dev_priv, PORT_TC1);
7938 intel_ddi_init(dev_priv, PORT_TC2);
7939 intel_ddi_init(dev_priv, PORT_TC3);
7940 intel_ddi_init(dev_priv, PORT_TC4);
7941 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
7942 intel_ddi_init(dev_priv, PORT_A);
7943 intel_ddi_init(dev_priv, PORT_B);
7944 intel_ddi_init(dev_priv, PORT_TC1);
7945 intel_ddi_init(dev_priv, PORT_TC2);
7946 } else if (DISPLAY_VER(dev_priv) >= 12) {
7947 intel_ddi_init(dev_priv, PORT_A);
7948 intel_ddi_init(dev_priv, PORT_B);
7949 intel_ddi_init(dev_priv, PORT_TC1);
7950 intel_ddi_init(dev_priv, PORT_TC2);
7951 intel_ddi_init(dev_priv, PORT_TC3);
7952 intel_ddi_init(dev_priv, PORT_TC4);
7953 intel_ddi_init(dev_priv, PORT_TC5);
7954 intel_ddi_init(dev_priv, PORT_TC6);
7955 icl_dsi_init(dev_priv);
7956 } else if (IS_JSL_EHL(dev_priv)) {
7957 intel_ddi_init(dev_priv, PORT_A);
7958 intel_ddi_init(dev_priv, PORT_B);
7959 intel_ddi_init(dev_priv, PORT_C);
7960 intel_ddi_init(dev_priv, PORT_D);
7961 icl_dsi_init(dev_priv);
7962 } else if (DISPLAY_VER(dev_priv) == 11) {
7963 intel_ddi_init(dev_priv, PORT_A);
7964 intel_ddi_init(dev_priv, PORT_B);
7965 intel_ddi_init(dev_priv, PORT_C);
7966 intel_ddi_init(dev_priv, PORT_D);
7967 intel_ddi_init(dev_priv, PORT_E);
7968 intel_ddi_init(dev_priv, PORT_F);
7969 icl_dsi_init(dev_priv);
7970 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
7971 intel_ddi_init(dev_priv, PORT_A);
7972 intel_ddi_init(dev_priv, PORT_B);
7973 intel_ddi_init(dev_priv, PORT_C);
7974 vlv_dsi_init(dev_priv);
7975 } else if (DISPLAY_VER(dev_priv) >= 9) {
7976 intel_ddi_init(dev_priv, PORT_A);
7977 intel_ddi_init(dev_priv, PORT_B);
7978 intel_ddi_init(dev_priv, PORT_C);
7979 intel_ddi_init(dev_priv, PORT_D);
7980 intel_ddi_init(dev_priv, PORT_E);
7981 } else if (HAS_DDI(dev_priv)) {
7982 u32 found;
7983
7984 if (intel_ddi_crt_present(dev_priv))
7985 intel_crt_init(dev_priv);
7986
7987 /* Haswell uses DDI functions to detect digital outputs. */
7988 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
7989 if (found)
7990 intel_ddi_init(dev_priv, PORT_A);
7991
7992 found = intel_de_read(dev_priv, SFUSE_STRAP);
7993 if (found & SFUSE_STRAP_DDIB_DETECTED)
7994 intel_ddi_init(dev_priv, PORT_B);
7995 if (found & SFUSE_STRAP_DDIC_DETECTED)
7996 intel_ddi_init(dev_priv, PORT_C);
7997 if (found & SFUSE_STRAP_DDID_DETECTED)
7998 intel_ddi_init(dev_priv, PORT_D);
7999 if (found & SFUSE_STRAP_DDIF_DETECTED)
8000 intel_ddi_init(dev_priv, PORT_F);
8001 } else if (HAS_PCH_SPLIT(dev_priv)) {
8002 int found;
8003
8004 /*
8005 * intel_edp_init_connector() depends on this completing first,
8006 * to prevent the registration of both eDP and LVDS and the
8007 * incorrect sharing of the PPS.
8008 */
8009 intel_lvds_init(dev_priv);
8010 intel_crt_init(dev_priv);
8011
8012 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
8013
8014 if (ilk_has_edp_a(dev_priv))
8015 g4x_dp_init(dev_priv, DP_A, PORT_A);
8016
8017 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
8018 /* PCH SDVOB multiplexed with HDMIB */
8019 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
8020 if (!found)
8021 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
8022 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
8023 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
8024 }
8025
8026 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
8027 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
8028
8029 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
8030 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
8031
8032 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
8033 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
8034
8035 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
8036 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
8037 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
8038 bool has_edp, has_port;
8039
8040 if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
8041 intel_crt_init(dev_priv);
8042
8043 /*
8044 * The DP_DETECTED bit is the latched state of the DDC
8045 * SDA pin at boot. However since eDP doesn't require DDC
8046 * (no way to plug in a DP->HDMI dongle) the DDC pins for
8047 * eDP ports may have been muxed to an alternate function.
8048 * Thus we can't rely on the DP_DETECTED bit alone to detect
8049 * eDP ports. Consult the VBT as well as DP_DETECTED to
8050 * detect eDP ports.
8051 *
8052 * Sadly the straps seem to be missing sometimes even for HDMI
8053 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
8054 * and VBT for the presence of the port. Additionally we can't
8055 * trust the port type the VBT declares as we've seen at least
8056 * HDMI ports that the VBT claims are DP or eDP.
8057 */
8058 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
8059 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
8060 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
8061 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
8062 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
8063 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
8064
8065 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
8066 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
8067 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
8068 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
8069 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
8070 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
8071
8072 if (IS_CHERRYVIEW(dev_priv)) {
8073 /*
8074 * eDP not supported on port D,
8075 * so no need to worry about it
8076 */
8077 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
8078 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
8079 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
8080 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
8081 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
8082 }
8083
8084 vlv_dsi_init(dev_priv);
8085 } else if (IS_PINEVIEW(dev_priv)) {
8086 intel_lvds_init(dev_priv);
8087 intel_crt_init(dev_priv);
8088 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
8089 bool found = false;
8090
8091 if (IS_MOBILE(dev_priv))
8092 intel_lvds_init(dev_priv);
8093
8094 intel_crt_init(dev_priv);
8095
8096 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
8097 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
8098 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
8099 if (!found && IS_G4X(dev_priv)) {
8100 drm_dbg_kms(&dev_priv->drm,
8101 "probing HDMI on SDVOB\n");
8102 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
8103 }
8104
8105 if (!found && IS_G4X(dev_priv))
8106 g4x_dp_init(dev_priv, DP_B, PORT_B);
8107 }
8108
8109 	/* Before G4X, SDVOC doesn't have its own detect register */
8110
8111 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
8112 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
8113 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
8114 }
8115
8116 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
8117
8118 if (IS_G4X(dev_priv)) {
8119 drm_dbg_kms(&dev_priv->drm,
8120 "probing HDMI on SDVOC\n");
8121 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
8122 }
8123 if (IS_G4X(dev_priv))
8124 g4x_dp_init(dev_priv, DP_C, PORT_C);
8125 }
8126
8127 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
8128 g4x_dp_init(dev_priv, DP_D, PORT_D);
8129
8130 if (SUPPORTS_TV(dev_priv))
8131 intel_tv_init(dev_priv);
8132 } else if (DISPLAY_VER(dev_priv) == 2) {
8133 if (IS_I85X(dev_priv))
8134 intel_lvds_init(dev_priv);
8135
8136 intel_crt_init(dev_priv);
8137 intel_dvo_init(dev_priv);
8138 }
8139
8140 for_each_intel_encoder(&dev_priv->drm, encoder) {
8141 encoder->base.possible_crtcs =
8142 intel_encoder_possible_crtcs(encoder);
8143 encoder->base.possible_clones =
8144 intel_encoder_possible_clones(encoder);
8145 }
8146
8147 intel_init_pch_refclk(dev_priv);
8148
8149 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
8150 }
8151
8152 static int max_dotclock(struct drm_i915_private *i915)
8153 {
8154 int max_dotclock = i915->max_dotclk_freq;
8155
8156 /* icl+ might use bigjoiner */
8157 if (DISPLAY_VER(i915) >= 11)
8158 max_dotclock *= 2;
8159
8160 return max_dotclock;
8161 }
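/*
 * Worked example (hypothetical numbers, not a real platform value):
 * if max_dotclk_freq were 540000 kHz, an icl+ platform would report
 * a 1080000 kHz ceiling here, since bigjoiner can split a mode
 * across two pipes so that each pipe drives only half the dotclock.
 */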
8162
8163 static enum drm_mode_status
8164 intel_mode_valid(struct drm_device *dev,
8165 const struct drm_display_mode *mode)
8166 {
8167 struct drm_i915_private *dev_priv = to_i915(dev);
8168 int hdisplay_max, htotal_max;
8169 int vdisplay_max, vtotal_max;
8170
8171 /*
8172 * Can't reject DBLSCAN here because Xorg ddxen can add piles
8173 * of DBLSCAN modes to the output's mode list when they detect
8174 * the scaling mode property on the connector. And they don't
8175 * ask the kernel to validate those modes in any way until
8176 * modeset time at which point the client gets a protocol error.
8177 * So in order to not upset those clients we silently ignore the
8178 * DBLSCAN flag on such connectors. For other connectors we will
8179 * reject modes with the DBLSCAN flag in encoder->compute_config().
8180 * And we always reject DBLSCAN modes in connector->mode_valid()
8181 * as we never want such modes on the connector's mode list.
8182 */
8183
8184 if (mode->vscan > 1)
8185 return MODE_NO_VSCAN;
8186
8187 if (mode->flags & DRM_MODE_FLAG_HSKEW)
8188 return MODE_H_ILLEGAL;
8189
8190 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
8191 DRM_MODE_FLAG_NCSYNC |
8192 DRM_MODE_FLAG_PCSYNC))
8193 return MODE_HSYNC;
8194
8195 if (mode->flags & (DRM_MODE_FLAG_BCAST |
8196 DRM_MODE_FLAG_PIXMUX |
8197 DRM_MODE_FLAG_CLKDIV2))
8198 return MODE_BAD;
8199
8200 /*
8201 * Reject clearly excessive dotclocks early to
8202 * avoid having to worry about huge integers later.
8203 */
8204 if (mode->clock > max_dotclock(dev_priv))
8205 return MODE_CLOCK_HIGH;
8206
8207 /* Transcoder timing limits */
8208 if (DISPLAY_VER(dev_priv) >= 11) {
8209 hdisplay_max = 16384;
8210 vdisplay_max = 8192;
8211 htotal_max = 16384;
8212 vtotal_max = 8192;
8213 } else if (DISPLAY_VER(dev_priv) >= 9 ||
8214 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
8215 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
8216 vdisplay_max = 4096;
8217 htotal_max = 8192;
8218 vtotal_max = 8192;
8219 } else if (DISPLAY_VER(dev_priv) >= 3) {
8220 hdisplay_max = 4096;
8221 vdisplay_max = 4096;
8222 htotal_max = 8192;
8223 vtotal_max = 8192;
8224 } else {
8225 hdisplay_max = 2048;
8226 vdisplay_max = 2048;
8227 htotal_max = 4096;
8228 vtotal_max = 4096;
8229 }
8230
8231 if (mode->hdisplay > hdisplay_max ||
8232 mode->hsync_start > htotal_max ||
8233 mode->hsync_end > htotal_max ||
8234 mode->htotal > htotal_max)
8235 return MODE_H_ILLEGAL;
8236
8237 if (mode->vdisplay > vdisplay_max ||
8238 mode->vsync_start > vtotal_max ||
8239 mode->vsync_end > vtotal_max ||
8240 mode->vtotal > vtotal_max)
8241 return MODE_V_ILLEGAL;
8242
8243 return MODE_OK;
8244 }
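/*
 * Illustrative example of the limits above: assuming the dotclock
 * also fits, a 4096x2160 mode is MODE_OK on any DISPLAY_VER >= 3
 * platform (hdisplay 4096 <= hdisplay_max 4096), while on gen2 the
 * same mode fails with MODE_H_ILLEGAL since hdisplay_max is 2048.
 */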
8245
8246 enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
8247 const struct drm_display_mode *mode)
8248 {
8249 /*
8250 * Additional transcoder timing limits,
8251 * excluding BXT/GLK DSI transcoders.
8252 */
8253 if (DISPLAY_VER(dev_priv) >= 5) {
8254 if (mode->hdisplay < 64 ||
8255 mode->htotal - mode->hdisplay < 32)
8256 return MODE_H_ILLEGAL;
8257
8258 if (mode->vtotal - mode->vdisplay < 5)
8259 return MODE_V_ILLEGAL;
8260 } else {
8261 if (mode->htotal - mode->hdisplay < 32)
8262 return MODE_H_ILLEGAL;
8263
8264 if (mode->vtotal - mode->vdisplay < 3)
8265 return MODE_V_ILLEGAL;
8266 }
8267
8268 /*
8269 * Cantiga+ cannot handle modes with a hsync front porch of 0.
8270 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
8271 */
8272 if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
8273 mode->hsync_start == mode->hdisplay)
8274 return MODE_H_ILLEGAL;
8275
8276 return MODE_OK;
8277 }
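/*
 * Usage sketch (hypothetical connector; foo_mode_valid() is made up
 * for illustration): connector ->mode_valid() hooks are expected to
 * chain this check so such modes never land on the mode list:
 *
 *	static enum drm_mode_status
 *	foo_mode_valid(struct drm_connector *connector,
 *		       struct drm_display_mode *mode)
 *	{
 *		struct drm_i915_private *i915 = to_i915(connector->dev);
 *
 *		return intel_cpu_transcoder_mode_valid(i915, mode);
 *	}
 */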
8278
8279 enum drm_mode_status
8280 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
8281 const struct drm_display_mode *mode,
8282 bool bigjoiner)
8283 {
8284 int plane_width_max, plane_height_max;
8285
8286 /*
8287 * intel_mode_valid() should be
8288 * sufficient on older platforms.
8289 */
8290 if (DISPLAY_VER(dev_priv) < 9)
8291 return MODE_OK;
8292
8293 /*
8294 * Most people will probably want a fullscreen
8295 	 * plane so let's not advertise modes that are
8296 * too big for that.
8297 */
8298 if (DISPLAY_VER(dev_priv) >= 11) {
8299 plane_width_max = 5120 << bigjoiner;
8300 plane_height_max = 4320;
8301 } else {
8302 plane_width_max = 5120;
8303 plane_height_max = 4096;
8304 }
8305
8306 if (mode->hdisplay > plane_width_max)
8307 return MODE_H_ILLEGAL;
8308
8309 if (mode->vdisplay > plane_height_max)
8310 return MODE_V_ILLEGAL;
8311
8312 return MODE_OK;
8313 }
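/*
 * For example, the 5120 << bigjoiner above doubles the advertised
 * plane width limit to 10240 pixels on icl+, so a 7680x4320 mode
 * passes only when the caller can actually use bigjoiner; without
 * it the mode is rejected with MODE_H_ILLEGAL.
 */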
8314
8315 static const struct drm_mode_config_funcs intel_mode_funcs = {
8316 .fb_create = intel_user_framebuffer_create,
8317 .get_format_info = intel_fb_get_format_info,
8318 .output_poll_changed = intel_fbdev_output_poll_changed,
8319 .mode_valid = intel_mode_valid,
8320 .atomic_check = intel_atomic_check,
8321 .atomic_commit = intel_atomic_commit,
8322 .atomic_state_alloc = intel_atomic_state_alloc,
8323 .atomic_state_clear = intel_atomic_state_clear,
8324 .atomic_state_free = intel_atomic_state_free,
8325 };
8326
8327 static const struct intel_display_funcs skl_display_funcs = {
8328 .get_pipe_config = hsw_get_pipe_config,
8329 .crtc_enable = hsw_crtc_enable,
8330 .crtc_disable = hsw_crtc_disable,
8331 .commit_modeset_enables = skl_commit_modeset_enables,
8332 .get_initial_plane_config = skl_get_initial_plane_config,
8333 };
8334
8335 static const struct intel_display_funcs ddi_display_funcs = {
8336 .get_pipe_config = hsw_get_pipe_config,
8337 .crtc_enable = hsw_crtc_enable,
8338 .crtc_disable = hsw_crtc_disable,
8339 .commit_modeset_enables = intel_commit_modeset_enables,
8340 .get_initial_plane_config = i9xx_get_initial_plane_config,
8341 };
8342
8343 static const struct intel_display_funcs pch_split_display_funcs = {
8344 .get_pipe_config = ilk_get_pipe_config,
8345 .crtc_enable = ilk_crtc_enable,
8346 .crtc_disable = ilk_crtc_disable,
8347 .commit_modeset_enables = intel_commit_modeset_enables,
8348 .get_initial_plane_config = i9xx_get_initial_plane_config,
8349 };
8350
8351 static const struct intel_display_funcs vlv_display_funcs = {
8352 .get_pipe_config = i9xx_get_pipe_config,
8353 .crtc_enable = valleyview_crtc_enable,
8354 .crtc_disable = i9xx_crtc_disable,
8355 .commit_modeset_enables = intel_commit_modeset_enables,
8356 .get_initial_plane_config = i9xx_get_initial_plane_config,
8357 };
8358
8359 static const struct intel_display_funcs i9xx_display_funcs = {
8360 .get_pipe_config = i9xx_get_pipe_config,
8361 .crtc_enable = i9xx_crtc_enable,
8362 .crtc_disable = i9xx_crtc_disable,
8363 .commit_modeset_enables = intel_commit_modeset_enables,
8364 .get_initial_plane_config = i9xx_get_initial_plane_config,
8365 };
8366
8367 /**
8368 * intel_init_display_hooks - initialize the display modesetting hooks
8369 * @dev_priv: device private
8370 */
8371 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
8372 {
8373 if (!HAS_DISPLAY(dev_priv))
8374 return;
8375
8376 intel_init_cdclk_hooks(dev_priv);
8377 intel_audio_hooks_init(dev_priv);
8378
8379 intel_dpll_init_clock_hook(dev_priv);
8380
8381 if (DISPLAY_VER(dev_priv) >= 9) {
8382 dev_priv->display.funcs.display = &skl_display_funcs;
8383 } else if (HAS_DDI(dev_priv)) {
8384 dev_priv->display.funcs.display = &ddi_display_funcs;
8385 } else if (HAS_PCH_SPLIT(dev_priv)) {
8386 dev_priv->display.funcs.display = &pch_split_display_funcs;
8387 } else if (IS_CHERRYVIEW(dev_priv) ||
8388 IS_VALLEYVIEW(dev_priv)) {
8389 dev_priv->display.funcs.display = &vlv_display_funcs;
8390 } else {
8391 dev_priv->display.funcs.display = &i9xx_display_funcs;
8392 }
8393
8394 intel_fdi_init_hook(dev_priv);
8395 }
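/*
 * These vtables are dispatched via dev_priv->display.funcs.display
 * from the modeset commit machinery; a call site looks roughly like
 * this (sketch, not an exact quote of the real callers):
 *
 *	dev_priv->display.funcs.display->crtc_enable(state, crtc);
 */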
8396
8397 void intel_modeset_init_hw(struct drm_i915_private *i915)
8398 {
8399 struct intel_cdclk_state *cdclk_state;
8400
8401 if (!HAS_DISPLAY(i915))
8402 return;
8403
8404 cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
8405
8406 intel_update_cdclk(i915);
8407 intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
8408 cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
8409 }
8410
8411 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
8412 {
8413 struct drm_plane *plane;
8414 struct intel_crtc *crtc;
8415
8416 for_each_intel_crtc(state->dev, crtc) {
8417 struct intel_crtc_state *crtc_state;
8418
8419 crtc_state = intel_atomic_get_crtc_state(state, crtc);
8420 if (IS_ERR(crtc_state))
8421 return PTR_ERR(crtc_state);
8422
8423 if (crtc_state->hw.active) {
8424 /*
8425 * Preserve the inherited flag to avoid
8426 * taking the full modeset path.
8427 */
8428 crtc_state->inherited = true;
8429 }
8430 }
8431
8432 drm_for_each_plane(plane, state->dev) {
8433 struct drm_plane_state *plane_state;
8434
8435 plane_state = drm_atomic_get_plane_state(state, plane);
8436 if (IS_ERR(plane_state))
8437 return PTR_ERR(plane_state);
8438 }
8439
8440 return 0;
8441 }
8442
8443 /*
8444 * Calculate what we think the watermarks should be for the state we've read
8445 * out of the hardware and then immediately program those watermarks so that
8446 * we ensure the hardware settings match our internal state.
8447 *
8448 * We can calculate what we think WMs should be by creating a duplicate of the
8449 * current state (which was constructed during hardware readout) and running it
8450 * through the atomic check code to calculate new watermark values in the
8451 * state object.
8452 */
8453 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
8454 {
8455 struct drm_atomic_state *state;
8456 struct intel_atomic_state *intel_state;
8457 struct intel_crtc *crtc;
8458 struct intel_crtc_state *crtc_state;
8459 struct drm_modeset_acquire_ctx ctx;
8460 int ret;
8461 int i;
8462
8463 /* Only supported on platforms that use atomic watermark design */
8464 if (!dev_priv->display.funcs.wm->optimize_watermarks)
8465 return;
8466
8467 state = drm_atomic_state_alloc(&dev_priv->drm);
8468 if (drm_WARN_ON(&dev_priv->drm, !state))
8469 return;
8470
8471 intel_state = to_intel_atomic_state(state);
8472
8473 drm_modeset_acquire_init(&ctx, 0);
8474
8475 retry:
8476 state->acquire_ctx = &ctx;
8477
8478 /*
8479 * Hardware readout is the only time we don't want to calculate
8480 * intermediate watermarks (since we don't trust the current
8481 * watermarks).
8482 */
8483 if (!HAS_GMCH(dev_priv))
8484 intel_state->skip_intermediate_wm = true;
8485
8486 ret = sanitize_watermarks_add_affected(state);
8487 if (ret)
8488 goto fail;
8489
8490 ret = intel_atomic_check(&dev_priv->drm, state);
8491 if (ret)
8492 goto fail;
8493
8494 /* Write calculated watermark values back */
8495 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
8496 crtc_state->wm.need_postvbl_update = true;
8497 intel_optimize_watermarks(intel_state, crtc);
8498
8499 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
8500 }
8501
8502 fail:
8503 if (ret == -EDEADLK) {
8504 drm_atomic_state_clear(state);
8505 drm_modeset_backoff(&ctx);
8506 goto retry;
8507 }
8508
8509 /*
8510 * If we fail here, it means that the hardware appears to be
8511 * programmed in a way that shouldn't be possible, given our
8512 * understanding of watermark requirements. This might mean a
8513 * mistake in the hardware readout code or a mistake in the
8514 * watermark calculations for a given platform. Raise a WARN
8515 * so that this is noticeable.
8516 *
8517 * If this actually happens, we'll have to just leave the
8518 * BIOS-programmed watermarks untouched and hope for the best.
8519 */
8520 drm_WARN(&dev_priv->drm, ret,
8521 "Could not determine valid watermarks for inherited state\n");
8522
8523 drm_atomic_state_put(state);
8524
8525 drm_modeset_drop_locks(&ctx);
8526 drm_modeset_acquire_fini(&ctx);
8527 }
8528
8529 static int intel_initial_commit(struct drm_device *dev)
8530 {
8531 struct drm_atomic_state *state = NULL;
8532 struct drm_modeset_acquire_ctx ctx;
8533 struct intel_crtc *crtc;
8534 int ret = 0;
8535
8536 state = drm_atomic_state_alloc(dev);
8537 if (!state)
8538 return -ENOMEM;
8539
8540 drm_modeset_acquire_init(&ctx, 0);
8541
8542 retry:
8543 state->acquire_ctx = &ctx;
8544
8545 for_each_intel_crtc(dev, crtc) {
8546 struct intel_crtc_state *crtc_state =
8547 intel_atomic_get_crtc_state(state, crtc);
8548
8549 if (IS_ERR(crtc_state)) {
8550 ret = PTR_ERR(crtc_state);
8551 goto out;
8552 }
8553
8554 if (crtc_state->hw.active) {
8555 struct intel_encoder *encoder;
8556
8557 /*
8558 * We've not yet detected sink capabilities
8559 			 * (audio, infoframes, etc.) and thus we don't want to
8560 * force a full state recomputation yet. We want that to
8561 * happen only for the first real commit from userspace.
8562 * So preserve the inherited flag for the time being.
8563 */
8564 crtc_state->inherited = true;
8565
8566 ret = drm_atomic_add_affected_planes(state, &crtc->base);
8567 if (ret)
8568 goto out;
8569
8570 /*
8571 * FIXME hack to force a LUT update to avoid the
8572 * plane update forcing the pipe gamma on without
8573 * having a proper LUT loaded. Remove once we
8574 * have readout for pipe gamma enable.
8575 */
8576 crtc_state->uapi.color_mgmt_changed = true;
8577
8578 for_each_intel_encoder_mask(dev, encoder,
8579 crtc_state->uapi.encoder_mask) {
8580 if (encoder->initial_fastset_check &&
8581 !encoder->initial_fastset_check(encoder, crtc_state)) {
8582 ret = drm_atomic_add_affected_connectors(state,
8583 &crtc->base);
8584 if (ret)
8585 goto out;
8586 }
8587 }
8588 }
8589 }
8590
8591 ret = drm_atomic_commit(state);
8592
8593 out:
8594 if (ret == -EDEADLK) {
8595 drm_atomic_state_clear(state);
8596 drm_modeset_backoff(&ctx);
8597 goto retry;
8598 }
8599
8600 drm_atomic_state_put(state);
8601
8602 drm_modeset_drop_locks(&ctx);
8603 drm_modeset_acquire_fini(&ctx);
8604
8605 return ret;
8606 }
8607
8608 static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
8609 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
8610 };
8611
8612 static void intel_mode_config_init(struct drm_i915_private *i915)
8613 {
8614 struct drm_mode_config *mode_config = &i915->drm.mode_config;
8615
8616 drm_mode_config_init(&i915->drm);
8617 INIT_LIST_HEAD(&i915->global_obj_list);
8618
8619 mode_config->min_width = 0;
8620 mode_config->min_height = 0;
8621
8622 mode_config->preferred_depth = 24;
8623 mode_config->prefer_shadow = 1;
8624
8625 mode_config->funcs = &intel_mode_funcs;
8626 mode_config->helper_private = &intel_mode_config_funcs;
8627
8628 mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
8629
8630 /*
8631 * Maximum framebuffer dimensions, chosen to match
8632 * the maximum render engine surface size on gen4+.
8633 */
8634 if (DISPLAY_VER(i915) >= 7) {
8635 mode_config->max_width = 16384;
8636 mode_config->max_height = 16384;
8637 } else if (DISPLAY_VER(i915) >= 4) {
8638 mode_config->max_width = 8192;
8639 mode_config->max_height = 8192;
8640 } else if (DISPLAY_VER(i915) == 3) {
8641 mode_config->max_width = 4096;
8642 mode_config->max_height = 4096;
8643 } else {
8644 mode_config->max_width = 2048;
8645 mode_config->max_height = 2048;
8646 }
8647
8648 if (IS_I845G(i915) || IS_I865G(i915)) {
8649 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
8650 mode_config->cursor_height = 1023;
8651 } else if (IS_I830(i915) || IS_I85X(i915) ||
8652 IS_I915G(i915) || IS_I915GM(i915)) {
8653 mode_config->cursor_width = 64;
8654 mode_config->cursor_height = 64;
8655 } else {
8656 mode_config->cursor_width = 256;
8657 mode_config->cursor_height = 256;
8658 }
8659 }
8660
8661 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
8662 {
8663 intel_atomic_global_obj_cleanup(i915);
8664 drm_mode_config_cleanup(&i915->drm);
8665 }
8666
8667 /* part #1: call before irq install */
8668 int intel_modeset_init_noirq(struct drm_i915_private *i915)
8669 {
8670 int ret;
8671
8672 if (i915_inject_probe_failure(i915))
8673 return -ENODEV;
8674
8675 if (HAS_DISPLAY(i915)) {
8676 ret = drm_vblank_init(&i915->drm,
8677 INTEL_NUM_PIPES(i915));
8678 if (ret)
8679 return ret;
8680 }
8681
8682 intel_bios_init(i915);
8683
8684 ret = intel_vga_register(i915);
8685 if (ret)
8686 goto cleanup_bios;
8687
8688 /* FIXME: completely on the wrong abstraction layer */
8689 intel_power_domains_init_hw(i915, false);
8690
8691 if (!HAS_DISPLAY(i915))
8692 return 0;
8693
8694 intel_dmc_ucode_init(i915);
8695
8696 i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
8697 i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
8698 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
8699
8700 intel_mode_config_init(i915);
8701
8702 ret = intel_cdclk_init(i915);
8703 if (ret)
8704 goto cleanup_vga_client_pw_domain_dmc;
8705
8706 ret = intel_dbuf_init(i915);
8707 if (ret)
8708 goto cleanup_vga_client_pw_domain_dmc;
8709
8710 ret = intel_bw_init(i915);
8711 if (ret)
8712 goto cleanup_vga_client_pw_domain_dmc;
8713
8714 init_llist_head(&i915->display.atomic_helper.free_list);
8715 INIT_WORK(&i915->display.atomic_helper.free_work,
8716 intel_atomic_helper_free_state_worker);
8717
8718 intel_init_quirks(i915);
8719
8720 intel_fbc_init(i915);
8721
8722 return 0;
8723
8724 cleanup_vga_client_pw_domain_dmc:
8725 intel_dmc_ucode_fini(i915);
8726 intel_power_domains_driver_remove(i915);
8727 intel_vga_unregister(i915);
8728 cleanup_bios:
8729 intel_bios_driver_remove(i915);
8730
8731 return ret;
8732 }
8733
8734 /* part #2: call after irq install, but before gem init */
8735 int intel_modeset_init_nogem(struct drm_i915_private *i915)
8736 {
8737 struct drm_device *dev = &i915->drm;
8738 enum pipe pipe;
8739 struct intel_crtc *crtc;
8740 int ret;
8741
8742 if (!HAS_DISPLAY(i915))
8743 return 0;
8744
8745 intel_init_pm(i915);
8746
8747 intel_panel_sanitize_ssc(i915);
8748
8749 intel_pps_setup(i915);
8750
8751 intel_gmbus_setup(i915);
8752
8753 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
8754 INTEL_NUM_PIPES(i915),
8755 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
8756
8757 for_each_pipe(i915, pipe) {
8758 ret = intel_crtc_init(i915, pipe);
8759 if (ret) {
8760 intel_mode_config_cleanup(i915);
8761 return ret;
8762 }
8763 }
8764
8765 intel_plane_possible_crtcs_init(i915);
8766 intel_shared_dpll_init(i915);
8767 intel_fdi_pll_freq_update(i915);
8768
8769 intel_update_czclk(i915);
8770 intel_modeset_init_hw(i915);
8771 intel_dpll_update_ref_clks(i915);
8772
8773 intel_hdcp_component_init(i915);
8774
8775 if (i915->display.cdclk.max_cdclk_freq == 0)
8776 intel_update_max_cdclk(i915);
8777
8778 /*
8779 * If the platform has HTI, we need to find out whether it has reserved
8780 * any display resources before we create our display outputs.
8781 */
8782 if (INTEL_INFO(i915)->display.has_hti)
8783 i915->hti_state = intel_de_read(i915, HDPORT_STATE);
8784
8785 /* Just disable it once at startup */
8786 intel_vga_disable(i915);
8787 intel_setup_outputs(i915);
8788
8789 drm_modeset_lock_all(dev);
8790 intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
8791 intel_acpi_assign_connector_fwnodes(i915);
8792 drm_modeset_unlock_all(dev);
8793
8794 for_each_intel_crtc(dev, crtc) {
8795 if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
8796 continue;
8797 intel_crtc_initial_plane_config(crtc);
8798 }
8799
8800 /*
8801 * Make sure hardware watermarks really match the state we read out.
8802 * Note that we need to do this after reconstructing the BIOS fb's
8803 * since the watermark calculation done here will use pstate->fb.
8804 */
8805 if (!HAS_GMCH(i915))
8806 sanitize_watermarks(i915);
8807
8808 return 0;
8809 }
8810
8811 /* part #3: call after gem init */
8812 int intel_modeset_init(struct drm_i915_private *i915)
8813 {
8814 int ret;
8815
8816 if (!HAS_DISPLAY(i915))
8817 return 0;
8818
8819 /*
8820 * Force all active planes to recompute their states. So that on
8821 * mode_setcrtc after probe, all the intel_plane_state variables
8822 * are already calculated and there is no assert_plane warnings
8823 * during bootup.
8824 */
8825 ret = intel_initial_commit(&i915->drm);
8826 if (ret)
8827 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
8828
8829 intel_overlay_setup(i915);
8830
8831 ret = intel_fbdev_init(&i915->drm);
8832 if (ret)
8833 return ret;
8834
8835 /* Only enable hotplug handling once the fbdev is fully set up. */
8836 intel_hpd_init(i915);
8837 intel_hpd_poll_disable(i915);
8838
8839 skl_watermark_ipc_init(i915);
8840
8841 return 0;
8842 }
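/*
 * Probe-time ordering of the three parts above (sketch of the
 * assumed caller, following the "part #N" comments):
 *
 *	intel_modeset_init_noirq(i915);		// part #1, before irq install
 *	...install irqs...
 *	intel_modeset_init_nogem(i915);		// part #2, before gem init
 *	...init gem...
 *	intel_modeset_init(i915);		// part #3
 */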
8843
8844 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
8845 {
8846 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
8847 /* 640x480@60Hz, ~25175 kHz */
8848 struct dpll clock = {
8849 .m1 = 18,
8850 .m2 = 7,
8851 .p1 = 13,
8852 .p2 = 4,
8853 .n = 2,
8854 };
8855 u32 dpll, fp;
8856 int i;
8857
8858 drm_WARN_ON(&dev_priv->drm,
8859 i9xx_calc_dpll_params(48000, &clock) != 25154);
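	/*
	 * The 25154 kHz above follows from the i9xx DPLL math (a sketch,
	 * assuming i9xx_calc_dpll_params() implements the classic formula):
	 *
	 *	m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 20 + 9      = 109
	 *	vco = refclk * m / (n + 2)    = 48000 * 109 / 4 = 1308000 kHz
	 *	dot = vco / (p1 * p2)         = 1308000 / 52   ~= 25154 kHz
	 *
	 * i.e. close enough to the nominal 25175 kHz VGA pixel clock.
	 */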
8860
8861 drm_dbg_kms(&dev_priv->drm,
8862 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
8863 pipe_name(pipe), clock.vco, clock.dot);
8864
8865 fp = i9xx_dpll_compute_fp(&clock);
8866 dpll = DPLL_DVO_2X_MODE |
8867 DPLL_VGA_MODE_DIS |
8868 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
8869 PLL_P2_DIVIDE_BY_4 |
8870 PLL_REF_INPUT_DREFCLK |
8871 DPLL_VCO_ENABLE;
8872
8873 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
8874 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
8875 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
8876 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
8877 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
8878 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
8879 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
8880
8881 intel_de_write(dev_priv, FP0(pipe), fp);
8882 intel_de_write(dev_priv, FP1(pipe), fp);
8883
8884 /*
8885 * Apparently we need to have VGA mode enabled prior to changing
8886 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
8887 * dividers, even though the register value does change.
8888 */
8889 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
8890 intel_de_write(dev_priv, DPLL(pipe), dpll);
8891
8892 /* Wait for the clocks to stabilize. */
8893 intel_de_posting_read(dev_priv, DPLL(pipe));
8894 udelay(150);
8895
8896 /* The pixel multiplier can only be updated once the
8897 * DPLL is enabled and the clocks are stable.
8898 *
8899 * So write it again.
8900 */
8901 intel_de_write(dev_priv, DPLL(pipe), dpll);
8902
8903 /* We do this three times for luck */
8904 	for (i = 0; i < 3; i++) {
8905 intel_de_write(dev_priv, DPLL(pipe), dpll);
8906 intel_de_posting_read(dev_priv, DPLL(pipe));
8907 udelay(150); /* wait for warmup */
8908 }
8909
8910 intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
8911 intel_de_posting_read(dev_priv, PIPECONF(pipe));
8912
8913 intel_wait_for_pipe_scanline_moving(crtc);
8914 }
8915
8916 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
8917 {
8918 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
8919
8920 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
8921 pipe_name(pipe));
8922
8923 drm_WARN_ON(&dev_priv->drm,
8924 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
8925 drm_WARN_ON(&dev_priv->drm,
8926 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
8927 drm_WARN_ON(&dev_priv->drm,
8928 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
8929 drm_WARN_ON(&dev_priv->drm,
8930 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
8931 drm_WARN_ON(&dev_priv->drm,
8932 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
8933
8934 intel_de_write(dev_priv, PIPECONF(pipe), 0);
8935 intel_de_posting_read(dev_priv, PIPECONF(pipe));
8936
8937 intel_wait_for_pipe_scanline_stopped(crtc);
8938
8939 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
8940 intel_de_posting_read(dev_priv, DPLL(pipe));
8941 }
8942
8943 void intel_display_resume(struct drm_device *dev)
8944 {
8945 struct drm_i915_private *i915 = to_i915(dev);
8946 struct drm_atomic_state *state = i915->modeset_restore_state;
8947 struct drm_modeset_acquire_ctx ctx;
8948 int ret;
8949
8950 if (!HAS_DISPLAY(i915))
8951 return;
8952
8953 i915->modeset_restore_state = NULL;
8954 if (state)
8955 state->acquire_ctx = &ctx;
8956
8957 drm_modeset_acquire_init(&ctx, 0);
8958
8959 while (1) {
8960 ret = drm_modeset_lock_all_ctx(dev, &ctx);
8961 if (ret != -EDEADLK)
8962 break;
8963
8964 drm_modeset_backoff(&ctx);
8965 }
8966
8967 if (!ret)
8968 ret = __intel_display_resume(i915, state, &ctx);
8969
8970 skl_watermark_ipc_update(i915);
8971 drm_modeset_drop_locks(&ctx);
8972 drm_modeset_acquire_fini(&ctx);
8973
8974 if (ret)
8975 drm_err(&i915->drm,
8976 "Restoring old state failed with %i\n", ret);
8977 if (state)
8978 drm_atomic_state_put(state);
8979 }
8980
8981 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
8982 {
8983 struct intel_connector *connector;
8984 struct drm_connector_list_iter conn_iter;
8985
8986 /* Kill all the work that may have been queued by hpd. */
8987 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
8988 for_each_intel_connector_iter(connector, &conn_iter) {
8989 if (connector->modeset_retry_work.func)
8990 cancel_work_sync(&connector->modeset_retry_work);
8991 if (connector->hdcp.shim) {
8992 cancel_delayed_work_sync(&connector->hdcp.check_work);
8993 cancel_work_sync(&connector->hdcp.prop_work);
8994 }
8995 }
8996 drm_connector_list_iter_end(&conn_iter);
8997 }
8998
8999 /* part #1: call before irq uninstall */
9000 void intel_modeset_driver_remove(struct drm_i915_private *i915)
9001 {
9002 if (!HAS_DISPLAY(i915))
9003 return;
9004
9005 flush_workqueue(i915->display.wq.flip);
9006 flush_workqueue(i915->display.wq.modeset);
9007
9008 flush_work(&i915->display.atomic_helper.free_work);
9009 drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
9010
9011 /*
9012 * MST topology needs to be suspended so we don't have any calls to
9013 * fbdev after it's finalized. MST will be destroyed later as part of
9014 * drm_mode_config_cleanup()
9015 */
9016 intel_dp_mst_suspend(i915);
9017 }
9018
9019 /* part #2: call after irq uninstall */
9020 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
9021 {
9022 if (!HAS_DISPLAY(i915))
9023 return;
9024
9025 /*
9026 * Due to the hpd irq storm handling the hotplug work can re-arm the
9027 * poll handlers. Hence disable polling after hpd handling is shut down.
9028 */
9029 intel_hpd_poll_fini(i915);
9030
9031 /* poll work can call into fbdev, hence clean that up afterwards */
9032 intel_fbdev_fini(i915);
9033
9034 intel_unregister_dsm_handler();
9035
9036 /* flush any delayed tasks or pending work */
9037 flush_scheduled_work();
9038
9039 intel_hdcp_component_fini(i915);
9040
9041 intel_mode_config_cleanup(i915);
9042
9043 intel_overlay_cleanup(i915);
9044
9045 intel_gmbus_teardown(i915);
9046
9047 destroy_workqueue(i915->display.wq.flip);
9048 destroy_workqueue(i915->display.wq.modeset);
9049
9050 intel_fbc_cleanup(i915);
9051 }
9052
9053 /* part #3: call after gem init */
9054 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
9055 {
9056 intel_dmc_ucode_fini(i915);
9057
9058 intel_power_domains_driver_remove(i915);
9059
9060 intel_vga_unregister(i915);
9061
9062 intel_bios_driver_remove(i915);
9063 }
9064
9065 bool intel_modeset_probe_defer(struct pci_dev *pdev)
9066 {
9067 struct drm_privacy_screen *privacy_screen;
9068
9069 /*
9070 * apple-gmux is needed on dual GPU MacBook Pro
9071 * to probe the panel if we're the inactive GPU.
9072 */
9073 if (vga_switcheroo_client_probe_defer(pdev))
9074 return true;
9075
9076 /* If the LCD panel has a privacy-screen, wait for it */
9077 privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
9078 if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
9079 return true;
9080
9081 drm_privacy_screen_put(privacy_screen);
9082
9083 return false;
9084 }
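/*
 * Usage sketch (hypothetical PCI probe path; foo_pci_probe() is made
 * up, but this mirrors how a driver ->probe() would consume it):
 *
 *	static int foo_pci_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *ent)
 *	{
 *		if (intel_modeset_probe_defer(pdev))
 *			return -EPROBE_DEFER;
 *		...
 *	}
 */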
9085
9086 void intel_display_driver_register(struct drm_i915_private *i915)
9087 {
9088 if (!HAS_DISPLAY(i915))
9089 return;
9090
9091 intel_display_debugfs_register(i915);
9092
9093 /* Must be done after probing outputs */
9094 intel_opregion_register(i915);
9095 intel_acpi_video_register(i915);
9096
9097 intel_audio_init(i915);
9098
9099 /*
9100 * Some ports require correctly set-up hpd registers for
9101 	 * detection to work properly (otherwise they report a ghost
9102 	 * connected connector status), e.g. VGA on gm45. Hence we can only set
9103 * up the initial fbdev config after hpd irqs are fully
9104 * enabled. We do it last so that the async config cannot run
9105 * before the connectors are registered.
9106 */
9107 intel_fbdev_initial_config_async(&i915->drm);
9108
9109 /*
9110 * We need to coordinate the hotplugs with the asynchronous
9111 * fbdev configuration, for which we use the
9112 * fbdev->async_cookie.
9113 */
9114 drm_kms_helper_poll_init(&i915->drm);
9115 }
9116
9117 void intel_display_driver_unregister(struct drm_i915_private *i915)
9118 {
9119 if (!HAS_DISPLAY(i915))
9120 return;
9121
9122 intel_fbdev_unregister(i915);
9123 intel_audio_deinit(i915);
9124
9125 /*
9126 * After flushing the fbdev (incl. a late async config which
9127 * will have delayed queuing of a hotplug event), then flush
9128 * the hotplug events.
9129 */
9130 drm_kms_helper_poll_fini(&i915->drm);
9131 drm_atomic_helper_shutdown(&i915->drm);
9132
9133 acpi_video_unregister();
9134 intel_opregion_unregister(i915);
9135 }
9136
9137 bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
9138 {
9139 return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
9140 }
9141