/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tdf.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
			      const struct intel_crtc_state *crtc_state);

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

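/*
 * Read a CCK clock divider register and convert it to a frequency in kHz
 * based on the given reference clock. Warns if a frequency change is
 * still in progress.
 */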
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

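/*
 * Same as vlv_get_cck_clock(), but look up the HPLL VCO frequency
 * (caching it on first use) and use it as the reference clock.
 */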
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

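/* Refresh the cached CZ clock frequency; only relevant on VLV/CHV. */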
void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

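/*
 * "HDR mode" here means that only HDR-capable planes (and possibly
 * the cursor) are active on the pipe.
 */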
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

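/*
 * The primary pipe of a joiner configuration is the lowest numbered
 * pipe in the joiner pipe mask; all other pipes in the mask are
 * secondaries.
 */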
static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->joiner_pipes) - 1;
}

u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->joiner_pipes)
		return crtc_state->joiner_pipes & ~BIT(joiner_primary_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe != joiner_primary_pipe(crtc_state);
}

bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe == joiner_primary_pipe(crtc_state);
}

static int intel_joiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->joiner_pipes);
}

u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return BIT(crtc->pipe) | crtc_state->joiner_pipes;
}

struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_joiner_secondary(crtc_state))
		return intel_crtc_for_pipe(i915, joiner_primary_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

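/*
 * Wait for the pipe to actually stop: on DISPLAY_VER >= 4 poll the
 * transcoder state bit, on older hardware wait for the scanline
 * counter to stop moving.
 */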
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv,
					TRANSCONF(dev_priv, cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(dev_priv, cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder), str_on_off(state),
			str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(dev_priv, 0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(dev_priv, 0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

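/*
 * Enable the transcoder/pipe, after asserting that the preconditions
 * hold (planes off, PLLs/FDI enabled as needed), and apply the
 * relevant enable-time workarounds.
 */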
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(dev_priv, pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	if (DISPLAY_VER(dev_priv) >= 14) {
		u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
		u32 set = 0;

		if (DISPLAY_VER(dev_priv) == 14)
			set |= DP_FEC_BS_JITTER_WA;

		intel_de_rmw(dev_priv,
			     hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     clear, set);
	}

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    new_crtc_state->dsc.compression_enable) {
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
		val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
				      TRANSCONF_PIXEL_COUNT_SCALING_X4);
	}

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

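/*
 * Disable the transcoder/pipe. All planes must already be disabled;
 * when the enable bit was actually cleared we also wait for the pipe
 * to turn off via intel_wait_for_pipe_off().
 */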
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    old_crtc_state->dsc.compression_enable)
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

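/* Size of the GTT mapping needed for a rotated framebuffer view, in pages. */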
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc && !plane_state->no_fbc_reason &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

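/*
 * Disable a plane outside of an atomic commit, e.g. when sanitizing
 * the BIOS-programmed hardware state. Updates the software CRTC state
 * to match and applies the vblank waits the hardware requires.
 */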
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(NULL, plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA #1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2(dev_priv))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

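/*
 * Check whether any CRTC still has a commit whose cleanup hasn't
 * completed, i.e. an old framebuffer that may still be pinned. If so,
 * wait for a vblank on that CRTC to give the cleanup a chance to run.
 */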
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *primary_crtc;
	int num_encoders = 0;
	int i;

	primary_crtc = intel_primary_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &primary_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(primary_crtc->pipe));

	return encoder;
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/*
	 * Force use of hard-coded filter coefficients as some
	 * pre-programmed values are broken, e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/*
	 * Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 * when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}

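/*
 * A feature is "enabling" if it's off (or the CRTC undergoes a full
 * modeset) in the old state and on in the new state; "disabling" is
 * the converse.
 */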
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
		old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
		old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}

static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
				const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->cmrr.cmrr_m != new_crtc_state->cmrr.cmrr_m ||
		old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}

static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

#undef is_disabling
#undef is_enabling

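/*
 * Post-plane-update bookkeeping: flush frontbuffer tracking, update
 * watermarks, and turn off any workarounds that are no longer needed
 * with the new plane configuration.
 */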
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
				       ~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_toggle_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			intel_plane_async_flip(NULL, plane,
					       old_crtc_state, old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

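/*
 * Pre-plane-update counterpart of intel_post_plane_update(): disable
 * features that must be off before the planes are reprogrammed, arm
 * the required workarounds, and program intermediate watermarks.
 */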
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (intel_crtc_vrr_disabling(state, crtc)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned int fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(NULL, plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

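/*
 * The intel_encoders_*() helpers below invoke the corresponding
 * (optional) encoder hook for every encoder connected to the given
 * CRTC in this atomic state.
 */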
static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}

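/*
 * Full modeset enable sequence for ILK-style (PCH) platforms:
 * configure the transcoder, panel fitter and LUTs, enable the pipe,
 * then bring up the PCH and the encoders.
 */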
ilk_crtc_enable(struct intel_atomic_state * state,struct intel_crtc * crtc)1528 static void ilk_crtc_enable(struct intel_atomic_state *state,
1529 struct intel_crtc *crtc)
1530 {
1531 const struct intel_crtc_state *new_crtc_state =
1532 intel_atomic_get_new_crtc_state(state, crtc);
1533 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1534 enum pipe pipe = crtc->pipe;
1535
1536 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1537 return;
1538
1539 /*
1540 * Sometimes spurious CPU pipe underruns happen during FDI
1541 * training, at least with VGA+HDMI cloning. Suppress them.
1542 *
1543 * On ILK we get an occasional spurious CPU pipe underruns
1544 * between eDP port A enable and vdd enable. Also PCH port
1545 * enable seems to result in the occasional CPU pipe underrun.
1546 *
1547 * Spurious PCH underruns also occur during PCH enabling.
1548 */
1549 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1550 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1551
1552 ilk_configure_cpu_transcoder(new_crtc_state);
1553
1554 intel_set_pipe_src_size(new_crtc_state);
1555
1556 crtc->active = true;
1557
1558 intel_encoders_pre_enable(state, crtc);
1559
1560 if (new_crtc_state->has_pch_encoder) {
1561 ilk_pch_pre_enable(state, crtc);
1562 } else {
1563 assert_fdi_tx_disabled(dev_priv, pipe);
1564 assert_fdi_rx_disabled(dev_priv, pipe);
1565 }
1566
1567 ilk_pfit_enable(new_crtc_state);
1568
1569 /*
1570 * On ILK+ the LUT must be loaded before the pipe is running, but
1571 * with clocks enabled
1572 */
1573 intel_color_modeset(new_crtc_state);
1574
1575 intel_initial_watermarks(state, crtc);
1576 intel_enable_transcoder(new_crtc_state);
1577
1578 if (new_crtc_state->has_pch_encoder)
1579 ilk_pch_enable(state, crtc);
1580
1581 intel_crtc_vblank_on(new_crtc_state);
1582
1583 intel_encoders_enable(state, crtc);
1584
1585 if (HAS_PCH_CPT(dev_priv))
1586 intel_wait_for_pipe_scanline_moving(crtc);
1587
1588 /*
1589 * Must wait for vblank to avoid spurious PCH FIFO underruns.
1590 * And a second vblank wait is needed at least on ILK with
1591 * some interlaced HDMI modes. Let's do the double wait always
1592 * in case there are more corner cases we don't know about.
1593 */
1594 if (new_crtc_state->has_pch_encoder) {
1595 intel_crtc_wait_for_next_vblank(crtc);
1596 intel_crtc_wait_for_next_vblank(crtc);
1597 }
1598 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1599 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1600 }
1601
1602 /* Display WA #1180: WaDisableScalarClockGating: glk */
1603 static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
1604 {
1605 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1606
1607 return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
1608 }
1609
1610 static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
1611 {
1612 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1613 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1614
1615 intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
1616 mask, enable ? mask : 0);
1617 }
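/*
 * Callers apply this workaround before enabling the pipe and drop it
 * again after the first vblank once the pipe is running; see the
 * glk_need_scaler_clock_gating_wa() checks in hsw_crtc_enable() below.
 */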
1618
1619 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1620 {
1621 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1622 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1623
1624 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1625 HSW_LINETIME(crtc_state->linetime) |
1626 HSW_IPS_LINETIME(crtc_state->ips_linetime));
1627 }
1628
1629 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1630 {
1631 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1632 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1633
1634 intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
1635 HSW_FRAME_START_DELAY_MASK,
1636 HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
1637 }
1638
1639 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1640 {
1641 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1642 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1643 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1644
1645 if (crtc_state->has_pch_encoder) {
1646 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1647 &crtc_state->fdi_m_n);
1648 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1649 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1650 &crtc_state->dp_m_n);
1651 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1652 &crtc_state->dp_m2_n2);
1653 }
1654
1655 intel_set_transcoder_timings(crtc_state);
1656 if (HAS_VRR(dev_priv))
1657 intel_vrr_set_transcoder_timings(crtc_state);
1658
1659 if (cpu_transcoder != TRANSCODER_EDP)
1660 intel_de_write(dev_priv, TRANS_MULT(dev_priv, cpu_transcoder),
1661 crtc_state->pixel_multiplier - 1);
1662
1663 hsw_set_frame_start_delay(crtc_state);
1664
1665 hsw_set_transconf(crtc_state);
1666 }
1667
1668 static void hsw_crtc_enable(struct intel_atomic_state *state,
1669 struct intel_crtc *crtc)
1670 {
1671 const struct intel_crtc_state *new_crtc_state =
1672 intel_atomic_get_new_crtc_state(state, crtc);
1673 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1674 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1675 struct intel_crtc *pipe_crtc;
1676
1677 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1678 return;
1679
1680 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1681 intel_crtc_joined_pipe_mask(new_crtc_state))
1682 intel_dmc_enable_pipe(dev_priv, pipe_crtc->pipe);
1683
1684 intel_encoders_pre_pll_enable(state, crtc);
1685
1686 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1687 intel_crtc_joined_pipe_mask(new_crtc_state)) {
1688 const struct intel_crtc_state *pipe_crtc_state =
1689 intel_atomic_get_new_crtc_state(state, pipe_crtc);
1690
1691 if (pipe_crtc_state->shared_dpll)
1692 intel_enable_shared_dpll(pipe_crtc_state);
1693 }
1694
1695 intel_encoders_pre_enable(state, crtc);
1696
1697 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1698 intel_crtc_joined_pipe_mask(new_crtc_state)) {
1699 const struct intel_crtc_state *pipe_crtc_state =
1700 intel_atomic_get_new_crtc_state(state, pipe_crtc);
1701
1702 intel_dsc_enable(pipe_crtc_state);
1703
1704 if (DISPLAY_VER(dev_priv) >= 13)
1705 intel_uncompressed_joiner_enable(pipe_crtc_state);
1706
1707 intel_set_pipe_src_size(pipe_crtc_state);
1708
1709 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
1710 bdw_set_pipe_misc(NULL, pipe_crtc_state);
1711 }
1712
1713 if (!transcoder_is_dsi(cpu_transcoder))
1714 hsw_configure_cpu_transcoder(new_crtc_state);
1715
1716 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1717 intel_crtc_joined_pipe_mask(new_crtc_state)) {
1718 const struct intel_crtc_state *pipe_crtc_state =
1719 intel_atomic_get_new_crtc_state(state, pipe_crtc);
1720
1721 pipe_crtc->active = true;
1722
1723 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
1724 glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);
1725
1726 if (DISPLAY_VER(dev_priv) >= 9)
1727 skl_pfit_enable(pipe_crtc_state);
1728 else
1729 ilk_pfit_enable(pipe_crtc_state);
1730
1731 /*
1732 * On ILK+ the LUT must be loaded before the pipe is running, but
1733 * with clocks enabled
1734 */
1735 intel_color_modeset(pipe_crtc_state);
1736
1737 hsw_set_linetime_wm(pipe_crtc_state);
1738
1739 if (DISPLAY_VER(dev_priv) >= 11)
1740 icl_set_pipe_chicken(pipe_crtc_state);
1741
1742 intel_initial_watermarks(state, pipe_crtc);
1743 }
1744
1745 intel_encoders_enable(state, crtc);
1746
1747 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1748 intel_crtc_joined_pipe_mask(new_crtc_state)) {
1749 const struct intel_crtc_state *pipe_crtc_state =
1750 intel_atomic_get_new_crtc_state(state, pipe_crtc);
1751 enum pipe hsw_workaround_pipe;
1752
1753 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) {
1754 intel_crtc_wait_for_next_vblank(pipe_crtc);
1755 glk_pipe_scaler_clock_gating_wa(pipe_crtc, false);
1756 }
1757
1758 /*
1759 * If we change the relative order between pipe/planes
1760 * enabling, we need to change the workaround.
1761 */
1762 hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
1763 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
1764 struct intel_crtc *wa_crtc =
1765 intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
1766
1767 intel_crtc_wait_for_next_vblank(wa_crtc);
1768 intel_crtc_wait_for_next_vblank(wa_crtc);
1769 }
1770 }
1771 }
1772
1773 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
1774 {
1775 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1776 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1777 enum pipe pipe = crtc->pipe;
1778
1779 /* To avoid upsetting the power well on Haswell, only disable the pfit if
1780 * it's in use. The hw state code will make sure we get this right. */
1781 if (!old_crtc_state->pch_pfit.enabled)
1782 return;
1783
1784 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
1785 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
1786 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
1787 }
1788
1789 static void ilk_crtc_disable(struct intel_atomic_state *state,
1790 struct intel_crtc *crtc)
1791 {
1792 const struct intel_crtc_state *old_crtc_state =
1793 intel_atomic_get_old_crtc_state(state, crtc);
1794 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1795 enum pipe pipe = crtc->pipe;
1796
1797 /*
1798 * Sometimes spurious CPU pipe underruns happen when the
1799 * pipe is already disabled, but FDI RX/TX is still enabled.
1800 * Happens at least with VGA+HDMI cloning. Suppress them.
1801 */
1802 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1803 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1804
1805 intel_encoders_disable(state, crtc);
1806
1807 intel_crtc_vblank_off(old_crtc_state);
1808
1809 intel_disable_transcoder(old_crtc_state);
1810
1811 ilk_pfit_disable(old_crtc_state);
1812
1813 if (old_crtc_state->has_pch_encoder)
1814 ilk_pch_disable(state, crtc);
1815
1816 intel_encoders_post_disable(state, crtc);
1817
1818 if (old_crtc_state->has_pch_encoder)
1819 ilk_pch_post_disable(state, crtc);
1820
1821 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1822 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1823
1824 intel_disable_shared_dpll(old_crtc_state);
1825 }
1826
1827 static void hsw_crtc_disable(struct intel_atomic_state *state,
1828 struct intel_crtc *crtc)
1829 {
1830 const struct intel_crtc_state *old_crtc_state =
1831 intel_atomic_get_old_crtc_state(state, crtc);
1832 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1833 struct intel_crtc *pipe_crtc;
1834
1835 /*
1836 * FIXME collapse everything to one hook.
1837 * Need care with mst->ddi interactions.
1838 */
1839 intel_encoders_disable(state, crtc);
1840 intel_encoders_post_disable(state, crtc);
1841
1842 for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
1843 intel_crtc_joined_pipe_mask(old_crtc_state)) {
1844 const struct intel_crtc_state *old_pipe_crtc_state =
1845 intel_atomic_get_old_crtc_state(state, pipe_crtc);
1846
1847 intel_disable_shared_dpll(old_pipe_crtc_state);
1848 }
1849
1850 intel_encoders_post_pll_disable(state, crtc);
1851
1852 for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
1853 intel_crtc_joined_pipe_mask(old_crtc_state))
1854 intel_dmc_disable_pipe(i915, pipe_crtc->pipe);
1855 }
1856
1857 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
1858 {
1859 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1860 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1861
1862 if (!crtc_state->gmch_pfit.control)
1863 return;
1864
1865 /*
1866 * The panel fitter should only be adjusted whilst the pipe is disabled,
1867 * according to register description and PRM.
1868 */
1869 drm_WARN_ON(&dev_priv->drm,
1870 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_ENABLE);
1871 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1872
1873 intel_de_write(dev_priv, PFIT_PGM_RATIOS(dev_priv),
1874 crtc_state->gmch_pfit.pgm_ratios);
1875 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv),
1876 crtc_state->gmch_pfit.control);
1877
1878 /* Border color in case we don't scale up to the full screen. Black by
1879 * default, change to something else for debugging. */
1880 intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0);
1881 }
1882
1883 /* Prefer intel_encoder_is_combo() */
1884 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
1885 {
1886 if (phy == PHY_NONE)
1887 return false;
1888 else if (IS_ALDERLAKE_S(dev_priv))
1889 return phy <= PHY_E;
1890 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
1891 return phy <= PHY_D;
1892 else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
1893 return phy <= PHY_C;
1894 else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
1895 return phy <= PHY_B;
1896 else
1897 /*
1898 * DG2 outputs labelled as "combo PHY" in the bspec use
1899 * SNPS PHYs with completely different programming,
1900 * hence we always return false here.
1901 */
1902 return false;
1903 }
1904
1905 /* Prefer intel_encoder_is_tc() */
1906 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
1907 {
1908 /*
1909 * Discrete GPU PHYs are not attached to FIAs to support the TC
1910 * subsystem (legacy or non-legacy), and only support native DP/HDMI
1911 */
1912 if (IS_DGFX(dev_priv))
1913 return false;
1914
1915 if (DISPLAY_VER(dev_priv) >= 13)
1916 return phy >= PHY_F && phy <= PHY_I;
1917 else if (IS_TIGERLAKE(dev_priv))
1918 return phy >= PHY_D && phy <= PHY_I;
1919 else if (IS_ICELAKE(dev_priv))
1920 return phy >= PHY_C && phy <= PHY_F;
1921
1922 return false;
1923 }
1924
1925 /* Prefer intel_encoder_is_snps() */
1926 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
1927 {
1928 /*
1929 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
1930 * (PHY E) use Synopsys PHYs. See intel_phy_is_tc().
1931 */
1932 return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
1933 }
1934
1935 /* Prefer intel_encoder_to_phy() */
1936 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
1937 {
1938 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
1939 return PHY_D + port - PORT_D_XELPD;
1940 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
1941 return PHY_F + port - PORT_TC1;
1942 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
1943 return PHY_B + port - PORT_TC1;
1944 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
1945 return PHY_C + port - PORT_TC1;
1946 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
1947 port == PORT_D)
1948 return PHY_A;
1949
1950 return PHY_A + port - PORT_A;
1951 }
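/*
 * For example, per the branches above: on ADL-S PORT_TC1 maps to PHY_B
 * and PORT_TC2 to PHY_C, while on JSL/EHL port D shares PHY_A.
 */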
1952
1953 /* Prefer intel_encoder_to_tc() */
1954 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
1955 {
1956 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
1957 return TC_PORT_NONE;
1958
1959 if (DISPLAY_VER(dev_priv) >= 12)
1960 return TC_PORT_1 + port - PORT_TC1;
1961 else
1962 return TC_PORT_1 + port - PORT_C;
1963 }
1964
1965 enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
1966 {
1967 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1968
1969 return intel_port_to_phy(i915, encoder->port);
1970 }
1971
1972 bool intel_encoder_is_combo(struct intel_encoder *encoder)
1973 {
1974 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1975
1976 return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder));
1977 }
1978
1979 bool intel_encoder_is_snps(struct intel_encoder *encoder)
1980 {
1981 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1982
1983 return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder));
1984 }
1985
1986 bool intel_encoder_is_tc(struct intel_encoder *encoder)
1987 {
1988 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1989
1990 return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
1991 }
1992
1993 enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
1994 {
1995 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1996
1997 return intel_port_to_tc(i915, encoder->port);
1998 }
1999
2000 enum intel_display_power_domain
2001 intel_aux_power_domain(struct intel_digital_port *dig_port)
2002 {
2003 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
2004
2005 if (intel_tc_port_in_tbt_alt_mode(dig_port))
2006 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
2007
2008 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
2009 }
2010
2011 static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2012 struct intel_power_domain_mask *mask)
2013 {
2014 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2015 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2016 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2017 struct drm_encoder *encoder;
2018 enum pipe pipe = crtc->pipe;
2019
2020 bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
2021
2022 if (!crtc_state->hw.active)
2023 return;
2024
2025 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
2026 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
2027 if (crtc_state->pch_pfit.enabled ||
2028 crtc_state->pch_pfit.force_thru)
2029 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
2030
2031 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2032 crtc_state->uapi.encoder_mask) {
2033 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2034
2035 set_bit(intel_encoder->power_domain, mask->bits);
2036 }
2037
2038 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2039 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
2040
2041 if (crtc_state->shared_dpll)
2042 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
2043
2044 if (crtc_state->dsc.compression_enable)
2045 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
2046 }
2047
2048 void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2049 struct intel_power_domain_mask *old_domains)
2050 {
2051 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2052 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2053 enum intel_display_power_domain domain;
2054 struct intel_power_domain_mask domains, new_domains;
2055
2056 get_crtc_power_domains(crtc_state, &domains);
2057
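/*
 * Set differences via bitmap_andnot():
 * new_domains = domains & ~enabled (domains to grab now),
 * old_domains = enabled & ~domains (domains for the caller to
 * release once the new state has been committed).
 */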
2058 bitmap_andnot(new_domains.bits,
2059 domains.bits,
2060 crtc->enabled_power_domains.mask.bits,
2061 POWER_DOMAIN_NUM);
2062 bitmap_andnot(old_domains->bits,
2063 crtc->enabled_power_domains.mask.bits,
2064 domains.bits,
2065 POWER_DOMAIN_NUM);
2066
2067 for_each_power_domain(domain, &new_domains)
2068 intel_display_power_get_in_set(dev_priv,
2069 &crtc->enabled_power_domains,
2070 domain);
2071 }
2072
2073 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2074 struct intel_power_domain_mask *domains)
2075 {
2076 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2077 &crtc->enabled_power_domains,
2078 domains);
2079 }
2080
2081 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
2082 {
2083 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2084 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2085
2086 if (intel_crtc_has_dp_encoder(crtc_state)) {
2087 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
2088 &crtc_state->dp_m_n);
2089 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
2090 &crtc_state->dp_m2_n2);
2091 }
2092
2093 intel_set_transcoder_timings(crtc_state);
2094
2095 i9xx_set_pipeconf(crtc_state);
2096 }
2097
2098 static void valleyview_crtc_enable(struct intel_atomic_state *state,
2099 struct intel_crtc *crtc)
2100 {
2101 const struct intel_crtc_state *new_crtc_state =
2102 intel_atomic_get_new_crtc_state(state, crtc);
2103 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2104 enum pipe pipe = crtc->pipe;
2105
2106 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2107 return;
2108
2109 i9xx_configure_cpu_transcoder(new_crtc_state);
2110
2111 intel_set_pipe_src_size(new_crtc_state);
2112
2113 intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
2114
2115 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2116 intel_de_write(dev_priv, CHV_BLEND(dev_priv, pipe),
2117 CHV_BLEND_LEGACY);
2118 intel_de_write(dev_priv, CHV_CANVAS(dev_priv, pipe), 0);
2119 }
2120
2121 crtc->active = true;
2122
2123 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2124
2125 intel_encoders_pre_pll_enable(state, crtc);
2126
2127 if (IS_CHERRYVIEW(dev_priv))
2128 chv_enable_pll(new_crtc_state);
2129 else
2130 vlv_enable_pll(new_crtc_state);
2131
2132 intel_encoders_pre_enable(state, crtc);
2133
2134 i9xx_pfit_enable(new_crtc_state);
2135
2136 intel_color_modeset(new_crtc_state);
2137
2138 intel_initial_watermarks(state, crtc);
2139 intel_enable_transcoder(new_crtc_state);
2140
2141 intel_crtc_vblank_on(new_crtc_state);
2142
2143 intel_encoders_enable(state, crtc);
2144 }
2145
2146 static void i9xx_crtc_enable(struct intel_atomic_state *state,
2147 struct intel_crtc *crtc)
2148 {
2149 const struct intel_crtc_state *new_crtc_state =
2150 intel_atomic_get_new_crtc_state(state, crtc);
2151 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2152 enum pipe pipe = crtc->pipe;
2153
2154 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2155 return;
2156
2157 i9xx_configure_cpu_transcoder(new_crtc_state);
2158
2159 intel_set_pipe_src_size(new_crtc_state);
2160
2161 crtc->active = true;
2162
2163 if (DISPLAY_VER(dev_priv) != 2)
2164 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2165
2166 intel_encoders_pre_enable(state, crtc);
2167
2168 i9xx_enable_pll(new_crtc_state);
2169
2170 i9xx_pfit_enable(new_crtc_state);
2171
2172 intel_color_modeset(new_crtc_state);
2173
2174 if (!intel_initial_watermarks(state, crtc))
2175 intel_update_watermarks(dev_priv);
2176 intel_enable_transcoder(new_crtc_state);
2177
2178 intel_crtc_vblank_on(new_crtc_state);
2179
2180 intel_encoders_enable(state, crtc);
2181
2182 /* prevents spurious underruns */
2183 if (DISPLAY_VER(dev_priv) == 2)
2184 intel_crtc_wait_for_next_vblank(crtc);
2185 }
2186
2187 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2188 {
2189 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2190 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2191
2192 if (!old_crtc_state->gmch_pfit.control)
2193 return;
2194
2195 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2196
2197 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2198 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)));
2199 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 0);
2200 }
2201
2202 static void i9xx_crtc_disable(struct intel_atomic_state *state,
2203 struct intel_crtc *crtc)
2204 {
2205 struct intel_crtc_state *old_crtc_state =
2206 intel_atomic_get_old_crtc_state(state, crtc);
2207 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2208 enum pipe pipe = crtc->pipe;
2209
2210 /*
2211 * On gen2 planes are double buffered but the pipe isn't, so we must
2212 * wait for planes to fully turn off before disabling the pipe.
2213 */
2214 if (DISPLAY_VER(dev_priv) == 2)
2215 intel_crtc_wait_for_next_vblank(crtc);
2216
2217 intel_encoders_disable(state, crtc);
2218
2219 intel_crtc_vblank_off(old_crtc_state);
2220
2221 intel_disable_transcoder(old_crtc_state);
2222
2223 i9xx_pfit_disable(old_crtc_state);
2224
2225 intel_encoders_post_disable(state, crtc);
2226
2227 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2228 if (IS_CHERRYVIEW(dev_priv))
2229 chv_disable_pll(dev_priv, pipe);
2230 else if (IS_VALLEYVIEW(dev_priv))
2231 vlv_disable_pll(dev_priv, pipe);
2232 else
2233 i9xx_disable_pll(old_crtc_state);
2234 }
2235
2236 intel_encoders_post_pll_disable(state, crtc);
2237
2238 if (DISPLAY_VER(dev_priv) != 2)
2239 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2240
2241 if (!dev_priv->display.funcs.wm->initial_watermarks)
2242 intel_update_watermarks(dev_priv);
2243
2244 /* clock the pipe down to 640x480@60 to potentially save power */
2245 if (IS_I830(dev_priv))
2246 i830_enable_pipe(dev_priv, pipe);
2247 }
2248
2249 void intel_encoder_destroy(struct drm_encoder *encoder)
2250 {
2251 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2252
2253 drm_encoder_cleanup(encoder);
2254 kfree(intel_encoder);
2255 }
2256
2257 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2258 {
2259 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2260
2261 /* GDG double wide on either pipe, otherwise pipe A only */
2262 return DISPLAY_VER(dev_priv) < 4 &&
2263 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2264 }
2265
2266 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2267 {
2268 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2269 struct drm_rect src;
2270
2271 /*
2272 * We only use IF-ID interlacing. If we ever use
2273 * PF-ID we'll need to adjust the pixel_rate here.
2274 */
2275
2276 if (!crtc_state->pch_pfit.enabled)
2277 return pixel_rate;
2278
2279 drm_rect_init(&src, 0, 0,
2280 drm_rect_width(&crtc_state->pipe_src) << 16,
2281 drm_rect_height(&crtc_state->pipe_src) << 16);
2282
2283 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2284 pixel_rate);
2285 }
2286
2287 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2288 const struct drm_display_mode *timings)
2289 {
2290 mode->hdisplay = timings->crtc_hdisplay;
2291 mode->htotal = timings->crtc_htotal;
2292 mode->hsync_start = timings->crtc_hsync_start;
2293 mode->hsync_end = timings->crtc_hsync_end;
2294
2295 mode->vdisplay = timings->crtc_vdisplay;
2296 mode->vtotal = timings->crtc_vtotal;
2297 mode->vsync_start = timings->crtc_vsync_start;
2298 mode->vsync_end = timings->crtc_vsync_end;
2299
2300 mode->flags = timings->flags;
2301 mode->type = DRM_MODE_TYPE_DRIVER;
2302
2303 mode->clock = timings->crtc_clock;
2304
2305 drm_mode_set_name(mode);
2306 }
2307
2308 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2309 {
2310 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2311
2312 if (HAS_GMCH(dev_priv))
2313 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2314 crtc_state->pixel_rate =
2315 crtc_state->hw.pipe_mode.crtc_clock;
2316 else
2317 crtc_state->pixel_rate =
2318 ilk_pipe_pixel_rate(crtc_state);
2319 }
2320
2321 static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2322 struct drm_display_mode *mode)
2323 {
2324 int num_pipes = intel_joiner_num_pipes(crtc_state);
2325
2326 if (num_pipes < 2)
2327 return;
2328
2329 mode->crtc_clock /= num_pipes;
2330 mode->crtc_hdisplay /= num_pipes;
2331 mode->crtc_hblank_start /= num_pipes;
2332 mode->crtc_hblank_end /= num_pipes;
2333 mode->crtc_hsync_start /= num_pipes;
2334 mode->crtc_hsync_end /= num_pipes;
2335 mode->crtc_htotal /= num_pipes;
2336 }
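/*
 * E.g. (illustrative numbers) with two joined pipes driving a 7680 pixel
 * wide mode, each pipe is left with 3840 pixel wide horizontal timings
 * and half the original pixel clock.
 */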
2337
2338 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2339 struct drm_display_mode *mode)
2340 {
2341 int overlap = crtc_state->splitter.pixel_overlap;
2342 int n = crtc_state->splitter.link_count;
2343
2344 if (!crtc_state->splitter.enable)
2345 return;
2346
2347 /*
2348 * eDP MSO uses segment timings from EDID for transcoder
2349 * timings, but full mode for everything else.
2350 *
2351 * h_full = (h_segment - pixel_overlap) * link_count
2352 */
2353 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2354 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2355 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2356 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2357 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2358 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2359 mode->crtc_clock *= n;
2360 }
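/*
 * E.g. (illustrative numbers) with link_count == 2 and pixel_overlap == 8,
 * a 1928 pixel wide EDID segment expands to a (1928 - 8) * 2 == 3840
 * pixel wide full mode, with the pixel clock scaled up to match.
 */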
2361
2362 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2363 {
2364 struct drm_display_mode *mode = &crtc_state->hw.mode;
2365 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2366 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2367
2368 /*
2369 * Start with the adjusted_mode crtc timings, which
2370 * have been filled with the transcoder timings.
2371 */
2372 drm_mode_copy(pipe_mode, adjusted_mode);
2373
2374 /* Expand MSO per-segment transcoder timings to full */
2375 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2376
2377 /*
2378 * We want the full numbers in adjusted_mode normal timings,
2379 * adjusted_mode crtc timings are left with the raw transcoder
2380 * timings.
2381 */
2382 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2383
2384 /* Populate the "user" mode with full numbers */
2385 drm_mode_copy(mode, pipe_mode);
2386 intel_mode_from_crtc_timings(mode, mode);
2387 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
2388 (intel_joiner_num_pipes(crtc_state) ?: 1);
2389 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
2390
2391 /* Derive per-pipe timings in case joiner is used */
2392 intel_joiner_adjust_timings(crtc_state, pipe_mode);
2393 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2394
2395 intel_crtc_compute_pixel_rate(crtc_state);
2396 }
2397
2398 void intel_encoder_get_config(struct intel_encoder *encoder,
2399 struct intel_crtc_state *crtc_state)
2400 {
2401 encoder->get_config(encoder, crtc_state);
2402
2403 intel_crtc_readout_derived_state(crtc_state);
2404 }
2405
2406 static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2407 {
2408 int num_pipes = intel_joiner_num_pipes(crtc_state);
2409 int width, height;
2410
2411 if (num_pipes < 2)
2412 return;
2413
2414 width = drm_rect_width(&crtc_state->pipe_src);
2415 height = drm_rect_height(&crtc_state->pipe_src);
2416
2417 drm_rect_init(&crtc_state->pipe_src, 0, 0,
2418 width / num_pipes, height);
2419 }
2420
2421 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
2422 {
2423 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2424 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2425
2426 intel_joiner_compute_pipe_src(crtc_state);
2427
2428 /*
2429 * Pipe horizontal size must be even in:
2430 * - DVO ganged mode
2431 * - LVDS dual channel mode
2432 * - Double wide pipe
2433 */
2434 if (drm_rect_width(&crtc_state->pipe_src) & 1) {
2435 if (crtc_state->double_wide) {
2436 drm_dbg_kms(&i915->drm,
2437 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
2438 crtc->base.base.id, crtc->base.name);
2439 return -EINVAL;
2440 }
2441
2442 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
2443 intel_is_dual_link_lvds(i915)) {
2444 drm_dbg_kms(&i915->drm,
2445 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
2446 crtc->base.base.id, crtc->base.name);
2447 return -EINVAL;
2448 }
2449 }
2450
2451 return 0;
2452 }
2453
2454 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
2455 {
2456 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2457 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2458 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2459 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2460 int clock_limit = i915->display.cdclk.max_dotclk_freq;
2461
2462 /*
2463 * Start with the adjusted_mode crtc timings, which
2464 * have been filled with the transcoder timings.
2465 */
2466 drm_mode_copy(pipe_mode, adjusted_mode);
2467
2468 /* Expand MSO per-segment transcoder timings to full */
2469 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2470
2471 /* Derive per-pipe timings in case joiner is used */
2472 intel_joiner_adjust_timings(crtc_state, pipe_mode);
2473 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2474
2475 if (DISPLAY_VER(i915) < 4) {
2476 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
2477
2478 /*
2479 * Enable double wide mode when the dot clock
2480 * is > 90% of the (display) core speed.
2481 */
2482 if (intel_crtc_supports_double_wide(crtc) &&
2483 pipe_mode->crtc_clock > clock_limit) {
2484 clock_limit = i915->display.cdclk.max_dotclk_freq;
2485 crtc_state->double_wide = true;
2486 }
2487 }
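/*
 * Illustrative numbers: with a 320 MHz max cdclk the single wide
 * limit above is 288 MHz, so e.g. a 296 MHz dot clock is only
 * achievable by enabling double wide mode (where supported), which
 * restores the limit to max_dotclk_freq.
 */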
2488
2489 if (pipe_mode->crtc_clock > clock_limit) {
2490 drm_dbg_kms(&i915->drm,
2491 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
2492 crtc->base.base.id, crtc->base.name,
2493 pipe_mode->crtc_clock, clock_limit,
2494 str_yes_no(crtc_state->double_wide));
2495 return -EINVAL;
2496 }
2497
2498 return 0;
2499 }
2500
2501 static int intel_crtc_compute_config(struct intel_atomic_state *state,
2502 struct intel_crtc *crtc)
2503 {
2504 struct intel_crtc_state *crtc_state =
2505 intel_atomic_get_new_crtc_state(state, crtc);
2506 int ret;
2507
2508 ret = intel_dpll_crtc_compute_clock(state, crtc);
2509 if (ret)
2510 return ret;
2511
2512 ret = intel_crtc_compute_pipe_src(crtc_state);
2513 if (ret)
2514 return ret;
2515
2516 ret = intel_crtc_compute_pipe_mode(crtc_state);
2517 if (ret)
2518 return ret;
2519
2520 intel_crtc_compute_pixel_rate(crtc_state);
2521
2522 if (crtc_state->has_pch_encoder)
2523 return ilk_fdi_compute_config(crtc, crtc_state);
2524
2525 return 0;
2526 }
2527
2528 static void
2529 intel_reduce_m_n_ratio(u32 *num, u32 *den)
2530 {
2531 while (*num > DATA_LINK_M_N_MASK ||
2532 *den > DATA_LINK_M_N_MASK) {
2533 *num >>= 1;
2534 *den >>= 1;
2535 }
2536 }
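/*
 * Halving both values in lock-step above (approximately) preserves the
 * M/N ratio while bringing each value within the DATA_LINK_M_N_MASK
 * register field.
 */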
2537
2538 static void compute_m_n(u32 *ret_m, u32 *ret_n,
2539 u32 m, u32 n, u32 constant_n)
2540 {
2541 if (constant_n)
2542 *ret_n = constant_n;
2543 else
2544 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2545
2546 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2547 intel_reduce_m_n_ratio(ret_m, ret_n);
2548 }
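/*
 * Worked example (illustrative numbers): for the link M/N of a 148.5 MHz
 * pixel clock on an HBR link (270 MHz symbol clock), constant_n == 0x80000
 * gives ret_n == 524288 and ret_m == 148500 * 524288 / 270000 == 288358;
 * both fit DATA_LINK_M_N_MASK, so no reduction is needed.
 */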
2549
2550 void
2551 intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
2552 int pixel_clock, int link_clock,
2553 int bw_overhead,
2554 struct intel_link_m_n *m_n)
2555 {
2556 u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
2557 u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
2558 bw_overhead);
2559 u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);
2560
2561 /*
2562 * Windows/BIOS uses fixed M/N values always. Follow suit.
2563 *
2564 * Several DP dongles in particular also seem to be fussy
2565 * about too-large link M/N values. Presumably the 20bit
2566 * value used by Windows/BIOS is acceptable to everyone.
2567 */
2568 m_n->tu = 64;
2569 compute_m_n(&m_n->data_m, &m_n->data_n,
2570 data_m, data_n,
2571 0x8000000);
2572
2573 compute_m_n(&m_n->link_m, &m_n->link_n,
2574 pixel_clock, link_symbol_clock,
2575 0x80000);
2576 }
2577
2578 void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2579 {
2580 /*
2581 * There may be no VBT. If the BIOS enabled SSC, we can just keep
2582 * using it to avoid unnecessary flicker. But if the BIOS isn't
2583 * using it, don't assume it will work even if the VBT indicates
2584 * as much.
2585 */
2586 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2587 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2588 PCH_DREF_CONTROL) &
2589 DREF_SSC1_ENABLE;
2590
2591 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2592 drm_dbg_kms(&dev_priv->drm,
2593 "SSC %s by BIOS, overriding VBT which says %s\n",
2594 str_enabled_disabled(bios_lvds_use_ssc),
2595 str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
2596 dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
2597 }
2598 }
2599 }
2600
2601 void intel_zero_m_n(struct intel_link_m_n *m_n)
2602 {
2603 /* corresponds to 0 register value */
2604 memset(m_n, 0, sizeof(*m_n));
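/* TU_SIZE() programs the field as tu - 1, so tu == 1 matches the zeroed register */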
2605 m_n->tu = 1;
2606 }
2607
2608 void intel_set_m_n(struct drm_i915_private *i915,
2609 const struct intel_link_m_n *m_n,
2610 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
2611 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
2612 {
2613 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
2614 intel_de_write(i915, data_n_reg, m_n->data_n);
2615 intel_de_write(i915, link_m_reg, m_n->link_m);
2616 /*
2617 * On BDW+ writing LINK_N arms the double buffered update
2618 * of all the M/N registers, so it must be written last.
2619 */
2620 intel_de_write(i915, link_n_reg, m_n->link_n);
2621 }
2622
2623 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
2624 enum transcoder transcoder)
2625 {
2626 if (IS_HASWELL(dev_priv))
2627 return transcoder == TRANSCODER_EDP;
2628
2629 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
2630 }
2631
2632 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
2633 enum transcoder transcoder,
2634 const struct intel_link_m_n *m_n)
2635 {
2636 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2637 enum pipe pipe = crtc->pipe;
2638
2639 if (DISPLAY_VER(dev_priv) >= 5)
2640 intel_set_m_n(dev_priv, m_n,
2641 PIPE_DATA_M1(dev_priv, transcoder),
2642 PIPE_DATA_N1(dev_priv, transcoder),
2643 PIPE_LINK_M1(dev_priv, transcoder),
2644 PIPE_LINK_N1(dev_priv, transcoder));
2645 else
2646 intel_set_m_n(dev_priv, m_n,
2647 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
2648 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
2649 }
2650
2651 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
2652 enum transcoder transcoder,
2653 const struct intel_link_m_n *m_n)
2654 {
2655 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2656
2657 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
2658 return;
2659
2660 intel_set_m_n(dev_priv, m_n,
2661 PIPE_DATA_M2(dev_priv, transcoder),
2662 PIPE_DATA_N2(dev_priv, transcoder),
2663 PIPE_LINK_M2(dev_priv, transcoder),
2664 PIPE_LINK_N2(dev_priv, transcoder));
2665 }
2666
2667 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
2668 {
2669 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2670 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2671 enum pipe pipe = crtc->pipe;
2672 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2673 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2674 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
2675 int vsyncshift = 0;
2676
2677 /* We need to be careful not to change the adjusted mode, as otherwise
2678 * the hw state checker will get angry at the mismatch. */
2679 crtc_vdisplay = adjusted_mode->crtc_vdisplay;
2680 crtc_vtotal = adjusted_mode->crtc_vtotal;
2681 crtc_vblank_start = adjusted_mode->crtc_vblank_start;
2682 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
2683
2684 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
2685 /* the chip adds 2 halflines automatically */
2686 crtc_vtotal -= 1;
2687 crtc_vblank_end -= 1;
2688
2689 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
2690 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
2691 else
2692 vsyncshift = adjusted_mode->crtc_hsync_start -
2693 adjusted_mode->crtc_htotal / 2;
2694 if (vsyncshift < 0)
2695 vsyncshift += adjusted_mode->crtc_htotal;
2696 }
2697
2698 /*
2699 * VBLANK_START no longer works on ADL+; instead we must use
2700 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
2701 */
2702 if (DISPLAY_VER(dev_priv) >= 13) {
2703 intel_de_write(dev_priv,
2704 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder),
2705 crtc_vblank_start - crtc_vdisplay);
2706
2707 /*
2708 * VBLANK_START not used by hw, just clear it
2709 * to make it stand out in register dumps.
2710 */
2711 crtc_vblank_start = 1;
2712 }
2713
2714 if (DISPLAY_VER(dev_priv) >= 4)
2715 intel_de_write(dev_priv,
2716 TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder),
2717 vsyncshift);
2718
2719 intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder),
2720 HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
2721 HTOTAL(adjusted_mode->crtc_htotal - 1));
2722 intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder),
2723 HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
2724 HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
2725 intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder),
2726 HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
2727 HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
2728
2729 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
2730 VACTIVE(crtc_vdisplay - 1) |
2731 VTOTAL(crtc_vtotal - 1));
2732 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
2733 VBLANK_START(crtc_vblank_start - 1) |
2734 VBLANK_END(crtc_vblank_end - 1));
2735 intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder),
2736 VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
2737 VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
2738
2739 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
2740 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
2741 * documented on the DDI_FUNC_CTL register description, EDP Input Select
2742 * bits. */
2743 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
2744 (pipe == PIPE_B || pipe == PIPE_C))
2745 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, pipe),
2746 VACTIVE(crtc_vdisplay - 1) |
2747 VTOTAL(crtc_vtotal - 1));
2748 }
2749
2750 static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
2751 {
2752 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2753 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2754 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2755 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2756 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
2757
2758 crtc_vdisplay = adjusted_mode->crtc_vdisplay;
2759 crtc_vtotal = adjusted_mode->crtc_vtotal;
2760 crtc_vblank_start = adjusted_mode->crtc_vblank_start;
2761 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
2762
2763 drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE);
2764
2765 /*
2766 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
2767 * But let's write it anyway to keep the state checker happy.
2768 */
2769 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
2770 VBLANK_START(crtc_vblank_start - 1) |
2771 VBLANK_END(crtc_vblank_end - 1));
2772 /*
2773 * The double buffer latch point for TRANS_VTOTAL
2774 * is the transcoder's undelayed vblank.
2775 */
2776 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
2777 VACTIVE(crtc_vdisplay - 1) |
2778 VTOTAL(crtc_vtotal - 1));
2779 }
2780
2781 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
2782 {
2783 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2784 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2785 int width = drm_rect_width(&crtc_state->pipe_src);
2786 int height = drm_rect_height(&crtc_state->pipe_src);
2787 enum pipe pipe = crtc->pipe;
2788
2789 /* pipesrc controls the size that is scaled from, which should
2790 * always be the user's requested size.
2791 */
2792 intel_de_write(dev_priv, PIPESRC(dev_priv, pipe),
2793 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
2794 }
2795
2796 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
2797 {
2798 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2799 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2800
2801 if (DISPLAY_VER(dev_priv) == 2)
2802 return false;
2803
2804 if (DISPLAY_VER(dev_priv) >= 9 ||
2805 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2806 return intel_de_read(dev_priv,
2807 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
2808 else
2809 return intel_de_read(dev_priv,
2810 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
2811 }
2812
2813 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
2814 struct intel_crtc_state *pipe_config)
2815 {
2816 struct drm_device *dev = crtc->base.dev;
2817 struct drm_i915_private *dev_priv = to_i915(dev);
2818 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
2819 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2820 u32 tmp;
2821
2822 tmp = intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder));
2823 adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
2824 adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
2825
2826 if (!transcoder_is_dsi(cpu_transcoder)) {
2827 tmp = intel_de_read(dev_priv,
2828 TRANS_HBLANK(dev_priv, cpu_transcoder));
2829 adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
2830 adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
2831 }
2832
2833 tmp = intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder));
2834 adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
2835 adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
2836
2837 tmp = intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder));
2838 adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
2839 adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
2840
2841 /* FIXME TGL+ DSI transcoders have this! */
2842 if (!transcoder_is_dsi(cpu_transcoder)) {
2843 tmp = intel_de_read(dev_priv,
2844 TRANS_VBLANK(dev_priv, cpu_transcoder));
2845 adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
2846 adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
2847 }
2848 tmp = intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder));
2849 adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
2850 adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
2851
2852 if (intel_pipe_is_interlaced(pipe_config)) {
2853 adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
2854 adjusted_mode->crtc_vtotal += 1;
2855 adjusted_mode->crtc_vblank_end += 1;
2856 }
2857
2858 if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
2859 adjusted_mode->crtc_vblank_start =
2860 adjusted_mode->crtc_vdisplay +
2861 intel_de_read(dev_priv,
2862 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder));
2863 }
2864
2865 static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
2866 {
2867 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2868 int num_pipes = intel_joiner_num_pipes(crtc_state);
2869 enum pipe primary_pipe, pipe = crtc->pipe;
2870 int width;
2871
2872 if (num_pipes < 2)
2873 return;
2874
2875 primary_pipe = joiner_primary_pipe(crtc_state);
2876 width = drm_rect_width(&crtc_state->pipe_src);
2877
2878 drm_rect_translate_to(&crtc_state->pipe_src,
2879 (pipe - primary_pipe) * width, 0);
2880 }
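/*
 * E.g. with pipes A+B joined and a 1920 pixel wide per-pipe source,
 * pipe B's pipe_src rect gets translated to start at x == 1920.
 */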
2881
2882 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
2883 struct intel_crtc_state *pipe_config)
2884 {
2885 struct drm_device *dev = crtc->base.dev;
2886 struct drm_i915_private *dev_priv = to_i915(dev);
2887 u32 tmp;
2888
2889 tmp = intel_de_read(dev_priv, PIPESRC(dev_priv, crtc->pipe));
2890
2891 drm_rect_init(&pipe_config->pipe_src, 0, 0,
2892 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
2893 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
2894
2895 intel_joiner_adjust_pipe_src(pipe_config);
2896 }
2897
2898 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
2899 {
2900 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2901 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2902 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2903 u32 val = 0;
2904
2905 /*
2906 * - We keep both pipes enabled on 830
2907 * - During modeset the pipe is still disabled and must remain so
2908 * - During fastset the pipe is already enabled and must remain so
2909 */
2910 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
2911 val |= TRANSCONF_ENABLE;
2912
2913 if (crtc_state->double_wide)
2914 val |= TRANSCONF_DOUBLE_WIDE;
2915
2916 /* only g4x and later have fancy bpc/dither controls */
2917 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2918 IS_CHERRYVIEW(dev_priv)) {
2919 /* Bspec claims that we can't use dithering for 30bpp pipes. */
2920 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
2921 val |= TRANSCONF_DITHER_EN |
2922 TRANSCONF_DITHER_TYPE_SP;
2923
2924 switch (crtc_state->pipe_bpp) {
2925 default:
2926 /* Case prevented by intel_choose_pipe_bpp_dither. */
2927 MISSING_CASE(crtc_state->pipe_bpp);
2928 fallthrough;
2929 case 18:
2930 val |= TRANSCONF_BPC_6;
2931 break;
2932 case 24:
2933 val |= TRANSCONF_BPC_8;
2934 break;
2935 case 30:
2936 val |= TRANSCONF_BPC_10;
2937 break;
2938 }
2939 }
2940
2941 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
2942 if (DISPLAY_VER(dev_priv) < 4 ||
2943 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
2944 val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
2945 else
2946 val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
2947 } else {
2948 val |= TRANSCONF_INTERLACE_PROGRESSIVE;
2949 }
2950
2951 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
2952 crtc_state->limited_color_range)
2953 val |= TRANSCONF_COLOR_RANGE_SELECT;
2954
2955 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
2956
2957 if (crtc_state->wgc_enable)
2958 val |= TRANSCONF_WGC_ENABLE;
2959
2960 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
2961
2962 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
2963 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
2964 }
2965
2966 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
2967 {
2968 if (IS_I830(dev_priv))
2969 return false;
2970
2971 return DISPLAY_VER(dev_priv) >= 4 ||
2972 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
2973 }
2974
2975 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
2976 {
2977 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2978 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2979 enum pipe pipe;
2980 u32 tmp;
2981
2982 if (!i9xx_has_pfit(dev_priv))
2983 return;
2984
2985 tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
2986 if (!(tmp & PFIT_ENABLE))
2987 return;
2988
2989 /* Check whether the pfit is attached to our pipe. */
2990 if (DISPLAY_VER(dev_priv) >= 4)
2991 pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
2992 else
2993 pipe = PIPE_B;
2994
2995 if (pipe != crtc->pipe)
2996 return;
2997
2998 crtc_state->gmch_pfit.control = tmp;
2999 crtc_state->gmch_pfit.pgm_ratios =
3000 intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv));
3001 }
3002
3003 static enum intel_output_format
3004 bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
3005 {
3006 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3007 u32 tmp;
3008
3009 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
3010
3011 if (tmp & PIPE_MISC_YUV420_ENABLE) {
3012 /* We support 4:2:0 in full blend mode only */
3013 drm_WARN_ON(&dev_priv->drm,
3014 (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
3015
3016 return INTEL_OUTPUT_FORMAT_YCBCR420;
3017 } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
3018 return INTEL_OUTPUT_FORMAT_YCBCR444;
3019 } else {
3020 return INTEL_OUTPUT_FORMAT_RGB;
3021 }
3022 }
3023
3024 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
3025 struct intel_crtc_state *pipe_config)
3026 {
3027 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3028 enum intel_display_power_domain power_domain;
3029 intel_wakeref_t wakeref;
3030 u32 tmp;
3031 bool ret;
3032
3033 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3034 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3035 if (!wakeref)
3036 return false;
3037
3038 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3039 pipe_config->sink_format = pipe_config->output_format;
3040 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3041 pipe_config->shared_dpll = NULL;
3042
3043 ret = false;
3044
3045 tmp = intel_de_read(dev_priv,
3046 TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
3047 if (!(tmp & TRANSCONF_ENABLE))
3048 goto out;
3049
3050 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3051 IS_CHERRYVIEW(dev_priv)) {
3052 switch (tmp & TRANSCONF_BPC_MASK) {
3053 case TRANSCONF_BPC_6:
3054 pipe_config->pipe_bpp = 18;
3055 break;
3056 case TRANSCONF_BPC_8:
3057 pipe_config->pipe_bpp = 24;
3058 break;
3059 case TRANSCONF_BPC_10:
3060 pipe_config->pipe_bpp = 30;
3061 break;
3062 default:
3063 MISSING_CASE(tmp);
3064 break;
3065 }
3066 }
3067
3068 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3069 (tmp & TRANSCONF_COLOR_RANGE_SELECT))
3070 pipe_config->limited_color_range = true;
3071
3072 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);
3073
3074 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
3075
3076 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3077 (tmp & TRANSCONF_WGC_ENABLE))
3078 pipe_config->wgc_enable = true;
3079
3080 intel_color_get_config(pipe_config);
3081
3082 if (DISPLAY_VER(dev_priv) < 4)
3083 pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
3084
3085 intel_get_transcoder_timings(crtc, pipe_config);
3086 intel_get_pipe_src_size(crtc, pipe_config);
3087
3088 i9xx_get_pfit_config(pipe_config);
3089
3090 i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state);
3091
3092 if (DISPLAY_VER(dev_priv) >= 4) {
3093 tmp = pipe_config->dpll_hw_state.i9xx.dpll_md;
3094 pipe_config->pixel_multiplier =
3095 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
3096 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
3097 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
3098 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
3099 tmp = pipe_config->dpll_hw_state.i9xx.dpll;
3100 pipe_config->pixel_multiplier =
3101 ((tmp & SDVO_MULTIPLIER_MASK)
3102 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
3103 } else {
3104 /* Note that on i915G/GM the pixel multiplier is in the sdvo
3105 * port and will be fixed up in the encoder->get_config
3106 * function. */
3107 pipe_config->pixel_multiplier = 1;
3108 }
3109
3110 if (IS_CHERRYVIEW(dev_priv))
3111 chv_crtc_clock_get(pipe_config);
3112 else if (IS_VALLEYVIEW(dev_priv))
3113 vlv_crtc_clock_get(pipe_config);
3114 else
3115 i9xx_crtc_clock_get(pipe_config);
3116
3117 /*
3118 * Normally the dotclock is filled in by the encoder .get_config()
3119 * but in case the pipe is enabled w/o any ports we need a sane
3120 * default.
3121 */
3122 pipe_config->hw.adjusted_mode.crtc_clock =
3123 pipe_config->port_clock / pipe_config->pixel_multiplier;
3124
3125 ret = true;
3126
3127 out:
3128 intel_display_power_put(dev_priv, power_domain, wakeref);
3129
3130 return ret;
3131 }
3132
ilk_set_pipeconf(const struct intel_crtc_state * crtc_state)3133 void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
3134 {
3135 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3136 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3137 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3138 u32 val = 0;
3139
3140 /*
3141 * - During modeset the pipe is still disabled and must remain so
3142 * - During fastset the pipe is already enabled and must remain so
3143 */
3144 if (!intel_crtc_needs_modeset(crtc_state))
3145 val |= TRANSCONF_ENABLE;
3146
3147 switch (crtc_state->pipe_bpp) {
3148 default:
3149 /* Case prevented by intel_choose_pipe_bpp_dither. */
3150 MISSING_CASE(crtc_state->pipe_bpp);
3151 fallthrough;
3152 case 18:
3153 val |= TRANSCONF_BPC_6;
3154 break;
3155 case 24:
3156 val |= TRANSCONF_BPC_8;
3157 break;
3158 case 30:
3159 val |= TRANSCONF_BPC_10;
3160 break;
3161 case 36:
3162 val |= TRANSCONF_BPC_12;
3163 break;
3164 }
3165
3166 if (crtc_state->dither)
3167 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
3168
3169 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3170 val |= TRANSCONF_INTERLACE_IF_ID_ILK;
3171 else
3172 val |= TRANSCONF_INTERLACE_PF_PD_ILK;
3173
3174 /*
3175 * This would end up with an odd purple hue over
3176 * the entire display. Make sure we don't do it.
3177 */
3178 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
3179 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
3180
3181 if (crtc_state->limited_color_range &&
3182 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3183 val |= TRANSCONF_COLOR_RANGE_SELECT;
3184
3185 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3186 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;
3187
3188 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
3189
3190 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
3191 val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
3192
3193 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
3194 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
3195 }
3196
hsw_set_transconf(const struct intel_crtc_state * crtc_state)3197 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3198 {
3199 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3200 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3201 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3202 u32 val = 0;
3203
3204 /*
3205 * - During modeset the pipe is still disabled and must remain so
3206 * - During fastset the pipe is already enabled and must remain so
3207 */
3208 if (!intel_crtc_needs_modeset(crtc_state))
3209 val |= TRANSCONF_ENABLE;
3210
3211 if (IS_HASWELL(dev_priv) && crtc_state->dither)
3212 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
3213
3214 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3215 val |= TRANSCONF_INTERLACE_IF_ID_ILK;
3216 else
3217 val |= TRANSCONF_INTERLACE_PF_PD_ILK;
3218
3219 if (IS_HASWELL(dev_priv) &&
3220 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3221 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
3222
3223 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
3224 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
3225 }
3226
bdw_set_pipe_misc(struct intel_dsb * dsb,const struct intel_crtc_state * crtc_state)3227 static void bdw_set_pipe_misc(struct intel_dsb *dsb,
3228 const struct intel_crtc_state *crtc_state)
3229 {
3230 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3231 struct intel_display *display = to_intel_display(crtc->base.dev);
3232 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3233 u32 val = 0;
3234
3235 switch (crtc_state->pipe_bpp) {
3236 case 18:
3237 val |= PIPE_MISC_BPC_6;
3238 break;
3239 case 24:
3240 val |= PIPE_MISC_BPC_8;
3241 break;
3242 case 30:
3243 val |= PIPE_MISC_BPC_10;
3244 break;
3245 case 36:
3246 /* Port output 12BPC defined for ADLP+ */
3247 if (DISPLAY_VER(dev_priv) >= 13)
3248 val |= PIPE_MISC_BPC_12_ADLP;
3249 break;
3250 default:
3251 MISSING_CASE(crtc_state->pipe_bpp);
3252 break;
3253 }
3254
3255 if (crtc_state->dither)
3256 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;
3257
3258 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3259 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3260 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
3261
3262 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3263 val |= PIPE_MISC_YUV420_ENABLE |
3264 PIPE_MISC_YUV420_MODE_FULL_BLEND;
3265
3266 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3267 val |= PIPE_MISC_HDR_MODE_PRECISION;
3268
3269 if (DISPLAY_VER(dev_priv) >= 12)
3270 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
3271
3272 /* allow PSR with sprite enabled */
3273 if (IS_BROADWELL(dev_priv))
3274 val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
3275
3276 intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val);
3277 }
3278
bdw_get_pipe_misc_bpp(struct intel_crtc * crtc)3279 int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
3280 {
3281 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3282 u32 tmp;
3283
3284 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
3285
3286 switch (tmp & PIPE_MISC_BPC_MASK) {
3287 case PIPE_MISC_BPC_6:
3288 return 18;
3289 case PIPE_MISC_BPC_8:
3290 return 24;
3291 case PIPE_MISC_BPC_10:
3292 return 30;
3293 /*
3294 * PORT OUTPUT 12 BPC defined for ADLP+.
3295 *
3296 * TODO:
3297 * For previous platforms with DSI interface, bits 5:7
3298 * are used for storing pipe_bpp irrespective of dithering.
3299 * Since the value of 12 BPC is not defined for these bits
3300 * on older platforms, need to find a workaround for 12 BPC
3301 * MIPI DSI HW readout.
3302 */
3303 case PIPE_MISC_BPC_12_ADLP:
3304 if (DISPLAY_VER(dev_priv) >= 13)
3305 return 36;
3306 fallthrough;
3307 default:
3308 MISSING_CASE(tmp);
3309 return 0;
3310 }
3311 }
3312
ilk_get_lanes_required(int target_clock,int link_bw,int bpp)3313 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3314 {
3315 /*
3316 * Account for spread spectrum to avoid
3317 * oversubscribing the link. Max center spread
3318 * is 2.5%; use 5% for safety's sake.
3319 */
3320 u32 bps = target_clock * bpp * 21 / 20;
3321 return DIV_ROUND_UP(bps, link_bw * 8);
3322 }
3323
intel_get_m_n(struct drm_i915_private * i915,struct intel_link_m_n * m_n,i915_reg_t data_m_reg,i915_reg_t data_n_reg,i915_reg_t link_m_reg,i915_reg_t link_n_reg)3324 void intel_get_m_n(struct drm_i915_private *i915,
3325 struct intel_link_m_n *m_n,
3326 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
3327 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
3328 {
3329 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
3330 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
3331 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
3332 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
3333 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
3334 }
3335
intel_cpu_transcoder_get_m1_n1(struct intel_crtc * crtc,enum transcoder transcoder,struct intel_link_m_n * m_n)3336 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
3337 enum transcoder transcoder,
3338 struct intel_link_m_n *m_n)
3339 {
3340 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3341 enum pipe pipe = crtc->pipe;
3342
3343 if (DISPLAY_VER(dev_priv) >= 5)
3344 intel_get_m_n(dev_priv, m_n,
3345 PIPE_DATA_M1(dev_priv, transcoder),
3346 PIPE_DATA_N1(dev_priv, transcoder),
3347 PIPE_LINK_M1(dev_priv, transcoder),
3348 PIPE_LINK_N1(dev_priv, transcoder));
3349 else
3350 intel_get_m_n(dev_priv, m_n,
3351 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3352 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3353 }
3354
intel_cpu_transcoder_get_m2_n2(struct intel_crtc * crtc,enum transcoder transcoder,struct intel_link_m_n * m_n)3355 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
3356 enum transcoder transcoder,
3357 struct intel_link_m_n *m_n)
3358 {
3359 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3360
3361 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3362 return;
3363
3364 intel_get_m_n(dev_priv, m_n,
3365 PIPE_DATA_M2(dev_priv, transcoder),
3366 PIPE_DATA_N2(dev_priv, transcoder),
3367 PIPE_LINK_M2(dev_priv, transcoder),
3368 PIPE_LINK_N2(dev_priv, transcoder));
3369 }
3370
ilk_get_pfit_config(struct intel_crtc_state * crtc_state)3371 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3372 {
3373 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3374 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3375 u32 ctl, pos, size;
3376 enum pipe pipe;
3377
3378 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3379 if ((ctl & PF_ENABLE) == 0)
3380 return;
3381
3382 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
3383 pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
3384 else
3385 pipe = crtc->pipe;
3386
3387 crtc_state->pch_pfit.enabled = true;
3388
3389 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
3390 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
3391
3392 drm_rect_init(&crtc_state->pch_pfit.dst,
3393 REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
3394 REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
3395 REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
3396 REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
3397
3398 /*
3399 * We currently do not free assignements of panel fitters on
3400 * ivb/hsw (since we don't use the higher upscaling modes which
3401 * differentiates them) so just WARN about this case for now.
3402 */
3403 drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
3404 }
3405
ilk_get_pipe_config(struct intel_crtc * crtc,struct intel_crtc_state * pipe_config)3406 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
3407 struct intel_crtc_state *pipe_config)
3408 {
3409 struct drm_device *dev = crtc->base.dev;
3410 struct drm_i915_private *dev_priv = to_i915(dev);
3411 enum intel_display_power_domain power_domain;
3412 intel_wakeref_t wakeref;
3413 u32 tmp;
3414 bool ret;
3415
3416 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3417 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3418 if (!wakeref)
3419 return false;
3420
3421 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3422 pipe_config->shared_dpll = NULL;
3423
3424 ret = false;
3425 tmp = intel_de_read(dev_priv,
3426 TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
3427 if (!(tmp & TRANSCONF_ENABLE))
3428 goto out;
3429
3430 switch (tmp & TRANSCONF_BPC_MASK) {
3431 case TRANSCONF_BPC_6:
3432 pipe_config->pipe_bpp = 18;
3433 break;
3434 case TRANSCONF_BPC_8:
3435 pipe_config->pipe_bpp = 24;
3436 break;
3437 case TRANSCONF_BPC_10:
3438 pipe_config->pipe_bpp = 30;
3439 break;
3440 case TRANSCONF_BPC_12:
3441 pipe_config->pipe_bpp = 36;
3442 break;
3443 default:
3444 break;
3445 }
3446
3447 if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
3448 pipe_config->limited_color_range = true;
3449
3450 switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
3451 case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
3452 case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
3453 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
3454 break;
3455 default:
3456 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3457 break;
3458 }
3459
3460 pipe_config->sink_format = pipe_config->output_format;
3461
3462 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);
3463
3464 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
3465
3466 pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);
3467
3468 intel_color_get_config(pipe_config);
3469
3470 pipe_config->pixel_multiplier = 1;
3471
3472 ilk_pch_get_config(pipe_config);
3473
3474 intel_get_transcoder_timings(crtc, pipe_config);
3475 intel_get_pipe_src_size(crtc, pipe_config);
3476
3477 ilk_get_pfit_config(pipe_config);
3478
3479 ret = true;
3480
3481 out:
3482 intel_display_power_put(dev_priv, power_domain, wakeref);
3483
3484 return ret;
3485 }
3486
joiner_pipes(struct drm_i915_private * i915)3487 static u8 joiner_pipes(struct drm_i915_private *i915)
3488 {
3489 u8 pipes;
3490
3491 if (DISPLAY_VER(i915) >= 12)
3492 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
3493 else if (DISPLAY_VER(i915) >= 11)
3494 pipes = BIT(PIPE_B) | BIT(PIPE_C);
3495 else
3496 pipes = 0;
3497
3498 return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask;
3499 }
3500
transcoder_ddi_func_is_enabled(struct drm_i915_private * dev_priv,enum transcoder cpu_transcoder)3501 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
3502 enum transcoder cpu_transcoder)
3503 {
3504 enum intel_display_power_domain power_domain;
3505 intel_wakeref_t wakeref;
3506 u32 tmp = 0;
3507
3508 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
3509
3510 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
3511 tmp = intel_de_read(dev_priv,
3512 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
3513
3514 return tmp & TRANS_DDI_FUNC_ENABLE;
3515 }
3516
enabled_joiner_pipes(struct drm_i915_private * dev_priv,u8 * primary_pipes,u8 * secondary_pipes)3517 static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
3518 u8 *primary_pipes, u8 *secondary_pipes)
3519 {
3520 struct intel_crtc *crtc;
3521
3522 *primary_pipes = 0;
3523 *secondary_pipes = 0;
3524
3525 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
3526 joiner_pipes(dev_priv)) {
3527 enum intel_display_power_domain power_domain;
3528 enum pipe pipe = crtc->pipe;
3529 intel_wakeref_t wakeref;
3530
3531 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
3532 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
3533 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
3534
3535 if (!(tmp & BIG_JOINER_ENABLE))
3536 continue;
3537
3538 if (tmp & PRIMARY_BIG_JOINER_ENABLE)
3539 *primary_pipes |= BIT(pipe);
3540 else
3541 *secondary_pipes |= BIT(pipe);
3542 }
3543
3544 if (DISPLAY_VER(dev_priv) < 13)
3545 continue;
3546
3547 power_domain = POWER_DOMAIN_PIPE(pipe);
3548 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
3549 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
3550
3551 if (tmp & UNCOMPRESSED_JOINER_PRIMARY)
3552 *primary_pipes |= BIT(pipe);
3553 if (tmp & UNCOMPRESSED_JOINER_SECONDARY)
3554 *secondary_pipes |= BIT(pipe);
3555 }
3556 }
3557
3558 /* Joiner pipes should always be consecutive primary and secondary */
3559 drm_WARN(&dev_priv->drm, *secondary_pipes != *primary_pipes << 1,
3560 "Joiner misconfigured (primary pipes 0x%x, secondary pipes 0x%x)\n",
3561 *primary_pipes, *secondary_pipes);
3562 }
3563
get_joiner_primary_pipe(enum pipe pipe,u8 primary_pipes,u8 secondary_pipes)3564 static enum pipe get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes)
3565 {
3566 if ((secondary_pipes & BIT(pipe)) == 0)
3567 return pipe;
3568
3569 /* ignore everything above our pipe */
3570 primary_pipes &= ~GENMASK(7, pipe);
3571
3572 /* highest remaining bit should be our primary pipe */
3573 return fls(primary_pipes) - 1;
3574 }
3575
get_joiner_secondary_pipes(enum pipe pipe,u8 primary_pipes,u8 secondary_pipes)3576 static u8 get_joiner_secondary_pipes(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes)
3577 {
3578 enum pipe primary_pipe, next_primary_pipe;
3579
3580 primary_pipe = get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes);
3581
3582 if ((primary_pipes & BIT(primary_pipe)) == 0)
3583 return 0;
3584
3585 /* ignore our primary pipe and everything below it */
3586 primary_pipes &= ~GENMASK(primary_pipe, 0);
3587 /* make sure a high bit is set for the ffs() */
3588 primary_pipes |= BIT(7);
3589 /* lowest remaining bit should be the next primary pipe */
3590 next_primary_pipe = ffs(primary_pipes) - 1;
3591
3592 return secondary_pipes & GENMASK(next_primary_pipe - 1, primary_pipe);
3593 }
3594
hsw_panel_transcoders(struct drm_i915_private * i915)3595 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
3596 {
3597 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
3598
3599 if (DISPLAY_VER(i915) >= 11)
3600 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
3601
3602 return panel_transcoder_mask;
3603 }
3604
hsw_enabled_transcoders(struct intel_crtc * crtc)3605 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
3606 {
3607 struct drm_device *dev = crtc->base.dev;
3608 struct drm_i915_private *dev_priv = to_i915(dev);
3609 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
3610 enum transcoder cpu_transcoder;
3611 u8 primary_pipes, secondary_pipes;
3612 u8 enabled_transcoders = 0;
3613
3614 /*
3615 * XXX: Do intel_display_power_get_if_enabled before reading this (for
3616 * consistency and less surprising code; it's in always on power).
3617 */
3618 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
3619 panel_transcoder_mask) {
3620 enum intel_display_power_domain power_domain;
3621 intel_wakeref_t wakeref;
3622 enum pipe trans_pipe;
3623 u32 tmp = 0;
3624
3625 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
3626 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
3627 tmp = intel_de_read(dev_priv,
3628 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
3629
3630 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
3631 continue;
3632
3633 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
3634 default:
3635 drm_WARN(dev, 1,
3636 "unknown pipe linked to transcoder %s\n",
3637 transcoder_name(cpu_transcoder));
3638 fallthrough;
3639 case TRANS_DDI_EDP_INPUT_A_ONOFF:
3640 case TRANS_DDI_EDP_INPUT_A_ON:
3641 trans_pipe = PIPE_A;
3642 break;
3643 case TRANS_DDI_EDP_INPUT_B_ONOFF:
3644 trans_pipe = PIPE_B;
3645 break;
3646 case TRANS_DDI_EDP_INPUT_C_ONOFF:
3647 trans_pipe = PIPE_C;
3648 break;
3649 case TRANS_DDI_EDP_INPUT_D_ONOFF:
3650 trans_pipe = PIPE_D;
3651 break;
3652 }
3653
3654 if (trans_pipe == crtc->pipe)
3655 enabled_transcoders |= BIT(cpu_transcoder);
3656 }
3657
3658 /* single pipe or joiner primary */
3659 cpu_transcoder = (enum transcoder) crtc->pipe;
3660 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
3661 enabled_transcoders |= BIT(cpu_transcoder);
3662
3663 /* joiner secondary -> consider the primary pipe's transcoder as well */
3664 enabled_joiner_pipes(dev_priv, &primary_pipes, &secondary_pipes);
3665 if (secondary_pipes & BIT(crtc->pipe)) {
3666 cpu_transcoder = (enum transcoder)
3667 get_joiner_primary_pipe(crtc->pipe, primary_pipes, secondary_pipes);
3668 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
3669 enabled_transcoders |= BIT(cpu_transcoder);
3670 }
3671
3672 return enabled_transcoders;
3673 }
3674
has_edp_transcoders(u8 enabled_transcoders)3675 static bool has_edp_transcoders(u8 enabled_transcoders)
3676 {
3677 return enabled_transcoders & BIT(TRANSCODER_EDP);
3678 }
3679
has_dsi_transcoders(u8 enabled_transcoders)3680 static bool has_dsi_transcoders(u8 enabled_transcoders)
3681 {
3682 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
3683 BIT(TRANSCODER_DSI_1));
3684 }
3685
has_pipe_transcoders(u8 enabled_transcoders)3686 static bool has_pipe_transcoders(u8 enabled_transcoders)
3687 {
3688 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
3689 BIT(TRANSCODER_DSI_0) |
3690 BIT(TRANSCODER_DSI_1));
3691 }
3692
assert_enabled_transcoders(struct drm_i915_private * i915,u8 enabled_transcoders)3693 static void assert_enabled_transcoders(struct drm_i915_private *i915,
3694 u8 enabled_transcoders)
3695 {
3696 /* Only one type of transcoder please */
3697 drm_WARN_ON(&i915->drm,
3698 has_edp_transcoders(enabled_transcoders) +
3699 has_dsi_transcoders(enabled_transcoders) +
3700 has_pipe_transcoders(enabled_transcoders) > 1);
3701
3702 /* Only DSI transcoders can be ganged */
3703 drm_WARN_ON(&i915->drm,
3704 !has_dsi_transcoders(enabled_transcoders) &&
3705 !is_power_of_2(enabled_transcoders));
3706 }
3707
hsw_get_transcoder_state(struct intel_crtc * crtc,struct intel_crtc_state * pipe_config,struct intel_display_power_domain_set * power_domain_set)3708 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
3709 struct intel_crtc_state *pipe_config,
3710 struct intel_display_power_domain_set *power_domain_set)
3711 {
3712 struct drm_device *dev = crtc->base.dev;
3713 struct drm_i915_private *dev_priv = to_i915(dev);
3714 unsigned long enabled_transcoders;
3715 u32 tmp;
3716
3717 enabled_transcoders = hsw_enabled_transcoders(crtc);
3718 if (!enabled_transcoders)
3719 return false;
3720
3721 assert_enabled_transcoders(dev_priv, enabled_transcoders);
3722
3723 /*
3724 * With the exception of DSI we should only ever have
3725 * a single enabled transcoder. With DSI let's just
3726 * pick the first one.
3727 */
3728 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
3729
3730 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
3731 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
3732 return false;
3733
3734 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
3735 tmp = intel_de_read(dev_priv,
3736 TRANS_DDI_FUNC_CTL(dev_priv, pipe_config->cpu_transcoder));
3737
3738 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
3739 pipe_config->pch_pfit.force_thru = true;
3740 }
3741
3742 tmp = intel_de_read(dev_priv,
3743 TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
3744
3745 return tmp & TRANSCONF_ENABLE;
3746 }
3747
bxt_get_dsi_transcoder_state(struct intel_crtc * crtc,struct intel_crtc_state * pipe_config,struct intel_display_power_domain_set * power_domain_set)3748 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
3749 struct intel_crtc_state *pipe_config,
3750 struct intel_display_power_domain_set *power_domain_set)
3751 {
3752 struct intel_display *display = to_intel_display(crtc);
3753 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3754 enum transcoder cpu_transcoder;
3755 enum port port;
3756 u32 tmp;
3757
3758 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
3759 if (port == PORT_A)
3760 cpu_transcoder = TRANSCODER_DSI_A;
3761 else
3762 cpu_transcoder = TRANSCODER_DSI_C;
3763
3764 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
3765 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
3766 continue;
3767
3768 /*
3769 * The PLL needs to be enabled with a valid divider
3770 * configuration, otherwise accessing DSI registers will hang
3771 * the machine. See BSpec North Display Engine
3772 * registers/MIPI[BXT]. We can break out here early, since we
3773 * need the same DSI PLL to be enabled for both DSI ports.
3774 */
3775 if (!bxt_dsi_pll_is_enabled(dev_priv))
3776 break;
3777
3778 /* XXX: this works for video mode only */
3779 tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port));
3780 if (!(tmp & DPI_ENABLE))
3781 continue;
3782
3783 tmp = intel_de_read(display, MIPI_CTRL(display, port));
3784 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
3785 continue;
3786
3787 pipe_config->cpu_transcoder = cpu_transcoder;
3788 break;
3789 }
3790
3791 return transcoder_is_dsi(pipe_config->cpu_transcoder);
3792 }
3793
intel_joiner_get_config(struct intel_crtc_state * crtc_state)3794 static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
3795 {
3796 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3797 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3798 u8 primary_pipes, secondary_pipes;
3799 enum pipe pipe = crtc->pipe;
3800
3801 enabled_joiner_pipes(i915, &primary_pipes, &secondary_pipes);
3802
3803 if (((primary_pipes | secondary_pipes) & BIT(pipe)) == 0)
3804 return;
3805
3806 crtc_state->joiner_pipes =
3807 BIT(get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes)) |
3808 get_joiner_secondary_pipes(pipe, primary_pipes, secondary_pipes);
3809 }
3810
hsw_get_pipe_config(struct intel_crtc * crtc,struct intel_crtc_state * pipe_config)3811 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
3812 struct intel_crtc_state *pipe_config)
3813 {
3814 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3815 bool active;
3816 u32 tmp;
3817
3818 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
3819 POWER_DOMAIN_PIPE(crtc->pipe)))
3820 return false;
3821
3822 pipe_config->shared_dpll = NULL;
3823
3824 active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);
3825
3826 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
3827 bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
3828 drm_WARN_ON(&dev_priv->drm, active);
3829 active = true;
3830 }
3831
3832 if (!active)
3833 goto out;
3834
3835 intel_joiner_get_config(pipe_config);
3836 intel_dsc_get_config(pipe_config);
3837
3838 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
3839 DISPLAY_VER(dev_priv) >= 11)
3840 intel_get_transcoder_timings(crtc, pipe_config);
3841
3842 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
3843 intel_vrr_get_config(pipe_config);
3844
3845 intel_get_pipe_src_size(crtc, pipe_config);
3846
3847 if (IS_HASWELL(dev_priv)) {
3848 u32 tmp = intel_de_read(dev_priv,
3849 TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
3850
3851 if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
3852 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
3853 else
3854 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3855 } else {
3856 pipe_config->output_format =
3857 bdw_get_pipe_misc_output_format(crtc);
3858 }
3859
3860 pipe_config->sink_format = pipe_config->output_format;
3861
3862 intel_color_get_config(pipe_config);
3863
3864 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
3865 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
3866 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3867 pipe_config->ips_linetime =
3868 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
3869
3870 if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
3871 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
3872 if (DISPLAY_VER(dev_priv) >= 9)
3873 skl_scaler_get_config(pipe_config);
3874 else
3875 ilk_get_pfit_config(pipe_config);
3876 }
3877
3878 hsw_ips_get_config(pipe_config);
3879
3880 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
3881 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
3882 pipe_config->pixel_multiplier =
3883 intel_de_read(dev_priv,
3884 TRANS_MULT(dev_priv, pipe_config->cpu_transcoder)) + 1;
3885 } else {
3886 pipe_config->pixel_multiplier = 1;
3887 }
3888
3889 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
3890 tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));
3891
3892 pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
3893 } else {
3894 /* no idea if this is correct */
3895 pipe_config->framestart_delay = 1;
3896 }
3897
3898 out:
3899 intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);
3900
3901 return active;
3902 }
3903
intel_crtc_get_pipe_config(struct intel_crtc_state * crtc_state)3904 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
3905 {
3906 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3907 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3908
3909 if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
3910 return false;
3911
3912 crtc_state->hw.active = true;
3913
3914 intel_crtc_readout_derived_state(crtc_state);
3915
3916 return true;
3917 }
3918
intel_dotclock_calculate(int link_freq,const struct intel_link_m_n * m_n)3919 int intel_dotclock_calculate(int link_freq,
3920 const struct intel_link_m_n *m_n)
3921 {
3922 /*
3923 * The calculation for the data clock -> pixel clock is:
3924 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
3925 * But we want to avoid losing precison if possible, so:
3926 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
3927 *
3928 * and for link freq (10kbs units) -> pixel clock it is:
3929 * link_symbol_clock = link_freq * 10 / link_symbol_size
3930 * pixel_clock = (m * link_symbol_clock) / n
3931 * or for more precision:
3932 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
3933 */
3934
3935 if (!m_n->link_n)
3936 return 0;
3937
3938 return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
3939 m_n->link_n * intel_dp_link_symbol_size(link_freq));
3940 }
3941
intel_crtc_dotclock(const struct intel_crtc_state * pipe_config)3942 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
3943 {
3944 int dotclock;
3945
3946 if (intel_crtc_has_dp_encoder(pipe_config))
3947 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
3948 &pipe_config->dp_m_n);
3949 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
3950 dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
3951 pipe_config->pipe_bpp);
3952 else
3953 dotclock = pipe_config->port_clock;
3954
3955 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
3956 !intel_crtc_has_dp_encoder(pipe_config))
3957 dotclock *= 2;
3958
3959 if (pipe_config->pixel_multiplier)
3960 dotclock /= pipe_config->pixel_multiplier;
3961
3962 return dotclock;
3963 }
3964
3965 /* Returns the currently programmed mode of the given encoder. */
3966 struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder * encoder)3967 intel_encoder_current_mode(struct intel_encoder *encoder)
3968 {
3969 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3970 struct intel_crtc_state *crtc_state;
3971 struct drm_display_mode *mode;
3972 struct intel_crtc *crtc;
3973 enum pipe pipe;
3974
3975 if (!encoder->get_hw_state(encoder, &pipe))
3976 return NULL;
3977
3978 crtc = intel_crtc_for_pipe(dev_priv, pipe);
3979
3980 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
3981 if (!mode)
3982 return NULL;
3983
3984 crtc_state = intel_crtc_state_alloc(crtc);
3985 if (!crtc_state) {
3986 kfree(mode);
3987 return NULL;
3988 }
3989
3990 if (!intel_crtc_get_pipe_config(crtc_state)) {
3991 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
3992 kfree(mode);
3993 return NULL;
3994 }
3995
3996 intel_encoder_get_config(encoder, crtc_state);
3997
3998 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
3999
4000 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
4001
4002 return mode;
4003 }
4004
encoders_cloneable(const struct intel_encoder * a,const struct intel_encoder * b)4005 static bool encoders_cloneable(const struct intel_encoder *a,
4006 const struct intel_encoder *b)
4007 {
4008 /* masks could be asymmetric, so check both ways */
4009 return a == b || (a->cloneable & BIT(b->type) &&
4010 b->cloneable & BIT(a->type));
4011 }
4012
check_single_encoder_cloning(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)4013 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
4014 struct intel_crtc *crtc,
4015 struct intel_encoder *encoder)
4016 {
4017 struct intel_encoder *source_encoder;
4018 struct drm_connector *connector;
4019 struct drm_connector_state *connector_state;
4020 int i;
4021
4022 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4023 if (connector_state->crtc != &crtc->base)
4024 continue;
4025
4026 source_encoder =
4027 to_intel_encoder(connector_state->best_encoder);
4028 if (!encoders_cloneable(encoder, source_encoder))
4029 return false;
4030 }
4031
4032 return true;
4033 }
4034
icl_add_linked_planes(struct intel_atomic_state * state)4035 static int icl_add_linked_planes(struct intel_atomic_state *state)
4036 {
4037 struct intel_plane *plane, *linked;
4038 struct intel_plane_state *plane_state, *linked_plane_state;
4039 int i;
4040
4041 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4042 linked = plane_state->planar_linked_plane;
4043
4044 if (!linked)
4045 continue;
4046
4047 linked_plane_state = intel_atomic_get_plane_state(state, linked);
4048 if (IS_ERR(linked_plane_state))
4049 return PTR_ERR(linked_plane_state);
4050
4051 drm_WARN_ON(state->base.dev,
4052 linked_plane_state->planar_linked_plane != plane);
4053 drm_WARN_ON(state->base.dev,
4054 linked_plane_state->planar_slave == plane_state->planar_slave);
4055 }
4056
4057 return 0;
4058 }
4059
icl_check_nv12_planes(struct intel_atomic_state * state,struct intel_crtc * crtc)4060 static int icl_check_nv12_planes(struct intel_atomic_state *state,
4061 struct intel_crtc *crtc)
4062 {
4063 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4064 struct intel_crtc_state *crtc_state =
4065 intel_atomic_get_new_crtc_state(state, crtc);
4066 struct intel_plane *plane, *linked;
4067 struct intel_plane_state *plane_state;
4068 int i;
4069
4070 if (DISPLAY_VER(dev_priv) < 11)
4071 return 0;
4072
4073 /*
4074 * Destroy all old plane links and make the slave plane invisible
4075 * in the crtc_state->active_planes mask.
4076 */
4077 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4078 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
4079 continue;
4080
4081 plane_state->planar_linked_plane = NULL;
4082 if (plane_state->planar_slave && !plane_state->uapi.visible) {
4083 crtc_state->enabled_planes &= ~BIT(plane->id);
4084 crtc_state->active_planes &= ~BIT(plane->id);
4085 crtc_state->update_planes |= BIT(plane->id);
4086 crtc_state->data_rate[plane->id] = 0;
4087 crtc_state->rel_data_rate[plane->id] = 0;
4088 }
4089
4090 plane_state->planar_slave = false;
4091 }
4092
4093 if (!crtc_state->nv12_planes)
4094 return 0;
4095
4096 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4097 struct intel_plane_state *linked_state = NULL;
4098
4099 if (plane->pipe != crtc->pipe ||
4100 !(crtc_state->nv12_planes & BIT(plane->id)))
4101 continue;
4102
4103 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
4104 if (!icl_is_nv12_y_plane(dev_priv, linked->id))
4105 continue;
4106
4107 if (crtc_state->active_planes & BIT(linked->id))
4108 continue;
4109
4110 linked_state = intel_atomic_get_plane_state(state, linked);
4111 if (IS_ERR(linked_state))
4112 return PTR_ERR(linked_state);
4113
4114 break;
4115 }
4116
4117 if (!linked_state) {
4118 drm_dbg_kms(&dev_priv->drm,
4119 "Need %d free Y planes for planar YUV\n",
4120 hweight8(crtc_state->nv12_planes));
4121
4122 return -EINVAL;
4123 }
4124
4125 plane_state->planar_linked_plane = linked;
4126
4127 linked_state->planar_slave = true;
4128 linked_state->planar_linked_plane = plane;
4129 crtc_state->enabled_planes |= BIT(linked->id);
4130 crtc_state->active_planes |= BIT(linked->id);
4131 crtc_state->update_planes |= BIT(linked->id);
4132 crtc_state->data_rate[linked->id] =
4133 crtc_state->data_rate_y[plane->id];
4134 crtc_state->rel_data_rate[linked->id] =
4135 crtc_state->rel_data_rate_y[plane->id];
4136 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
4137 linked->base.name, plane->base.name);
4138
4139 /* Copy parameters to slave plane */
4140 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
4141 linked_state->color_ctl = plane_state->color_ctl;
4142 linked_state->view = plane_state->view;
4143 linked_state->decrypt = plane_state->decrypt;
4144
4145 intel_plane_copy_hw_state(linked_state, plane_state);
4146 linked_state->uapi.src = plane_state->uapi.src;
4147 linked_state->uapi.dst = plane_state->uapi.dst;
4148
4149 if (icl_is_hdr_plane(dev_priv, plane->id)) {
4150 if (linked->id == PLANE_7)
4151 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
4152 else if (linked->id == PLANE_6)
4153 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
4154 else if (linked->id == PLANE_5)
4155 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
4156 else if (linked->id == PLANE_4)
4157 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
4158 else
4159 MISSING_CASE(linked->id);
4160 }
4161 }
4162
4163 return 0;
4164 }
4165
hsw_linetime_wm(const struct intel_crtc_state * crtc_state)4166 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
4167 {
4168 const struct drm_display_mode *pipe_mode =
4169 &crtc_state->hw.pipe_mode;
4170 int linetime_wm;
4171
4172 if (!crtc_state->hw.enable)
4173 return 0;
4174
4175 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4176 pipe_mode->crtc_clock);
4177
4178 return min(linetime_wm, 0x1ff);
4179 }
4180
hsw_ips_linetime_wm(const struct intel_crtc_state * crtc_state,const struct intel_cdclk_state * cdclk_state)4181 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
4182 const struct intel_cdclk_state *cdclk_state)
4183 {
4184 const struct drm_display_mode *pipe_mode =
4185 &crtc_state->hw.pipe_mode;
4186 int linetime_wm;
4187
4188 if (!crtc_state->hw.enable)
4189 return 0;
4190
4191 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4192 cdclk_state->logical.cdclk);
4193
4194 return min(linetime_wm, 0x1ff);
4195 }
4196
skl_linetime_wm(const struct intel_crtc_state * crtc_state)4197 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
4198 {
4199 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4200 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4201 const struct drm_display_mode *pipe_mode =
4202 &crtc_state->hw.pipe_mode;
4203 int linetime_wm;
4204
4205 if (!crtc_state->hw.enable)
4206 return 0;
4207
4208 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
4209 crtc_state->pixel_rate);
4210
4211 /* Display WA #1135: BXT:ALL GLK:ALL */
4212 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4213 skl_watermark_ipc_enabled(dev_priv))
4214 linetime_wm /= 2;
4215
4216 return min(linetime_wm, 0x1ff);
4217 }
4218
hsw_compute_linetime_wm(struct intel_atomic_state * state,struct intel_crtc * crtc)4219 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
4220 struct intel_crtc *crtc)
4221 {
4222 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4223 struct intel_crtc_state *crtc_state =
4224 intel_atomic_get_new_crtc_state(state, crtc);
4225 const struct intel_cdclk_state *cdclk_state;
4226
4227 if (DISPLAY_VER(dev_priv) >= 9)
4228 crtc_state->linetime = skl_linetime_wm(crtc_state);
4229 else
4230 crtc_state->linetime = hsw_linetime_wm(crtc_state);
4231
4232 if (!hsw_crtc_supports_ips(crtc))
4233 return 0;
4234
4235 cdclk_state = intel_atomic_get_cdclk_state(state);
4236 if (IS_ERR(cdclk_state))
4237 return PTR_ERR(cdclk_state);
4238
4239 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
4240 cdclk_state);
4241
4242 return 0;
4243 }
4244
intel_crtc_atomic_check(struct intel_atomic_state * state,struct intel_crtc * crtc)4245 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
4246 struct intel_crtc *crtc)
4247 {
4248 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4249 struct intel_crtc_state *crtc_state =
4250 intel_atomic_get_new_crtc_state(state, crtc);
4251 int ret;
4252
4253 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
4254 intel_crtc_needs_modeset(crtc_state) &&
4255 !crtc_state->hw.active)
4256 crtc_state->update_wm_post = true;
4257
4258 if (intel_crtc_needs_modeset(crtc_state)) {
4259 ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
4260 if (ret)
4261 return ret;
4262 }
4263
4264 ret = intel_color_check(state, crtc);
4265 if (ret)
4266 return ret;
4267
4268 ret = intel_compute_pipe_wm(state, crtc);
4269 if (ret) {
4270 drm_dbg_kms(&dev_priv->drm,
4271 "Target pipe watermarks are invalid\n");
4272 return ret;
4273 }
4274
4275 /*
4276 * Calculate 'intermediate' watermarks that satisfy both the
4277 * old state and the new state. We can program these
4278 * immediately.
4279 */
4280 ret = intel_compute_intermediate_wm(state, crtc);
4281 if (ret) {
4282 drm_dbg_kms(&dev_priv->drm,
4283 "No valid intermediate pipe watermarks are possible\n");
4284 return ret;
4285 }
4286
4287 if (DISPLAY_VER(dev_priv) >= 9) {
4288 if (intel_crtc_needs_modeset(crtc_state) ||
4289 intel_crtc_needs_fastset(crtc_state)) {
4290 ret = skl_update_scaler_crtc(crtc_state);
4291 if (ret)
4292 return ret;
4293 }
4294
4295 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
4296 if (ret)
4297 return ret;
4298 }
4299
4300 if (HAS_IPS(dev_priv)) {
4301 ret = hsw_ips_compute_config(state, crtc);
4302 if (ret)
4303 return ret;
4304 }
4305
4306 if (DISPLAY_VER(dev_priv) >= 9 ||
4307 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
4308 ret = hsw_compute_linetime_wm(state, crtc);
4309 if (ret)
4310 return ret;
4311
4312 }
4313
4314 ret = intel_psr2_sel_fetch_update(state, crtc);
4315 if (ret)
4316 return ret;
4317
4318 return 0;
4319 }
4320
4321 static int
compute_sink_pipe_bpp(const struct drm_connector_state * conn_state,struct intel_crtc_state * crtc_state)4322 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
4323 struct intel_crtc_state *crtc_state)
4324 {
4325 struct drm_connector *connector = conn_state->connector;
4326 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4327 const struct drm_display_info *info = &connector->display_info;
4328 int bpp;
4329
4330 switch (conn_state->max_bpc) {
4331 case 6 ... 7:
4332 bpp = 6 * 3;
4333 break;
4334 case 8 ... 9:
4335 bpp = 8 * 3;
4336 break;
4337 case 10 ... 11:
4338 bpp = 10 * 3;
4339 break;
4340 case 12 ... 16:
4341 bpp = 12 * 3;
4342 break;
4343 default:
4344 MISSING_CASE(conn_state->max_bpc);
4345 return -EINVAL;
4346 }
4347
4348 if (bpp < crtc_state->pipe_bpp) {
4349 drm_dbg_kms(&i915->drm,
4350 "[CONNECTOR:%d:%s] Limiting display bpp to %d "
4351 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
4352 connector->base.id, connector->name,
4353 bpp, 3 * info->bpc,
4354 3 * conn_state->max_requested_bpc,
4355 crtc_state->pipe_bpp);
4356
4357 crtc_state->pipe_bpp = bpp;
4358 }
4359
4360 return 0;
4361 }
4362
4363 static int
compute_baseline_pipe_bpp(struct intel_atomic_state * state,struct intel_crtc * crtc)4364 compute_baseline_pipe_bpp(struct intel_atomic_state *state,
4365 struct intel_crtc *crtc)
4366 {
4367 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4368 struct intel_crtc_state *crtc_state =
4369 intel_atomic_get_new_crtc_state(state, crtc);
4370 struct drm_connector *connector;
4371 struct drm_connector_state *connector_state;
4372 int bpp, i;
4373
4374 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4375 IS_CHERRYVIEW(dev_priv)))
4376 bpp = 10*3;
4377 else if (DISPLAY_VER(dev_priv) >= 5)
4378 bpp = 12*3;
4379 else
4380 bpp = 8*3;
4381
4382 crtc_state->pipe_bpp = bpp;
4383
4384 /* Clamp display bpp to connector max bpp */
4385 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4386 int ret;
4387
4388 if (connector_state->crtc != &crtc->base)
4389 continue;
4390
4391 ret = compute_sink_pipe_bpp(connector_state, crtc_state);
4392 if (ret)
4393 return ret;
4394 }
4395
4396 return 0;
4397 }
4398
check_digital_port_conflicts(struct intel_atomic_state * state)4399 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
4400 {
4401 struct drm_device *dev = state->base.dev;
4402 struct drm_connector *connector;
4403 struct drm_connector_list_iter conn_iter;
4404 unsigned int used_ports = 0;
4405 unsigned int used_mst_ports = 0;
4406 bool ret = true;
4407
4408 /*
4409 * We're going to peek into connector->state,
4410 * hence connection_mutex must be held.
4411 */
4412 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
4413
4414 /*
4415 * Walk the connector list instead of the encoder
4416 * list to detect the problem on ddi platforms
4417 * where there's just one encoder per digital port.
4418 */
4419 drm_connector_list_iter_begin(dev, &conn_iter);
4420 drm_for_each_connector_iter(connector, &conn_iter) {
4421 struct drm_connector_state *connector_state;
4422 struct intel_encoder *encoder;
4423
4424 connector_state =
4425 drm_atomic_get_new_connector_state(&state->base,
4426 connector);
4427 if (!connector_state)
4428 connector_state = connector->state;
4429
4430 if (!connector_state->best_encoder)
4431 continue;
4432
4433 encoder = to_intel_encoder(connector_state->best_encoder);
4434
4435 drm_WARN_ON(dev, !connector_state->crtc);
4436
4437 switch (encoder->type) {
4438 case INTEL_OUTPUT_DDI:
4439 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
4440 break;
4441 fallthrough;
4442 case INTEL_OUTPUT_DP:
4443 case INTEL_OUTPUT_HDMI:
4444 case INTEL_OUTPUT_EDP:
4445 /* the same port mustn't appear more than once */
4446 if (used_ports & BIT(encoder->port))
4447 ret = false;
4448
4449 used_ports |= BIT(encoder->port);
4450 break;
4451 case INTEL_OUTPUT_DP_MST:
4452 used_mst_ports |=
4453 1 << encoder->port;
4454 break;
4455 default:
4456 break;
4457 }
4458 }
4459 drm_connector_list_iter_end(&conn_iter);
4460
4461 /* can't mix MST and SST/HDMI on the same port */
4462 if (used_ports & used_mst_ports)
4463 return false;
4464
4465 return ret;
4466 }
4467
4468 static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state * state,struct intel_crtc * crtc)4469 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
4470 struct intel_crtc *crtc)
4471 {
4472 struct intel_crtc_state *crtc_state =
4473 intel_atomic_get_new_crtc_state(state, crtc);
4474
4475 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));
4476
4477 drm_property_replace_blob(&crtc_state->hw.degamma_lut,
4478 crtc_state->uapi.degamma_lut);
4479 drm_property_replace_blob(&crtc_state->hw.gamma_lut,
4480 crtc_state->uapi.gamma_lut);
4481 drm_property_replace_blob(&crtc_state->hw.ctm,
4482 crtc_state->uapi.ctm);
4483 }
4484
4485 static void
intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state * state,struct intel_crtc * crtc)4486 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
4487 struct intel_crtc *crtc)
4488 {
4489 struct intel_crtc_state *crtc_state =
4490 intel_atomic_get_new_crtc_state(state, crtc);
4491
4492 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));
4493
4494 crtc_state->hw.enable = crtc_state->uapi.enable;
4495 crtc_state->hw.active = crtc_state->uapi.active;
4496 drm_mode_copy(&crtc_state->hw.mode,
4497 &crtc_state->uapi.mode);
4498 drm_mode_copy(&crtc_state->hw.adjusted_mode,
4499 &crtc_state->uapi.adjusted_mode);
4500 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
4501
4502 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
4503 }
4504
4505 static void
copy_joiner_crtc_state_nomodeset(struct intel_atomic_state * state,struct intel_crtc * secondary_crtc)4506 copy_joiner_crtc_state_nomodeset(struct intel_atomic_state *state,
4507 struct intel_crtc *secondary_crtc)
4508 {
4509 struct intel_crtc_state *secondary_crtc_state =
4510 intel_atomic_get_new_crtc_state(state, secondary_crtc);
4511 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state);
4512 const struct intel_crtc_state *primary_crtc_state =
4513 intel_atomic_get_new_crtc_state(state, primary_crtc);
4514
4515 drm_property_replace_blob(&secondary_crtc_state->hw.degamma_lut,
4516 primary_crtc_state->hw.degamma_lut);
4517 drm_property_replace_blob(&secondary_crtc_state->hw.gamma_lut,
4518 primary_crtc_state->hw.gamma_lut);
4519 drm_property_replace_blob(&secondary_crtc_state->hw.ctm,
4520 primary_crtc_state->hw.ctm);
4521
4522 secondary_crtc_state->uapi.color_mgmt_changed = primary_crtc_state->uapi.color_mgmt_changed;
4523 }
4524
4525 static int
copy_joiner_crtc_state_modeset(struct intel_atomic_state * state,struct intel_crtc * secondary_crtc)4526 copy_joiner_crtc_state_modeset(struct intel_atomic_state *state,
4527 struct intel_crtc *secondary_crtc)
4528 {
4529 struct intel_crtc_state *secondary_crtc_state =
4530 intel_atomic_get_new_crtc_state(state, secondary_crtc);
4531 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state);
4532 const struct intel_crtc_state *primary_crtc_state =
4533 intel_atomic_get_new_crtc_state(state, primary_crtc);
4534 struct intel_crtc_state *saved_state;
4535
4536 WARN_ON(primary_crtc_state->joiner_pipes !=
4537 secondary_crtc_state->joiner_pipes);
4538
4539 saved_state = kmemdup(primary_crtc_state, sizeof(*saved_state), GFP_KERNEL);
4540 if (!saved_state)
4541 return -ENOMEM;
4542
4543 /* preserve some things from the slave's original crtc state */
4544 saved_state->uapi = secondary_crtc_state->uapi;
4545 saved_state->scaler_state = secondary_crtc_state->scaler_state;
4546 saved_state->shared_dpll = secondary_crtc_state->shared_dpll;
4547 saved_state->crc_enabled = secondary_crtc_state->crc_enabled;
4548
4549 intel_crtc_free_hw_state(secondary_crtc_state);
4550 if (secondary_crtc_state->dp_tunnel_ref.tunnel)
4551 drm_dp_tunnel_ref_put(&secondary_crtc_state->dp_tunnel_ref);
4552 memcpy(secondary_crtc_state, saved_state, sizeof(*secondary_crtc_state));
4553 kfree(saved_state);
4554
4555 /* Re-init hw state */
4556 memset(&secondary_crtc_state->hw, 0, sizeof(secondary_crtc_state->hw));
4557 secondary_crtc_state->hw.enable = primary_crtc_state->hw.enable;
4558 secondary_crtc_state->hw.active = primary_crtc_state->hw.active;
4559 drm_mode_copy(&secondary_crtc_state->hw.mode,
4560 &primary_crtc_state->hw.mode);
4561 drm_mode_copy(&secondary_crtc_state->hw.pipe_mode,
4562 &primary_crtc_state->hw.pipe_mode);
4563 drm_mode_copy(&secondary_crtc_state->hw.adjusted_mode,
4564 &primary_crtc_state->hw.adjusted_mode);
4565 secondary_crtc_state->hw.scaling_filter = primary_crtc_state->hw.scaling_filter;
4566
4567 if (primary_crtc_state->dp_tunnel_ref.tunnel)
4568 drm_dp_tunnel_ref_get(primary_crtc_state->dp_tunnel_ref.tunnel,
4569 &secondary_crtc_state->dp_tunnel_ref);
4570
4571 copy_joiner_crtc_state_nomodeset(state, secondary_crtc);
4572
4573 secondary_crtc_state->uapi.mode_changed = primary_crtc_state->uapi.mode_changed;
4574 secondary_crtc_state->uapi.connectors_changed = primary_crtc_state->uapi.connectors_changed;
4575 secondary_crtc_state->uapi.active_changed = primary_crtc_state->uapi.active_changed;
4576
4577 WARN_ON(primary_crtc_state->joiner_pipes !=
4578 secondary_crtc_state->joiner_pipes);
4579
4580 return 0;
4581 }
4582
4583 static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state * state,struct intel_crtc * crtc)4584 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
4585 struct intel_crtc *crtc)
4586 {
4587 struct intel_crtc_state *crtc_state =
4588 intel_atomic_get_new_crtc_state(state, crtc);
4589 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4590 struct intel_crtc_state *saved_state;
4591
4592 saved_state = intel_crtc_state_alloc(crtc);
4593 if (!saved_state)
4594 return -ENOMEM;
4595
4596 /* free the old crtc_state->hw members */
4597 intel_crtc_free_hw_state(crtc_state);
4598
4599 intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);
4600
4601 /* FIXME: before the switch to atomic started, a new pipe_config was
4602 * kzalloc'd. Code that depends on any field being zero should be
4603 * fixed, so that the crtc_state can be safely duplicated. For now,
4604 * only fields that are know to not cause problems are preserved. */
4605
4606 saved_state->uapi = crtc_state->uapi;
4607 saved_state->inherited = crtc_state->inherited;
4608 saved_state->scaler_state = crtc_state->scaler_state;
4609 saved_state->shared_dpll = crtc_state->shared_dpll;
4610 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
4611 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
4612 sizeof(saved_state->icl_port_dplls));
4613 saved_state->crc_enabled = crtc_state->crc_enabled;
4614 if (IS_G4X(dev_priv) ||
4615 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4616 saved_state->wm = crtc_state->wm;
4617
4618 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
4619 kfree(saved_state);
4620
4621 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
4622
4623 return 0;
4624 }
4625
4626 static int
intel_modeset_pipe_config(struct intel_atomic_state * state,struct intel_crtc * crtc,const struct intel_link_bw_limits * limits)4627 intel_modeset_pipe_config(struct intel_atomic_state *state,
4628 struct intel_crtc *crtc,
4629 const struct intel_link_bw_limits *limits)
4630 {
4631 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4632 struct intel_crtc_state *crtc_state =
4633 intel_atomic_get_new_crtc_state(state, crtc);
4634 struct drm_connector *connector;
4635 struct drm_connector_state *connector_state;
4636 int pipe_src_w, pipe_src_h;
4637 int base_bpp, ret, i;
4638
4639 crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;
4640
4641 crtc_state->framestart_delay = 1;
4642
4643 /*
4644 * Sanitize sync polarity flags based on requested ones. If neither
4645 * positive or negative polarity is requested, treat this as meaning
4646 * negative polarity.
4647 */
4648 if (!(crtc_state->hw.adjusted_mode.flags &
4649 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
4650 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
4651
4652 if (!(crtc_state->hw.adjusted_mode.flags &
4653 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
4654 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
4655
4656 ret = compute_baseline_pipe_bpp(state, crtc);
4657 if (ret)
4658 return ret;
4659
4660 crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
4661 crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
4662
4663 if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
4664 drm_dbg_kms(&i915->drm,
4665 "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
4666 crtc->base.base.id, crtc->base.name,
4667 FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
4668 crtc_state->bw_constrained = true;
4669 }
4670
4671 base_bpp = crtc_state->pipe_bpp;
4672
4673 /*
4674 * Determine the real pipe dimensions. Note that stereo modes can
4675 * increase the actual pipe size due to the frame doubling and
4676 * insertion of additional space for blanks between the frame. This
4677 * is stored in the crtc timings. We use the requested mode to do this
4678 * computation to clearly distinguish it from the adjusted mode, which
4679 * can be changed by the connectors in the below retry loop.
4680 */
4681 drm_mode_get_hv_timing(&crtc_state->hw.mode,
4682 &pipe_src_w, &pipe_src_h);
4683 drm_rect_init(&crtc_state->pipe_src, 0, 0,
4684 pipe_src_w, pipe_src_h);
4685
4686 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4687 struct intel_encoder *encoder =
4688 to_intel_encoder(connector_state->best_encoder);
4689
4690 if (connector_state->crtc != &crtc->base)
4691 continue;
4692
4693 if (!check_single_encoder_cloning(state, crtc, encoder)) {
4694 drm_dbg_kms(&i915->drm,
4695 "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
4696 encoder->base.base.id, encoder->base.name);
4697 return -EINVAL;
4698 }
4699
4700 /*
4701 * Determine output_types before calling the .compute_config()
4702 * hooks so that the hooks can use this information safely.
4703 */
4704 if (encoder->compute_output_type)
4705 crtc_state->output_types |=
4706 BIT(encoder->compute_output_type(encoder, crtc_state,
4707 connector_state));
4708 else
4709 crtc_state->output_types |= BIT(encoder->type);
4710 }
4711
4712 /* Ensure the port clock defaults are reset when retrying. */
4713 crtc_state->port_clock = 0;
4714 crtc_state->pixel_multiplier = 1;
4715
4716 /* Fill in default crtc timings, allow encoders to overwrite them. */
4717 drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
4718 CRTC_STEREO_DOUBLE);
4719
4720 /* Pass our mode to the connectors and the CRTC to give them a chance to
4721 * adjust it according to limitations or connector properties, and also
4722 * a chance to reject the mode entirely.
4723 */
4724 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4725 struct intel_encoder *encoder =
4726 to_intel_encoder(connector_state->best_encoder);
4727
4728 if (connector_state->crtc != &crtc->base)
4729 continue;
4730
4731 ret = encoder->compute_config(encoder, crtc_state,
4732 connector_state);
4733 if (ret == -EDEADLK)
4734 return ret;
4735 if (ret < 0) {
4736 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
4737 encoder->base.base.id, encoder->base.name, ret);
4738 return ret;
4739 }
4740 }
4741
4742 /* Set default port clock if not overwritten by the encoder. Needs to be
4743 * done afterwards in case the encoder adjusts the mode. */
4744 if (!crtc_state->port_clock)
4745 crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
4746 * crtc_state->pixel_multiplier;
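	/*
	 * E.g. a 25200 kHz adjusted mode clock with a pixel multiplier of 2
	 * (as used by SDVO to stay above its minimum pixel rate) yields a
	 * port_clock of 50400 kHz.
	 */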
4747
4748 ret = intel_crtc_compute_config(state, crtc);
4749 if (ret == -EDEADLK)
4750 return ret;
4751 if (ret < 0) {
4752 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
4753 crtc->base.base.id, crtc->base.name, ret);
4754 return ret;
4755 }
4756
4757 /* Dithering seems to not pass through bits correctly when it should, so
4758 * only enable it on 6bpc panels and when it's not a compliance
4759 * test requesting 6bpc video pattern.
4760 */
4761 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
4762 !crtc_state->dither_force_disable;
4763 drm_dbg_kms(&i915->drm,
4764 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
4765 crtc->base.base.id, crtc->base.name,
4766 base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
4767
4768 return 0;
4769 }
4770
4771 static int
4772 intel_modeset_pipe_config_late(struct intel_atomic_state *state,
4773 struct intel_crtc *crtc)
4774 {
4775 struct intel_crtc_state *crtc_state =
4776 intel_atomic_get_new_crtc_state(state, crtc);
4777 struct drm_connector_state *conn_state;
4778 struct drm_connector *connector;
4779 int i;
4780
4781 for_each_new_connector_in_state(&state->base, connector,
4782 conn_state, i) {
4783 struct intel_encoder *encoder =
4784 to_intel_encoder(conn_state->best_encoder);
4785 int ret;
4786
4787 if (conn_state->crtc != &crtc->base ||
4788 !encoder->compute_config_late)
4789 continue;
4790
4791 ret = encoder->compute_config_late(encoder, crtc_state,
4792 conn_state);
4793 if (ret)
4794 return ret;
4795 }
4796
4797 return 0;
4798 }
4799
4800 bool intel_fuzzy_clock_check(int clock1, int clock2)
4801 {
4802 int diff;
4803
4804 if (clock1 == clock2)
4805 return true;
4806
4807 if (!clock1 || !clock2)
4808 return false;
4809
4810 diff = abs(clock1 - clock2);
4811
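	/*
	 * The check below allows the clocks to differ by up to 5% of their
	 * sum (~10% of their average):
	 * ((diff + c1 + c2) * 100) / (c1 + c2) < 105  <=>  100 * diff < 5 * (c1 + c2)
	 */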
4812 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
4813 return true;
4814
4815 return false;
4816 }
4817
4818 static bool
4819 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
4820 const struct intel_link_m_n *m2_n2)
4821 {
4822 return m_n->tu == m2_n2->tu &&
4823 m_n->data_m == m2_n2->data_m &&
4824 m_n->data_n == m2_n2->data_n &&
4825 m_n->link_m == m2_n2->link_m &&
4826 m_n->link_n == m2_n2->link_n;
4827 }
4828
4829 static bool
4830 intel_compare_infoframe(const union hdmi_infoframe *a,
4831 const union hdmi_infoframe *b)
4832 {
4833 return memcmp(a, b, sizeof(*a)) == 0;
4834 }
4835
4836 static bool
4837 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
4838 const struct drm_dp_vsc_sdp *b)
4839 {
4840 return a->pixelformat == b->pixelformat &&
4841 a->colorimetry == b->colorimetry &&
4842 a->bpc == b->bpc &&
4843 a->dynamic_range == b->dynamic_range &&
4844 a->content_type == b->content_type;
4845 }
4846
4847 static bool
4848 intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a,
4849 const struct drm_dp_as_sdp *b)
4850 {
4851 return a->vtotal == b->vtotal &&
4852 a->target_rr == b->target_rr &&
4853 a->duration_incr_ms == b->duration_incr_ms &&
4854 a->duration_decr_ms == b->duration_decr_ms &&
4855 a->mode == b->mode;
4856 }
4857
4858 static bool
4859 intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
4860 {
4861 return memcmp(a, b, len) == 0;
4862 }
4863
4864 static void __printf(5, 6)
4865 pipe_config_mismatch(struct drm_printer *p, bool fastset,
4866 const struct intel_crtc *crtc,
4867 const char *name, const char *format, ...)
4868 {
4869 struct va_format vaf;
4870 va_list args;
4871
4872 va_start(args, format);
4873 vaf.fmt = format;
4874 vaf.va = &args;
4875
4876 if (fastset)
4877 drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
4878 crtc->base.base.id, crtc->base.name, name, &vaf);
4879 else
4880 drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n",
4881 crtc->base.base.id, crtc->base.name, name, &vaf);
4882
4883 va_end(args);
4884 }
4885
4886 static void
4887 pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
4888 const struct intel_crtc *crtc,
4889 const char *name,
4890 const union hdmi_infoframe *a,
4891 const union hdmi_infoframe *b)
4892 {
4893 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4894 const char *loglevel;
4895
4896 if (fastset) {
4897 if (!drm_debug_enabled(DRM_UT_KMS))
4898 return;
4899
4900 loglevel = KERN_DEBUG;
4901 } else {
4902 loglevel = KERN_ERR;
4903 }
4904
4905 pipe_config_mismatch(p, fastset, crtc, name, "infoframe");
4906
4907 drm_printf(p, "expected:\n");
4908 hdmi_infoframe_log(loglevel, i915->drm.dev, a);
4909 drm_printf(p, "found:\n");
4910 hdmi_infoframe_log(loglevel, i915->drm.dev, b);
4911 }
4912
4913 static void
4914 pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
4915 const struct intel_crtc *crtc,
4916 const char *name,
4917 const struct drm_dp_vsc_sdp *a,
4918 const struct drm_dp_vsc_sdp *b)
4919 {
4920 pipe_config_mismatch(p, fastset, crtc, name, "dp sdp");
4921
4922 drm_printf(p, "expected:\n");
4923 drm_dp_vsc_sdp_log(p, a);
4924 drm_printf(p, "found:\n");
4925 drm_dp_vsc_sdp_log(p, b);
4926 }
4927
4928 static void
4929 pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915,
4930 bool fastset, const char *name,
4931 const struct drm_dp_as_sdp *a,
4932 const struct drm_dp_as_sdp *b)
4933 {
4934 struct drm_printer p;
4935
4936 if (fastset) {
4937 p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
4938
4939 drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name);
4940 } else {
4941 p = drm_err_printer(&i915->drm, NULL);
4942
4943 drm_printf(&p, "mismatch in %s dp sdp\n", name);
4944 }
4945
4946 drm_printf(&p, "expected:\n");
4947 drm_dp_as_sdp_log(&p, a);
4948 drm_printf(&p, "found:\n");
4949 drm_dp_as_sdp_log(&p, b);
4950 }
4951
4952 /* Returns the length up to and including the last differing byte */
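/* E.g. a = {0, 1, 2, 3} vs b = {0, 9, 2, 3} returns 2, so only the first two bytes get dumped. */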
4953 static size_t
4954 memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
4955 {
4956 int i;
4957
4958 for (i = len - 1; i >= 0; i--) {
4959 if (a[i] != b[i])
4960 return i + 1;
4961 }
4962
4963 return 0;
4964 }
4965
4966 static void
4967 pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
4968 const struct intel_crtc *crtc,
4969 const char *name,
4970 const u8 *a, const u8 *b, size_t len)
4971 {
4972 const char *loglevel;
4973
4974 if (fastset) {
4975 if (!drm_debug_enabled(DRM_UT_KMS))
4976 return;
4977
4978 loglevel = KERN_DEBUG;
4979 } else {
4980 loglevel = KERN_ERR;
4981 }
4982
4983 pipe_config_mismatch(p, fastset, crtc, name, "buffer");
4984
4985 /* only dump up to the last difference */
4986 len = memcmp_diff_len(a, b, len);
4987
4988 print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE,
4989 16, 0, a, len, false);
4990 print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE,
4991 16, 0, b, len, false);
4992 }
4993
4994 static void
4995 pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
4996 const struct intel_crtc *crtc,
4997 const char *name,
4998 const struct intel_dpll_hw_state *a,
4999 const struct intel_dpll_hw_state *b)
5000 {
5001 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5002
5003 pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */
5004
5005 drm_printf(p, "expected:\n");
5006 intel_dpll_dump_hw_state(i915, p, a);
5007 drm_printf(p, "found:\n");
5008 intel_dpll_dump_hw_state(i915, p, b);
5009 }
5010
5011 static void
5012 pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset,
5013 const struct intel_crtc *crtc,
5014 const char *name,
5015 const struct intel_cx0pll_state *a,
5016 const struct intel_cx0pll_state *b)
5017 {
5018 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5019 const char *chipname = a->use_c10 ? "C10" : "C20";
5020
5021 pipe_config_mismatch(p, fastset, crtc, name, chipname);
5022
5023 drm_printf(p, "expected:\n");
5024 intel_cx0pll_dump_hw_state(i915, a);
5025 drm_printf(p, "found:\n");
5026 intel_cx0pll_dump_hw_state(i915, b);
5027 }
5028
5029 bool
5030 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
5031 const struct intel_crtc_state *pipe_config,
5032 bool fastset)
5033 {
5034 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
5035 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5036 struct drm_printer p;
5037 bool ret = true;
5038
5039 if (fastset)
5040 p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
5041 else
5042 p = drm_err_printer(&dev_priv->drm, NULL);
5043
5044 #define PIPE_CONF_CHECK_X(name) do { \
5045 if (current_config->name != pipe_config->name) { \
5046 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
5047 __stringify(name) " is bool"); \
5048 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5049 "(expected 0x%08x, found 0x%08x)", \
5050 current_config->name, \
5051 pipe_config->name); \
5052 ret = false; \
5053 } \
5054 } while (0)
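/*
 * E.g. a failed PIPE_CONF_CHECK_X(output_types) logs something like
 * "[CRTC:80:pipe A] mismatch in output_types (expected 0x00000400,
 * found 0x00000100)" (the CRTC id here is illustrative) and marks the
 * whole comparison as failed.
 */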
5055
5056 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
5057 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
5058 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
5059 __stringify(name) " is bool"); \
5060 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5061 "(expected 0x%08x, found 0x%08x)", \
5062 current_config->name & (mask), \
5063 pipe_config->name & (mask)); \
5064 ret = false; \
5065 } \
5066 } while (0)
5067
5068 #define PIPE_CONF_CHECK_I(name) do { \
5069 if (current_config->name != pipe_config->name) { \
5070 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
5071 __stringify(name) " is bool"); \
5072 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5073 "(expected %i, found %i)", \
5074 current_config->name, \
5075 pipe_config->name); \
5076 ret = false; \
5077 } \
5078 } while (0)
5079
5080 #define PIPE_CONF_CHECK_LLI(name) do { \
5081 if (current_config->name != pipe_config->name) { \
5082 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5083 "(expected %lli, found %lli)", \
5084 current_config->name, \
5085 pipe_config->name); \
5086 ret = false; \
5087 } \
5088 } while (0)
5089
5090 #define PIPE_CONF_CHECK_BOOL(name) do { \
5091 if (current_config->name != pipe_config->name) { \
5092 BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
5093 __stringify(name) " is not bool"); \
5094 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5095 "(expected %s, found %s)", \
5096 str_yes_no(current_config->name), \
5097 str_yes_no(pipe_config->name)); \
5098 ret = false; \
5099 } \
5100 } while (0)
5101
5102 #define PIPE_CONF_CHECK_P(name) do { \
5103 if (current_config->name != pipe_config->name) { \
5104 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5105 "(expected %p, found %p)", \
5106 current_config->name, \
5107 pipe_config->name); \
5108 ret = false; \
5109 } \
5110 } while (0)
5111
5112 #define PIPE_CONF_CHECK_M_N(name) do { \
5113 if (!intel_compare_link_m_n(&current_config->name, \
5114 &pipe_config->name)) { \
5115 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5116 "(expected tu %i data %i/%i link %i/%i, " \
5117 "found tu %i, data %i/%i link %i/%i)", \
5118 current_config->name.tu, \
5119 current_config->name.data_m, \
5120 current_config->name.data_n, \
5121 current_config->name.link_m, \
5122 current_config->name.link_n, \
5123 pipe_config->name.tu, \
5124 pipe_config->name.data_m, \
5125 pipe_config->name.data_n, \
5126 pipe_config->name.link_m, \
5127 pipe_config->name.link_n); \
5128 ret = false; \
5129 } \
5130 } while (0)
5131
5132 #define PIPE_CONF_CHECK_PLL(name) do { \
5133 if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
5134 &pipe_config->name)) { \
5135 pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
5136 &current_config->name, \
5137 &pipe_config->name); \
5138 ret = false; \
5139 } \
5140 } while (0)
5141
5142 #define PIPE_CONF_CHECK_PLL_CX0(name) do { \
5143 if (!intel_cx0pll_compare_hw_state(&current_config->name, \
5144 &pipe_config->name)) { \
5145 pipe_config_cx0pll_mismatch(&p, fastset, crtc, __stringify(name), \
5146 &current_config->name, \
5147 &pipe_config->name); \
5148 ret = false; \
5149 } \
5150 } while (0)
5151
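/*
 * crtc_vtotal/crtc_vblank_end are exempt from the fastset check while
 * update_lrr is set, since LRR fastsets legitimately retune the
 * vertical totals.
 */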
5152 #define PIPE_CONF_CHECK_TIMINGS(name) do { \
5153 PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
5154 PIPE_CONF_CHECK_I(name.crtc_htotal); \
5155 PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
5156 PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
5157 PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
5158 PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
5159 PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
5160 PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
5161 PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
5162 PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
5163 if (!fastset || !pipe_config->update_lrr) { \
5164 PIPE_CONF_CHECK_I(name.crtc_vtotal); \
5165 PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
5166 } \
5167 } while (0)
5168
5169 #define PIPE_CONF_CHECK_RECT(name) do { \
5170 PIPE_CONF_CHECK_I(name.x1); \
5171 PIPE_CONF_CHECK_I(name.x2); \
5172 PIPE_CONF_CHECK_I(name.y1); \
5173 PIPE_CONF_CHECK_I(name.y2); \
5174 } while (0)
5175
5176 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
5177 if ((current_config->name ^ pipe_config->name) & (mask)) { \
5178 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5179 "(%x) (expected %i, found %i)", \
5180 (mask), \
5181 current_config->name & (mask), \
5182 pipe_config->name & (mask)); \
5183 ret = false; \
5184 } \
5185 } while (0)
5186
5187 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
5188 if (!intel_compare_infoframe(&current_config->infoframes.name, \
5189 &pipe_config->infoframes.name)) { \
5190 pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \
5191 &current_config->infoframes.name, \
5192 &pipe_config->infoframes.name); \
5193 ret = false; \
5194 } \
5195 } while (0)
5196
5197 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
5198 if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
5199 &pipe_config->infoframes.name)) { \
5200 pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
5201 &current_config->infoframes.name, \
5202 &pipe_config->infoframes.name); \
5203 ret = false; \
5204 } \
5205 } while (0)
5206
5207 #define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
5208 if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
5209 &pipe_config->infoframes.name)) { \
5210 pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \
5211 &current_config->infoframes.name, \
5212 &pipe_config->infoframes.name); \
5213 ret = false; \
5214 } \
5215 } while (0)
5216
5217 #define PIPE_CONF_CHECK_BUFFER(name, len) do { \
5218 BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
5219 BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
5220 if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
5221 pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \
5222 current_config->name, \
5223 pipe_config->name, \
5224 (len)); \
5225 ret = false; \
5226 } \
5227 } while (0)
5228
5229 #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
5230 if (current_config->gamma_mode == pipe_config->gamma_mode && \
5231 !intel_color_lut_equal(current_config, \
5232 current_config->lut, pipe_config->lut, \
5233 is_pre_csc_lut)) { \
5234 pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \
5235 "hw_state doesn't match sw_state"); \
5236 ret = false; \
5237 } \
5238 } while (0)
5239
5240 #define PIPE_CONF_CHECK_CSC(name) do { \
5241 PIPE_CONF_CHECK_X(name.preoff[0]); \
5242 PIPE_CONF_CHECK_X(name.preoff[1]); \
5243 PIPE_CONF_CHECK_X(name.preoff[2]); \
5244 PIPE_CONF_CHECK_X(name.coeff[0]); \
5245 PIPE_CONF_CHECK_X(name.coeff[1]); \
5246 PIPE_CONF_CHECK_X(name.coeff[2]); \
5247 PIPE_CONF_CHECK_X(name.coeff[3]); \
5248 PIPE_CONF_CHECK_X(name.coeff[4]); \
5249 PIPE_CONF_CHECK_X(name.coeff[5]); \
5250 PIPE_CONF_CHECK_X(name.coeff[6]); \
5251 PIPE_CONF_CHECK_X(name.coeff[7]); \
5252 PIPE_CONF_CHECK_X(name.coeff[8]); \
5253 PIPE_CONF_CHECK_X(name.postoff[0]); \
5254 PIPE_CONF_CHECK_X(name.postoff[1]); \
5255 PIPE_CONF_CHECK_X(name.postoff[2]); \
5256 } while (0)
5257
5258 #define PIPE_CONF_QUIRK(quirk) \
5259 ((current_config->quirks | pipe_config->quirks) & (quirk))
5260
5261 PIPE_CONF_CHECK_BOOL(hw.enable);
5262 PIPE_CONF_CHECK_BOOL(hw.active);
5263
5264 PIPE_CONF_CHECK_I(cpu_transcoder);
5265 PIPE_CONF_CHECK_I(mst_master_transcoder);
5266
5267 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
5268 PIPE_CONF_CHECK_I(fdi_lanes);
5269 PIPE_CONF_CHECK_M_N(fdi_m_n);
5270
5271 PIPE_CONF_CHECK_I(lane_count);
5272 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
5273
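	/*
	 * With double buffered M/N registers a fastset may legitimately
	 * retune dp_m_n (update_m_n), so only check it when no such
	 * update is pending.
	 */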
5274 if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
5275 if (!fastset || !pipe_config->update_m_n)
5276 PIPE_CONF_CHECK_M_N(dp_m_n);
5277 } else {
5278 PIPE_CONF_CHECK_M_N(dp_m_n);
5279 PIPE_CONF_CHECK_M_N(dp_m2_n2);
5280 }
5281
5282 PIPE_CONF_CHECK_X(output_types);
5283
5284 PIPE_CONF_CHECK_I(framestart_delay);
5285 PIPE_CONF_CHECK_I(msa_timing_delay);
5286
5287 PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
5288 PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);
5289
5290 PIPE_CONF_CHECK_I(pixel_multiplier);
5291
5292 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5293 DRM_MODE_FLAG_INTERLACE);
5294
5295 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
5296 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5297 DRM_MODE_FLAG_PHSYNC);
5298 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5299 DRM_MODE_FLAG_NHSYNC);
5300 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5301 DRM_MODE_FLAG_PVSYNC);
5302 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5303 DRM_MODE_FLAG_NVSYNC);
5304 }
5305
5306 PIPE_CONF_CHECK_I(output_format);
5307 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
5308 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
5309 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5310 PIPE_CONF_CHECK_BOOL(limited_color_range);
5311
5312 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
5313 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
5314 PIPE_CONF_CHECK_BOOL(has_infoframe);
5315 PIPE_CONF_CHECK_BOOL(enhanced_framing);
5316 PIPE_CONF_CHECK_BOOL(fec_enable);
5317
5318 if (!fastset) {
5319 PIPE_CONF_CHECK_BOOL(has_audio);
5320 PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
5321 }
5322
5323 PIPE_CONF_CHECK_X(gmch_pfit.control);
5324 /* pfit ratios are autocomputed by the hw on gen4+ */
5325 if (DISPLAY_VER(dev_priv) < 4)
5326 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
5327 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
5328
5329 /*
5330 * Changing the EDP transcoder input mux
5331 * (A_ONOFF vs. A_ON) requires a full modeset.
5332 */
5333 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
5334
5335 if (!fastset) {
5336 PIPE_CONF_CHECK_RECT(pipe_src);
5337
5338 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
5339 PIPE_CONF_CHECK_RECT(pch_pfit.dst);
5340
5341 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
5342 PIPE_CONF_CHECK_I(pixel_rate);
5343
5344 PIPE_CONF_CHECK_X(gamma_mode);
5345 if (IS_CHERRYVIEW(dev_priv))
5346 PIPE_CONF_CHECK_X(cgm_mode);
5347 else
5348 PIPE_CONF_CHECK_X(csc_mode);
5349 PIPE_CONF_CHECK_BOOL(gamma_enable);
5350 PIPE_CONF_CHECK_BOOL(csc_enable);
5351 PIPE_CONF_CHECK_BOOL(wgc_enable);
5352
5353 PIPE_CONF_CHECK_I(linetime);
5354 PIPE_CONF_CHECK_I(ips_linetime);
5355
5356 PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
5357 PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);
5358
5359 PIPE_CONF_CHECK_CSC(csc);
5360 PIPE_CONF_CHECK_CSC(output_csc);
5361 }
5362
5363 /*
5364 * Panel replay has to be enabled before link training. PSR doesn't have
5365 * this requirement -> check these only if using panel replay
5366 */
5367 if (current_config->active_planes &&
5368 (current_config->has_panel_replay ||
5369 pipe_config->has_panel_replay)) {
5370 PIPE_CONF_CHECK_BOOL(has_psr);
5371 PIPE_CONF_CHECK_BOOL(has_sel_update);
5372 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
5373 PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et);
5374 PIPE_CONF_CHECK_BOOL(has_panel_replay);
5375 }
5376
5377 PIPE_CONF_CHECK_BOOL(double_wide);
5378
5379 if (dev_priv->display.dpll.mgr)
5380 PIPE_CONF_CHECK_P(shared_dpll);
5381
5382 /* FIXME convert everything over to the dpll_mgr */
5383 if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
5384 PIPE_CONF_CHECK_PLL(dpll_hw_state);
5385
5386 /* FIXME convert MTL+ platforms over to dpll_mgr */
5387 if (DISPLAY_VER(dev_priv) >= 14)
5388 PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);
5389
5390 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
5391 PIPE_CONF_CHECK_X(dsi_pll.div);
5392
5393 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
5394 PIPE_CONF_CHECK_I(pipe_bpp);
5395
5396 if (!fastset || !pipe_config->update_m_n) {
5397 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
5398 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
5399 }
5400 PIPE_CONF_CHECK_I(port_clock);
5401
5402 PIPE_CONF_CHECK_I(min_voltage_level);
5403
5404 if (current_config->has_psr || pipe_config->has_psr)
5405 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
5406 ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
5407 else
5408 PIPE_CONF_CHECK_X(infoframes.enable);
5409
5410 PIPE_CONF_CHECK_X(infoframes.gcp);
5411 PIPE_CONF_CHECK_INFOFRAME(avi);
5412 PIPE_CONF_CHECK_INFOFRAME(spd);
5413 PIPE_CONF_CHECK_INFOFRAME(hdmi);
5414 PIPE_CONF_CHECK_INFOFRAME(drm);
5415 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
5416 PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
5417
5418 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
5419 PIPE_CONF_CHECK_I(master_transcoder);
5420 PIPE_CONF_CHECK_X(joiner_pipes);
5421
5422 PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable);
5423 PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb);
5424 PIPE_CONF_CHECK_BOOL(dsc.config.simple_422);
5425 PIPE_CONF_CHECK_BOOL(dsc.config.native_422);
5426 PIPE_CONF_CHECK_BOOL(dsc.config.native_420);
5427 PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable);
5428 PIPE_CONF_CHECK_I(dsc.config.line_buf_depth);
5429 PIPE_CONF_CHECK_I(dsc.config.bits_per_component);
5430 PIPE_CONF_CHECK_I(dsc.config.pic_width);
5431 PIPE_CONF_CHECK_I(dsc.config.pic_height);
5432 PIPE_CONF_CHECK_I(dsc.config.slice_width);
5433 PIPE_CONF_CHECK_I(dsc.config.slice_height);
5434 PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay);
5435 PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay);
5436 PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval);
5437 PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval);
5438 PIPE_CONF_CHECK_I(dsc.config.initial_scale_value);
5439 PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset);
5440 PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp);
5441 PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp);
5442 PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset);
5443 PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset);
5444 PIPE_CONF_CHECK_I(dsc.config.initial_offset);
5445 PIPE_CONF_CHECK_I(dsc.config.final_offset);
5446 PIPE_CONF_CHECK_I(dsc.config.rc_model_size);
5447 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0);
5448 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1);
5449 PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size);
5450 PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset);
5451 PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
5452
5453 PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
5454 PIPE_CONF_CHECK_BOOL(dsc.dsc_split);
5455 PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
5456
5457 PIPE_CONF_CHECK_BOOL(splitter.enable);
5458 PIPE_CONF_CHECK_I(splitter.link_count);
5459 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
5460
5461 if (!fastset) {
5462 PIPE_CONF_CHECK_BOOL(vrr.enable);
5463 PIPE_CONF_CHECK_I(vrr.vmin);
5464 PIPE_CONF_CHECK_I(vrr.vmax);
5465 PIPE_CONF_CHECK_I(vrr.flipline);
5466 PIPE_CONF_CHECK_I(vrr.pipeline_full);
5467 PIPE_CONF_CHECK_I(vrr.guardband);
5468 PIPE_CONF_CHECK_I(vrr.vsync_start);
5469 PIPE_CONF_CHECK_I(vrr.vsync_end);
5470 PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
5471 PIPE_CONF_CHECK_LLI(cmrr.cmrr_n);
5472 PIPE_CONF_CHECK_BOOL(cmrr.enable);
5473 }
5474
5475 #undef PIPE_CONF_CHECK_X
5476 #undef PIPE_CONF_CHECK_I
5477 #undef PIPE_CONF_CHECK_LLI
5478 #undef PIPE_CONF_CHECK_BOOL
5479 #undef PIPE_CONF_CHECK_P
5480 #undef PIPE_CONF_CHECK_FLAGS
5481 #undef PIPE_CONF_CHECK_COLOR_LUT
5482 #undef PIPE_CONF_CHECK_TIMINGS
5483 #undef PIPE_CONF_CHECK_RECT
5484 #undef PIPE_CONF_QUIRK
5485
5486 return ret;
5487 }
5488
5489 static void
5490 intel_verify_planes(struct intel_atomic_state *state)
5491 {
5492 struct intel_plane *plane;
5493 const struct intel_plane_state *plane_state;
5494 int i;
5495
5496 for_each_new_intel_plane_in_state(state, plane,
5497 plane_state, i)
5498 assert_plane(plane, plane_state->planar_slave ||
5499 plane_state->uapi.visible);
5500 }
5501
5502 static int intel_modeset_pipe(struct intel_atomic_state *state,
5503 struct intel_crtc_state *crtc_state,
5504 const char *reason)
5505 {
5506 struct drm_i915_private *i915 = to_i915(state->base.dev);
5507 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5508 int ret;
5509
5510 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
5511 crtc->base.base.id, crtc->base.name, reason);
5512
5513 ret = drm_atomic_add_affected_connectors(&state->base,
5514 &crtc->base);
5515 if (ret)
5516 return ret;
5517
5518 ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc);
5519 if (ret)
5520 return ret;
5521
5522 ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
5523 if (ret)
5524 return ret;
5525
5526 ret = intel_atomic_add_affected_planes(state, crtc);
5527 if (ret)
5528 return ret;
5529
5530 crtc_state->uapi.mode_changed = true;
5531
5532 return 0;
5533 }
5534
5535 /**
5536 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes
5537 * @state: intel atomic state
5538 * @reason: the reason for the full modeset
5539 * @mask: mask of pipes to modeset
5540 *
5541 * Add pipes in @mask to @state and force a full modeset on the enabled ones
5542 * due to the description in @reason.
5543 * This function can be called only before new plane states are computed.
5544 *
5545 * Returns 0 in case of success, negative error code otherwise.
5546 */
5547 int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
5548 const char *reason, u8 mask)
5549 {
5550 struct drm_i915_private *i915 = to_i915(state->base.dev);
5551 struct intel_crtc *crtc;
5552
5553 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) {
5554 struct intel_crtc_state *crtc_state;
5555 int ret;
5556
5557 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5558 if (IS_ERR(crtc_state))
5559 return PTR_ERR(crtc_state);
5560
5561 if (!crtc_state->hw.enable ||
5562 intel_crtc_needs_modeset(crtc_state))
5563 continue;
5564
5565 ret = intel_modeset_pipe(state, crtc_state, reason);
5566 if (ret)
5567 return ret;
5568 }
5569
5570 return 0;
5571 }
5572
5573 static void
5574 intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
5575 {
5576 crtc_state->uapi.mode_changed = true;
5577
5578 crtc_state->update_pipe = false;
5579 crtc_state->update_m_n = false;
5580 crtc_state->update_lrr = false;
5581 }
5582
5583 /**
5584 * intel_modeset_all_pipes_late - force a full modeset on all pipes
5585 * @state: intel atomic state
5586 * @reason: the reason for the full modeset
5587 *
5588 * Add all pipes to @state and force a full modeset on the active ones due to
5589 * the description in @reason.
5590 * This function can be called only after new plane states have been computed.
5591 *
5592 * Returns 0 in case of success, negative error code otherwise.
5593 */
5594 int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
5595 const char *reason)
5596 {
5597 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5598 struct intel_crtc *crtc;
5599
5600 for_each_intel_crtc(&dev_priv->drm, crtc) {
5601 struct intel_crtc_state *crtc_state;
5602 int ret;
5603
5604 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5605 if (IS_ERR(crtc_state))
5606 return PTR_ERR(crtc_state);
5607
5608 if (!crtc_state->hw.active ||
5609 intel_crtc_needs_modeset(crtc_state))
5610 continue;
5611
5612 ret = intel_modeset_pipe(state, crtc_state, reason);
5613 if (ret)
5614 return ret;
5615
5616 intel_crtc_flag_modeset(crtc_state);
5617
5618 crtc_state->update_planes |= crtc_state->active_planes;
5619 crtc_state->async_flip_planes = 0;
5620 crtc_state->do_async_flip = false;
5621 }
5622
5623 return 0;
5624 }
5625
5626 int intel_modeset_commit_pipes(struct drm_i915_private *i915,
5627 u8 pipe_mask,
5628 struct drm_modeset_acquire_ctx *ctx)
5629 {
5630 struct drm_atomic_state *state;
5631 struct intel_crtc *crtc;
5632 int ret;
5633
5634 state = drm_atomic_state_alloc(&i915->drm);
5635 if (!state)
5636 return -ENOMEM;
5637
5638 state->acquire_ctx = ctx;
5639 to_intel_atomic_state(state)->internal = true;
5640
5641 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
5642 struct intel_crtc_state *crtc_state =
5643 intel_atomic_get_crtc_state(state, crtc);
5644
5645 if (IS_ERR(crtc_state)) {
5646 ret = PTR_ERR(crtc_state);
5647 goto out;
5648 }
5649
5650 crtc_state->uapi.connectors_changed = true;
5651 }
5652
5653 ret = drm_atomic_commit(state);
5654 out:
5655 drm_atomic_state_put(state);
5656
5657 return ret;
5658 }
5659
5660 /*
5661 * This implements the workaround described in the "notes" section of the mode
5662 * set sequence documentation. When going from no pipes or single pipe to
5663 * multiple pipes, and planes are enabled after the pipe, we need to wait at
5664 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
5665 */
5666 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
5667 {
5668 struct intel_crtc_state *crtc_state;
5669 struct intel_crtc *crtc;
5670 struct intel_crtc_state *first_crtc_state = NULL;
5671 struct intel_crtc_state *other_crtc_state = NULL;
5672 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
5673 int i;
5674
5675 /* look at all crtcs that are going to be enabled during the modeset */
5676 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5677 if (!crtc_state->hw.active ||
5678 !intel_crtc_needs_modeset(crtc_state))
5679 continue;
5680
5681 if (first_crtc_state) {
5682 other_crtc_state = crtc_state;
5683 break;
5684 } else {
5685 first_crtc_state = crtc_state;
5686 first_pipe = crtc->pipe;
5687 }
5688 }
5689
5690 /* No workaround needed? */
5691 if (!first_crtc_state)
5692 return 0;
5693
5694 /* w/a possibly needed, check how many crtcs are already enabled. */
5695 for_each_intel_crtc(state->base.dev, crtc) {
5696 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5697 if (IS_ERR(crtc_state))
5698 return PTR_ERR(crtc_state);
5699
5700 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
5701
5702 if (!crtc_state->hw.active ||
5703 intel_crtc_needs_modeset(crtc_state))
5704 continue;
5705
5706 /* 2 or more enabled crtcs means no need for w/a */
5707 if (enabled_pipe != INVALID_PIPE)
5708 return 0;
5709
5710 enabled_pipe = crtc->pipe;
5711 }
5712
5713 if (enabled_pipe != INVALID_PIPE)
5714 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
5715 else if (other_crtc_state)
5716 other_crtc_state->hsw_workaround_pipe = first_pipe;
5717
5718 return 0;
5719 }
5720
5721 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
5722 u8 active_pipes)
5723 {
5724 const struct intel_crtc_state *crtc_state;
5725 struct intel_crtc *crtc;
5726 int i;
5727
5728 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5729 if (crtc_state->hw.active)
5730 active_pipes |= BIT(crtc->pipe);
5731 else
5732 active_pipes &= ~BIT(crtc->pipe);
5733 }
5734
5735 return active_pipes;
5736 }
5737
5738 static int intel_modeset_checks(struct intel_atomic_state *state)
5739 {
5740 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5741
5742 state->modeset = true;
5743
5744 if (IS_HASWELL(dev_priv))
5745 return hsw_mode_set_planes_workaround(state);
5746
5747 return 0;
5748 }
5749
5750 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
5751 struct intel_crtc_state *new_crtc_state)
5752 {
5753 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5754 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5755
5756 /* only allow LRR when the timings stay within the VRR range */
5757 if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
5758 new_crtc_state->update_lrr = false;
5759
5760 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
5761 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
5762 crtc->base.base.id, crtc->base.name);
5763 else
5764 new_crtc_state->uapi.mode_changed = false;
5765
5766 if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
5767 &new_crtc_state->dp_m_n))
5768 new_crtc_state->update_m_n = false;
5769
5770 if (old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
5771 old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end)
5772 new_crtc_state->update_lrr = false;
5773
5774 if (intel_crtc_needs_modeset(new_crtc_state))
5775 intel_crtc_flag_modeset(new_crtc_state);
5776 else
5777 new_crtc_state->update_pipe = true;
5778 }
5779
5780 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
5781 struct intel_crtc *crtc,
5782 u8 plane_ids_mask)
5783 {
5784 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5785 struct intel_plane *plane;
5786
5787 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5788 struct intel_plane_state *plane_state;
5789
5790 if ((plane_ids_mask & BIT(plane->id)) == 0)
5791 continue;
5792
5793 plane_state = intel_atomic_get_plane_state(state, plane);
5794 if (IS_ERR(plane_state))
5795 return PTR_ERR(plane_state);
5796 }
5797
5798 return 0;
5799 }
5800
5801 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
5802 struct intel_crtc *crtc)
5803 {
5804 const struct intel_crtc_state *old_crtc_state =
5805 intel_atomic_get_old_crtc_state(state, crtc);
5806 const struct intel_crtc_state *new_crtc_state =
5807 intel_atomic_get_new_crtc_state(state, crtc);
5808
5809 return intel_crtc_add_planes_to_state(state, crtc,
5810 old_crtc_state->enabled_planes |
5811 new_crtc_state->enabled_planes);
5812 }
5813
5814 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
5815 {
5816 /* See {hsw,vlv,ivb}_plane_ratio() */
5817 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
5818 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5819 IS_IVYBRIDGE(dev_priv);
5820 }
5821
5822 static int intel_crtc_add_joiner_planes(struct intel_atomic_state *state,
5823 struct intel_crtc *crtc,
5824 struct intel_crtc *other)
5825 {
5826 const struct intel_plane_state __maybe_unused *plane_state;
5827 struct intel_plane *plane;
5828 u8 plane_ids = 0;
5829 int i;
5830
5831 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5832 if (plane->pipe == crtc->pipe)
5833 plane_ids |= BIT(plane->id);
5834 }
5835
5836 return intel_crtc_add_planes_to_state(state, other, plane_ids);
5837 }
5838
5839 static int intel_joiner_add_affected_planes(struct intel_atomic_state *state)
5840 {
5841 struct drm_i915_private *i915 = to_i915(state->base.dev);
5842 const struct intel_crtc_state *crtc_state;
5843 struct intel_crtc *crtc;
5844 int i;
5845
5846 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5847 struct intel_crtc *other;
5848
5849 for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
5850 crtc_state->joiner_pipes) {
5851 int ret;
5852
5853 if (crtc == other)
5854 continue;
5855
5856 ret = intel_crtc_add_joiner_planes(state, crtc, other);
5857 if (ret)
5858 return ret;
5859 }
5860 }
5861
5862 return 0;
5863 }
5864
5865 static int intel_atomic_check_planes(struct intel_atomic_state *state)
5866 {
5867 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5868 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
5869 struct intel_plane_state __maybe_unused *plane_state;
5870 struct intel_plane *plane;
5871 struct intel_crtc *crtc;
5872 int i, ret;
5873
5874 ret = icl_add_linked_planes(state);
5875 if (ret)
5876 return ret;
5877
5878 ret = intel_joiner_add_affected_planes(state);
5879 if (ret)
5880 return ret;
5881
5882 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5883 ret = intel_plane_atomic_check(state, plane);
5884 if (ret) {
5885 drm_dbg_atomic(&dev_priv->drm,
5886 "[PLANE:%d:%s] atomic driver check failed\n",
5887 plane->base.base.id, plane->base.name);
5888 return ret;
5889 }
5890 }
5891
5892 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5893 new_crtc_state, i) {
5894 u8 old_active_planes, new_active_planes;
5895
5896 ret = icl_check_nv12_planes(state, crtc);
5897 if (ret)
5898 return ret;
5899
5900 /*
5901 * On some platforms the number of active planes affects
5902 * the planes' minimum cdclk calculation. Add such planes
5903 * to the state before we compute the minimum cdclk.
5904 */
5905 if (!active_planes_affects_min_cdclk(dev_priv))
5906 continue;
5907
5908 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
5909 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
5910
5911 if (hweight8(old_active_planes) == hweight8(new_active_planes))
5912 continue;
5913
5914 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
5915 if (ret)
5916 return ret;
5917 }
5918
5919 return 0;
5920 }
5921
5922 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
5923 {
5924 struct intel_crtc_state __maybe_unused *crtc_state;
5925 struct intel_crtc *crtc;
5926 int i;
5927
5928 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5929 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5930 int ret;
5931
5932 ret = intel_crtc_atomic_check(state, crtc);
5933 if (ret) {
5934 drm_dbg_atomic(&i915->drm,
5935 "[CRTC:%d:%s] atomic driver check failed\n",
5936 crtc->base.base.id, crtc->base.name);
5937 return ret;
5938 }
5939 }
5940
5941 return 0;
5942 }
5943
5944 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
5945 u8 transcoders)
5946 {
5947 const struct intel_crtc_state *new_crtc_state;
5948 struct intel_crtc *crtc;
5949 int i;
5950
5951 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
5952 if (new_crtc_state->hw.enable &&
5953 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
5954 intel_crtc_needs_modeset(new_crtc_state))
5955 return true;
5956 }
5957
5958 return false;
5959 }
5960
5961 static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
5962 u8 pipes)
5963 {
5964 const struct intel_crtc_state *new_crtc_state;
5965 struct intel_crtc *crtc;
5966 int i;
5967
5968 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
5969 if (new_crtc_state->hw.enable &&
5970 pipes & BIT(crtc->pipe) &&
5971 intel_crtc_needs_modeset(new_crtc_state))
5972 return true;
5973 }
5974
5975 return false;
5976 }
5977
5978 static int intel_atomic_check_joiner(struct intel_atomic_state *state,
5979 struct intel_crtc *primary_crtc)
5980 {
5981 struct drm_i915_private *i915 = to_i915(state->base.dev);
5982 struct intel_crtc_state *primary_crtc_state =
5983 intel_atomic_get_new_crtc_state(state, primary_crtc);
5984 struct intel_crtc *secondary_crtc;
5985
5986 if (!primary_crtc_state->joiner_pipes)
5987 return 0;
5988
5989 /* sanity check */
5990 if (drm_WARN_ON(&i915->drm,
5991 primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state)))
5992 return -EINVAL;
5993
5994 if (primary_crtc_state->joiner_pipes & ~joiner_pipes(i915)) {
5995 drm_dbg_kms(&i915->drm,
5996 "[CRTC:%d:%s] Cannot act as joiner primary "
5997 "(need 0x%x as pipes, only 0x%x possible)\n",
5998 primary_crtc->base.base.id, primary_crtc->base.name,
5999 primary_crtc_state->joiner_pipes, joiner_pipes(i915));
6000 return -EINVAL;
6001 }
6002
6003 for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
6004 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
6005 struct intel_crtc_state *secondary_crtc_state;
6006 int ret;
6007
6008 secondary_crtc_state = intel_atomic_get_crtc_state(&state->base, secondary_crtc);
6009 if (IS_ERR(secondary_crtc_state))
6010 return PTR_ERR(secondary_crtc_state);
6011
6012 /* primary being enabled, secondary was already configured? */
6013 if (secondary_crtc_state->uapi.enable) {
6014 drm_dbg_kms(&i915->drm,
6015 "[CRTC:%d:%s] secondary is enabled as normal CRTC, but "
6016 "[CRTC:%d:%s] claiming this CRTC for joiner.\n",
6017 secondary_crtc->base.base.id, secondary_crtc->base.name,
6018 primary_crtc->base.base.id, primary_crtc->base.name);
6019 return -EINVAL;
6020 }
6021
6022 /*
6023 * The state copy logic assumes the primary crtc gets processed
6024 * before the secondary crtc during the main compute_config loop.
6025 * This works because the crtcs are created in pipe order,
6026 * and the hardware requires primary pipe < secondary pipe as well.
6027 * Should that change we need to rethink the logic.
6028 */
6029 if (WARN_ON(drm_crtc_index(&primary_crtc->base) >
6030 drm_crtc_index(&secondary_crtc->base)))
6031 return -EINVAL;
6032
6033 drm_dbg_kms(&i915->drm,
6034 "[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n",
6035 secondary_crtc->base.base.id, secondary_crtc->base.name,
6036 primary_crtc->base.base.id, primary_crtc->base.name);
6037
6038 secondary_crtc_state->joiner_pipes =
6039 primary_crtc_state->joiner_pipes;
6040
6041 ret = copy_joiner_crtc_state_modeset(state, secondary_crtc);
6042 if (ret)
6043 return ret;
6044 }
6045
6046 return 0;
6047 }
6048
6049 static void kill_joiner_secondaries(struct intel_atomic_state *state,
6050 struct intel_crtc *primary_crtc)
6051 {
6052 struct drm_i915_private *i915 = to_i915(state->base.dev);
6053 struct intel_crtc_state *primary_crtc_state =
6054 intel_atomic_get_new_crtc_state(state, primary_crtc);
6055 struct intel_crtc *secondary_crtc;
6056
6057 for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
6058 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
6059 struct intel_crtc_state *secondary_crtc_state =
6060 intel_atomic_get_new_crtc_state(state, secondary_crtc);
6061
6062 secondary_crtc_state->joiner_pipes = 0;
6063
6064 intel_crtc_copy_uapi_to_hw_state_modeset(state, secondary_crtc);
6065 }
6066
6067 primary_crtc_state->joiner_pipes = 0;
6068 }
6069
6070 /**
6071 * DOC: asynchronous flip implementation
6072 *
6073 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
6074 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
6075 * Correspondingly, support is currently added for the primary plane only.
6076 *
6077 * Async flip can only change the plane surface address, so anything else
6078 * changing is rejected from the intel_async_flip_check_hw() function.
6079 * Once this check is cleared, flip done interrupt is enabled using
6080 * the intel_crtc_enable_flip_done() function.
6081 *
6082 * As soon as the surface address register is written, flip done interrupt is
6083 * generated and the requested events are sent to userspace in the interrupt
6084 * handler itself. The timestamp and sequence sent during the flip done event
6085 * correspond to the last vblank and have no relation to the actual time when
6086 * the flip done event was sent.
6087 */
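/*
 * A minimal userspace sketch (assuming libdrm and an already-created
 * framebuffer fb_id):
 *
 *	ret = drmModePageFlip(fd, crtc_id, fb_id,
 *			      DRM_MODE_PAGE_FLIP_EVENT |
 *			      DRM_MODE_PAGE_FLIP_ASYNC, user_data);
 *
 * The flip done event is then delivered on the DRM fd and can be drained
 * with drmHandleEvent().
 */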
6088 static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
6089 struct intel_crtc *crtc)
6090 {
6091 struct drm_i915_private *i915 = to_i915(state->base.dev);
6092 const struct intel_crtc_state *new_crtc_state =
6093 intel_atomic_get_new_crtc_state(state, crtc);
6094 const struct intel_plane_state *old_plane_state;
6095 struct intel_plane_state *new_plane_state;
6096 struct intel_plane *plane;
6097 int i;
6098
6099 if (!new_crtc_state->uapi.async_flip)
6100 return 0;
6101
6102 if (!new_crtc_state->uapi.active) {
6103 drm_dbg_kms(&i915->drm,
6104 "[CRTC:%d:%s] not active\n",
6105 crtc->base.base.id, crtc->base.name);
6106 return -EINVAL;
6107 }
6108
6109 if (intel_crtc_needs_modeset(new_crtc_state)) {
6110 drm_dbg_kms(&i915->drm,
6111 "[CRTC:%d:%s] modeset required\n",
6112 crtc->base.base.id, crtc->base.name);
6113 return -EINVAL;
6114 }
6115
6116 /*
6117 * FIXME: joiner+async flip is busted currently.
6118 * Remove this check once the issues are fixed.
6119 */
6120 if (new_crtc_state->joiner_pipes) {
6121 drm_dbg_kms(&i915->drm,
6122 "[CRTC:%d:%s] async flip disallowed with joiner\n",
6123 crtc->base.base.id, crtc->base.name);
6124 return -EINVAL;
6125 }
6126
6127 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6128 new_plane_state, i) {
6129 if (plane->pipe != crtc->pipe)
6130 continue;
6131
6132 /*
6133 * TODO: Async flip is only supported through the page flip IOCTL
6134 * as of now. So support currently added for primary plane only.
6135 * Support for other planes on platforms that support
6136 * this (vlv/chv and icl+) should be added when async flip is
6137 * enabled in the atomic IOCTL path.
6138 */
6139 if (!plane->async_flip) {
6140 drm_dbg_kms(&i915->drm,
6141 "[PLANE:%d:%s] async flip not supported\n",
6142 plane->base.base.id, plane->base.name);
6143 return -EINVAL;
6144 }
6145
6146 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
6147 drm_dbg_kms(&i915->drm,
6148 "[PLANE:%d:%s] no old or new framebuffer\n",
6149 plane->base.base.id, plane->base.name);
6150 return -EINVAL;
6151 }
6152 }
6153
6154 return 0;
6155 }
6156
6157 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
6158 {
6159 struct drm_i915_private *i915 = to_i915(state->base.dev);
6160 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6161 const struct intel_plane_state *new_plane_state, *old_plane_state;
6162 struct intel_plane *plane;
6163 int i;
6164
6165 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6166 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6167
6168 if (!new_crtc_state->uapi.async_flip)
6169 return 0;
6170
6171 if (!new_crtc_state->hw.active) {
6172 drm_dbg_kms(&i915->drm,
6173 "[CRTC:%d:%s] not active\n",
6174 crtc->base.base.id, crtc->base.name);
6175 return -EINVAL;
6176 }
6177
6178 if (intel_crtc_needs_modeset(new_crtc_state)) {
6179 drm_dbg_kms(&i915->drm,
6180 "[CRTC:%d:%s] modeset required\n",
6181 crtc->base.base.id, crtc->base.name);
6182 return -EINVAL;
6183 }
6184
6185 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
6186 drm_dbg_kms(&i915->drm,
6187 "[CRTC:%d:%s] Active planes cannot be in async flip\n",
6188 crtc->base.base.id, crtc->base.name);
6189 return -EINVAL;
6190 }
6191
6192 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6193 new_plane_state, i) {
6194 if (plane->pipe != crtc->pipe)
6195 continue;
6196
6197 /*
6198 * Only async flip capable planes should be in the state
6199 * if we're really about to ask the hardware to perform
6200 * an async flip. We should never get this far otherwise.
6201 */
6202 if (drm_WARN_ON(&i915->drm,
6203 new_crtc_state->do_async_flip && !plane->async_flip))
6204 return -EINVAL;
6205
6206 /*
6207 * Only check async flip capable planes. Other planes
6208 * may be involved in the initial commit due to
6209 * the wm0/ddb optimization.
6210 *
6211 * TODO maybe should track which planes actually
6212 * were requested to do the async flip...
6213 */
6214 if (!plane->async_flip)
6215 continue;
6216
6217 /*
6218 * FIXME: This check is kept generic for all platforms.
6219 * Need to verify this for all gen9 platforms to enable
6220 * this selectively if required.
6221 */
6222 switch (new_plane_state->hw.fb->modifier) {
6223 case DRM_FORMAT_MOD_LINEAR:
6224 /*
6225 * FIXME: Async flip on linear buffers is supported on ICL,
6226 * but additional alignment and fbc restrictions
6227 * need to be taken care of. These aren't applicable for
6228 * gen12+.
6229 */
6230 if (DISPLAY_VER(i915) < 12) {
6231 drm_dbg_kms(&i915->drm,
6232 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n",
6233 plane->base.base.id, plane->base.name,
6234 new_plane_state->hw.fb->modifier, DISPLAY_VER(i915));
6235 return -EINVAL;
6236 }
6237 break;
6238
6239 case I915_FORMAT_MOD_X_TILED:
6240 case I915_FORMAT_MOD_Y_TILED:
6241 case I915_FORMAT_MOD_Yf_TILED:
6242 case I915_FORMAT_MOD_4_TILED:
6243 case I915_FORMAT_MOD_4_TILED_BMG_CCS:
6244 case I915_FORMAT_MOD_4_TILED_LNL_CCS:
6245 break;
6246 default:
6247 drm_dbg_kms(&i915->drm,
6248 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
6249 plane->base.base.id, plane->base.name,
6250 new_plane_state->hw.fb->modifier);
6251 return -EINVAL;
6252 }
6253
6254 if (new_plane_state->hw.fb->format->num_planes > 1) {
6255 drm_dbg_kms(&i915->drm,
6256 "[PLANE:%d:%s] Planar formats do not support async flips\n",
6257 plane->base.base.id, plane->base.name);
6258 return -EINVAL;
6259 }
6260
6261 /*
6262 * We turn the first async flip request into a sync flip
6263 * so that we can reconfigure the plane (eg. change modifier).
6264 */
6265 if (!new_crtc_state->do_async_flip)
6266 continue;
6267
6268 if (old_plane_state->view.color_plane[0].mapping_stride !=
6269 new_plane_state->view.color_plane[0].mapping_stride) {
6270 drm_dbg_kms(&i915->drm,
6271 "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
6272 plane->base.base.id, plane->base.name);
6273 return -EINVAL;
6274 }
6275
6276 if (old_plane_state->hw.fb->modifier !=
6277 new_plane_state->hw.fb->modifier) {
6278 drm_dbg_kms(&i915->drm,
6279 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
6280 plane->base.base.id, plane->base.name);
6281 return -EINVAL;
6282 }
6283
6284 if (old_plane_state->hw.fb->format !=
6285 new_plane_state->hw.fb->format) {
6286 drm_dbg_kms(&i915->drm,
6287 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
6288 plane->base.base.id, plane->base.name);
6289 return -EINVAL;
6290 }
6291
6292 if (old_plane_state->hw.rotation !=
6293 new_plane_state->hw.rotation) {
6294 drm_dbg_kms(&i915->drm,
6295 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
6296 plane->base.base.id, plane->base.name);
6297 return -EINVAL;
6298 }
6299
6300 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
6301 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
6302 drm_dbg_kms(&i915->drm,
6303 "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
6304 plane->base.base.id, plane->base.name);
6305 return -EINVAL;
6306 }
6307
6308 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
6309 drm_dbg_kms(&i915->drm,
6310 "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
6311 plane->base.base.id, plane->base.name);
6312 return -EINVAL;
6313 }
6314
6315 if (old_plane_state->hw.pixel_blend_mode !=
6316 new_plane_state->hw.pixel_blend_mode) {
6317 drm_dbg_kms(&i915->drm,
6318 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
6319 plane->base.base.id, plane->base.name);
6320 return -EINVAL;
6321 }
6322
6323 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
6324 drm_dbg_kms(&i915->drm,
6325 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
6326 plane->base.base.id, plane->base.name);
6327 return -EINVAL;
6328 }
6329
6330 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
6331 drm_dbg_kms(&i915->drm,
6332 "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
6333 plane->base.base.id, plane->base.name);
6334 return -EINVAL;
6335 }
6336
6337 /* plane decryption is allowed to change only in synchronous flips */
6338 if (old_plane_state->decrypt != new_plane_state->decrypt) {
6339 drm_dbg_kms(&i915->drm,
6340 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
6341 plane->base.base.id, plane->base.name);
6342 return -EINVAL;
6343 }
6344 }
6345
6346 return 0;
6347 }
6348
6349 static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
6350 {
6351 struct drm_i915_private *i915 = to_i915(state->base.dev);
6352 const struct intel_plane_state *plane_state;
6353 struct intel_crtc_state *crtc_state;
6354 struct intel_plane *plane;
6355 struct intel_crtc *crtc;
6356 u8 affected_pipes = 0;
6357 u8 modeset_pipes = 0;
6358 int i;
6359
6360 /*
6361 * Any plane which is in use by the joiner needs its crtc.
6362 * Pull those in first as this will not have happened yet
6363 * if the plane remains disabled according to uapi.
6364 */
6365 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6366 crtc = to_intel_crtc(plane_state->hw.crtc);
6367 if (!crtc)
6368 continue;
6369
6370 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6371 if (IS_ERR(crtc_state))
6372 return PTR_ERR(crtc_state);
6373 }
6374
6375 /* Now pull in all joined crtcs */
6376 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6377 affected_pipes |= crtc_state->joiner_pipes;
6378 if (intel_crtc_needs_modeset(crtc_state))
6379 modeset_pipes |= crtc_state->joiner_pipes;
6380 }
6381
6382 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
6383 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6384 if (IS_ERR(crtc_state))
6385 return PTR_ERR(crtc_state);
6386 }
6387
6388 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
6389 int ret;
6390
6391 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6392
6393 crtc_state->uapi.mode_changed = true;
6394
6395 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
6396 if (ret)
6397 return ret;
6398
6399 ret = intel_atomic_add_affected_planes(state, crtc);
6400 if (ret)
6401 return ret;
6402 }
6403
6404 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6405 /* Kill old joiner link, we may re-establish afterwards */
6406 if (intel_crtc_needs_modeset(crtc_state) &&
6407 intel_crtc_is_joiner_primary(crtc_state))
6408 kill_joiner_secondaries(state, crtc);
6409 }
6410
6411 return 0;
6412 }
6413
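/*
 * Compute the new hardware configuration for every crtc that needs a
 * modeset, in two passes: first the per-pipe config, then the "late"
 * pass that may depend on the results of other pipes. On failure the
 * pipe that could not be configured is reported via @failed_pipe so
 * the caller can retry with tighter link bandwidth limits.
 */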
6414 static int intel_atomic_check_config(struct intel_atomic_state *state,
6415 struct intel_link_bw_limits *limits,
6416 enum pipe *failed_pipe)
6417 {
6418 struct drm_i915_private *i915 = to_i915(state->base.dev);
6419 struct intel_crtc_state *new_crtc_state;
6420 struct intel_crtc *crtc;
6421 int ret;
6422 int i;
6423
6424 *failed_pipe = INVALID_PIPE;
6425
6426 ret = intel_joiner_add_affected_crtcs(state);
6427 if (ret)
6428 return ret;
6429
6430 ret = intel_fdi_add_affected_crtcs(state);
6431 if (ret)
6432 return ret;
6433
6434 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6435 if (!intel_crtc_needs_modeset(new_crtc_state)) {
6436 if (intel_crtc_is_joiner_secondary(new_crtc_state))
6437 copy_joiner_crtc_state_nomodeset(state, crtc);
6438 else
6439 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
6440 continue;
6441 }
6442
6443 if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
6444 continue;
6445
6446 ret = intel_crtc_prepare_cleared_state(state, crtc);
6447 if (ret)
6448 goto fail;
6449
6450 if (!new_crtc_state->hw.enable)
6451 continue;
6452
6453 ret = intel_modeset_pipe_config(state, crtc, limits);
6454 if (ret)
6455 goto fail;
6456 }
6457
6458 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6459 if (!intel_crtc_needs_modeset(new_crtc_state))
6460 continue;
6461
6462 if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
6463 continue;
6464
6465 if (!new_crtc_state->hw.enable)
6466 continue;
6467
6468 ret = intel_modeset_pipe_config_late(state, crtc);
6469 if (ret)
6470 goto fail;
6471 }
6472
6473 fail:
6474 if (ret)
6475 *failed_pipe = crtc->pipe;
6476
6477 return ret;
6478 }
6479
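/*
 * Run the config computation in a loop against the link bandwidth
 * limits. An illustrative sketch of the control flow below:
 *
 *	init limits;
 *	for (;;) {
 *		compute configs; on -EINVAL, lower the failing pipe's
 *		bpp limit and retry;
 *		check link bw; on -EAGAIN (a limit was reduced), retry;
 *		otherwise we're done;
 *	}
 */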
6480 static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
6481 {
6482 struct intel_link_bw_limits new_limits;
6483 struct intel_link_bw_limits old_limits;
6484 int ret;
6485
6486 intel_link_bw_init_limits(state, &new_limits);
6487 old_limits = new_limits;
6488
6489 while (true) {
6490 enum pipe failed_pipe;
6491
6492 ret = intel_atomic_check_config(state, &new_limits,
6493 &failed_pipe);
6494 if (ret) {
6495 /*
6496			 * The bpp limit for a pipe is below the minimum it supports; set the
6497 * limit to the minimum and recalculate the config.
6498 */
6499 if (ret == -EINVAL &&
6500 intel_link_bw_set_bpp_limit_for_pipe(state,
6501 &old_limits,
6502 &new_limits,
6503 failed_pipe))
6504 continue;
6505
6506 break;
6507 }
6508
6509 old_limits = new_limits;
6510
6511 ret = intel_link_bw_atomic_check(state, &new_limits);
6512 if (ret != -EAGAIN)
6513 break;
6514 }
6515
6516 return ret;
6517 }
6518 /**
6519 * intel_atomic_check - validate state object
6520 * @dev: drm device
6521 * @_state: state to validate
6522 */
6523 int intel_atomic_check(struct drm_device *dev,
6524 struct drm_atomic_state *_state)
6525 {
6526 struct drm_i915_private *dev_priv = to_i915(dev);
6527 struct intel_atomic_state *state = to_intel_atomic_state(_state);
6528 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6529 struct intel_crtc *crtc;
6530 int ret, i;
6531 bool any_ms = false;
6532
6533 if (!intel_display_driver_check_access(dev_priv))
6534 return -ENODEV;
6535
6536 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6537 new_crtc_state, i) {
6538 /*
6539		 * The crtc's state is no longer considered inherited
6540		 * after the first userspace/client initiated commit.
6541 */
6542 if (!state->internal)
6543 new_crtc_state->inherited = false;
6544
6545 if (new_crtc_state->inherited != old_crtc_state->inherited)
6546 new_crtc_state->uapi.mode_changed = true;
6547
6548 if (new_crtc_state->uapi.scaling_filter !=
6549 old_crtc_state->uapi.scaling_filter)
6550 new_crtc_state->uapi.mode_changed = true;
6551 }
6552
6553 intel_vrr_check_modeset(state);
6554
6555 ret = drm_atomic_helper_check_modeset(dev, &state->base);
6556 if (ret)
6557 goto fail;
6558
6559 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6560 ret = intel_async_flip_check_uapi(state, crtc);
6561 if (ret)
6562 return ret;
6563 }
6564
6565 ret = intel_atomic_check_config_and_link(state);
6566 if (ret)
6567 goto fail;
6568
6569 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6570 if (!intel_crtc_needs_modeset(new_crtc_state))
6571 continue;
6572
6573 if (intel_crtc_is_joiner_secondary(new_crtc_state)) {
6574 drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
6575 continue;
6576 }
6577
6578 ret = intel_atomic_check_joiner(state, crtc);
6579 if (ret)
6580 goto fail;
6581 }
6582
6583 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6584 new_crtc_state, i) {
6585 if (!intel_crtc_needs_modeset(new_crtc_state))
6586 continue;
6587
6588 intel_joiner_adjust_pipe_src(new_crtc_state);
6589
6590 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
6591 }
6592
6593	/*
6594	 * Check if fastset is allowed by external dependencies like other
6595	 * pipes and transcoders.
6596	 *
6597	 * Right now it only forces a full modeset when the MST master
6598	 * transcoder did not change but the pipe of the master transcoder
6599	 * needs a full modeset, in which case all slaves also need a full
6600	 * modeset. Likewise, in the case of port synced crtcs, if one of
6601	 * the synced crtcs needs a full modeset, all the other synced
6602	 * crtcs are forced to do a full modeset as well.
6603	 */
6604 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6605 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
6606 continue;
6607
6608 if (intel_dp_mst_crtc_needs_modeset(state, crtc))
6609 intel_crtc_flag_modeset(new_crtc_state);
6610
6611 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
6612 enum transcoder master = new_crtc_state->mst_master_transcoder;
6613
6614 if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
6615 intel_crtc_flag_modeset(new_crtc_state);
6616 }
6617
6618 if (is_trans_port_sync_mode(new_crtc_state)) {
6619 u8 trans = new_crtc_state->sync_mode_slaves_mask;
6620
6621 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
6622 trans |= BIT(new_crtc_state->master_transcoder);
6623
6624 if (intel_cpu_transcoders_need_modeset(state, trans))
6625 intel_crtc_flag_modeset(new_crtc_state);
6626 }
6627
6628 if (new_crtc_state->joiner_pipes) {
6629 if (intel_pipes_need_modeset(state, new_crtc_state->joiner_pipes))
6630 intel_crtc_flag_modeset(new_crtc_state);
6631 }
6632 }
6633
6634 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6635 new_crtc_state, i) {
6636 if (!intel_crtc_needs_modeset(new_crtc_state))
6637 continue;
6638
6639 any_ms = true;
6640
6641 intel_release_shared_dplls(state, crtc);
6642 }
6643
6644 if (any_ms && !check_digital_port_conflicts(state)) {
6645 drm_dbg_kms(&dev_priv->drm,
6646 "rejecting conflicting digital port configuration\n");
6647 ret = -EINVAL;
6648 goto fail;
6649 }
6650
6651 ret = intel_atomic_check_planes(state);
6652 if (ret)
6653 goto fail;
6654
6655 ret = intel_compute_global_watermarks(state);
6656 if (ret)
6657 goto fail;
6658
6659 ret = intel_bw_atomic_check(state);
6660 if (ret)
6661 goto fail;
6662
6663 ret = intel_cdclk_atomic_check(state, &any_ms);
6664 if (ret)
6665 goto fail;
6666
6667 if (intel_any_crtc_needs_modeset(state))
6668 any_ms = true;
6669
6670 if (any_ms) {
6671 ret = intel_modeset_checks(state);
6672 if (ret)
6673 goto fail;
6674
6675 ret = intel_modeset_calc_cdclk(state);
6676 if (ret)
6677 return ret;
6678 }
6679
6680 ret = intel_pmdemand_atomic_check(state);
6681 if (ret)
6682 goto fail;
6683
6684 ret = intel_atomic_check_crtcs(state);
6685 if (ret)
6686 goto fail;
6687
6688 ret = intel_fbc_atomic_check(state);
6689 if (ret)
6690 goto fail;
6691
6692 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6693 new_crtc_state, i) {
6694 intel_color_assert_luts(new_crtc_state);
6695
6696 ret = intel_async_flip_check_hw(state, crtc);
6697 if (ret)
6698 goto fail;
6699
6700 /* Either full modeset or fastset (or neither), never both */
6701 drm_WARN_ON(&dev_priv->drm,
6702 intel_crtc_needs_modeset(new_crtc_state) &&
6703 intel_crtc_needs_fastset(new_crtc_state));
6704
6705 if (!intel_crtc_needs_modeset(new_crtc_state) &&
6706 !intel_crtc_needs_fastset(new_crtc_state))
6707 continue;
6708
6709 intel_crtc_state_dump(new_crtc_state, state,
6710 intel_crtc_needs_modeset(new_crtc_state) ?
6711 "modeset" : "fastset");
6712 }
6713
6714 return 0;
6715
6716 fail:
6717 if (ret == -EDEADLK)
6718 return ret;
6719
6720 /*
6721 * FIXME would probably be nice to know which crtc specifically
6722 * caused the failure, in cases where we can pinpoint it.
6723 */
6724 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6725 new_crtc_state, i)
6726 intel_crtc_state_dump(new_crtc_state, state, "failed");
6727
6728 return ret;
6729 }
6730
6731 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
6732 {
6733 struct intel_crtc_state __maybe_unused *crtc_state;
6734 struct intel_crtc *crtc;
6735 int i, ret;
6736
6737 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
6738 if (ret < 0)
6739 return ret;
6740
6741 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
6742 intel_color_prepare_commit(state, crtc);
6743
6744 return 0;
6745 }
6746
6747 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
6748 struct intel_crtc_state *crtc_state)
6749 {
6750 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6751
6752 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
6753 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
6754
6755 if (crtc_state->has_pch_encoder) {
6756 enum pipe pch_transcoder =
6757 intel_crtc_pch_transcoder(crtc);
6758
6759 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
6760 }
6761 }
6762
6763 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
6764 const struct intel_crtc_state *new_crtc_state)
6765 {
6766 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6767 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6768
6769 /*
6770 * Update pipe size and adjust fitter if needed: the reason for this is
6771 * that in compute_mode_changes we check the native mode (not the pfit
6772 * mode) to see if we can flip rather than do a full mode set. In the
6773 * fastboot case, we'll flip, but if we don't update the pipesrc and
6774 * pfit state, we'll end up with a big fb scanned out into the wrong
6775 * sized surface.
6776 */
6777 intel_set_pipe_src_size(new_crtc_state);
6778
6779 /* on skylake this is done by detaching scalers */
6780 if (DISPLAY_VER(dev_priv) >= 9) {
6781 if (new_crtc_state->pch_pfit.enabled)
6782 skl_pfit_enable(new_crtc_state);
6783 } else if (HAS_PCH_SPLIT(dev_priv)) {
6784 if (new_crtc_state->pch_pfit.enabled)
6785 ilk_pfit_enable(new_crtc_state);
6786 else if (old_crtc_state->pch_pfit.enabled)
6787 ilk_pfit_disable(old_crtc_state);
6788 }
6789
6790 /*
6791 * The register is supposedly single buffered so perhaps
6792	 * not 100% correct to do this here. But SKL+ calculates
6793	 * this based on the adjusted pixel rate so pfit changes do
6794 * affect it and so it must be updated for fastsets.
6795 * HSW/BDW only really need this here for fastboot, after
6796 * that the value should not change without a full modeset.
6797 */
6798 if (DISPLAY_VER(dev_priv) >= 9 ||
6799 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6800 hsw_set_linetime_wm(new_crtc_state);
6801
6802 if (new_crtc_state->update_m_n)
6803 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
6804 &new_crtc_state->dp_m_n);
6805
6806 if (new_crtc_state->update_lrr)
6807 intel_set_transcoder_timings_lrr(new_crtc_state);
6808 }
6809
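/*
 * Arm the double buffered pipe registers before the planes are
 * committed, so that (within the vblank evasion critical section set
 * up by the caller) the pipe and plane updates latch in the same frame.
 */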
6810 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
6811 struct intel_crtc *crtc)
6812 {
6813 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6814 const struct intel_crtc_state *old_crtc_state =
6815 intel_atomic_get_old_crtc_state(state, crtc);
6816 const struct intel_crtc_state *new_crtc_state =
6817 intel_atomic_get_new_crtc_state(state, crtc);
6818 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
6819
6820 /*
6821 * During modesets pipe configuration was programmed as the
6822 * CRTC was enabled.
6823 */
6824 if (!modeset) {
6825 if (intel_crtc_needs_color_update(new_crtc_state))
6826 intel_color_commit_arm(new_crtc_state);
6827
6828 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6829 bdw_set_pipe_misc(NULL, new_crtc_state);
6830
6831 if (intel_crtc_needs_fastset(new_crtc_state))
6832 intel_pipe_fastset(old_crtc_state, new_crtc_state);
6833 }
6834
6835 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
6836
6837 intel_atomic_update_watermarks(state, crtc);
6838 }
6839
6840 static void commit_pipe_post_planes(struct intel_atomic_state *state,
6841 struct intel_crtc *crtc)
6842 {
6843 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6844 const struct intel_crtc_state *new_crtc_state =
6845 intel_atomic_get_new_crtc_state(state, crtc);
6846
6847 /*
6848 * Disable the scaler(s) after the plane(s) so that we don't
6849 * get a catastrophic underrun even if the two operations
6850 * end up happening in two different frames.
6851 */
6852 if (DISPLAY_VER(dev_priv) >= 9 &&
6853 !intel_crtc_needs_modeset(new_crtc_state))
6854 skl_detach_scalers(new_crtc_state);
6855
6856 if (intel_crtc_vrr_enabling(state, crtc))
6857 intel_vrr_enable(new_crtc_state);
6858 }
6859
6860 static void intel_enable_crtc(struct intel_atomic_state *state,
6861 struct intel_crtc *crtc)
6862 {
6863 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6864 const struct intel_crtc_state *new_crtc_state =
6865 intel_atomic_get_new_crtc_state(state, crtc);
6866 struct intel_crtc *pipe_crtc;
6867
6868 if (!intel_crtc_needs_modeset(new_crtc_state))
6869 return;
6870
6871 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
6872 intel_crtc_joined_pipe_mask(new_crtc_state)) {
6873 const struct intel_crtc_state *pipe_crtc_state =
6874 intel_atomic_get_new_crtc_state(state, pipe_crtc);
6875
6876		/* VRR will be enabled later, if required */
6877 intel_crtc_update_active_timings(pipe_crtc_state, false);
6878 }
6879
6880 dev_priv->display.funcs.display->crtc_enable(state, crtc);
6881
6882 /* vblanks work again, re-enable pipe CRC. */
6883 intel_crtc_enable_pipe_crc(crtc);
6884 }
6885
6886 static void intel_pre_update_crtc(struct intel_atomic_state *state,
6887 struct intel_crtc *crtc)
6888 {
6889 struct drm_i915_private *i915 = to_i915(state->base.dev);
6890 const struct intel_crtc_state *old_crtc_state =
6891 intel_atomic_get_old_crtc_state(state, crtc);
6892 struct intel_crtc_state *new_crtc_state =
6893 intel_atomic_get_new_crtc_state(state, crtc);
6894 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
6895
6896 if (old_crtc_state->inherited ||
6897 intel_crtc_needs_modeset(new_crtc_state)) {
6898 if (HAS_DPT(i915))
6899 intel_dpt_configure(crtc);
6900 }
6901
6902 if (!modeset) {
6903 if (new_crtc_state->preload_luts &&
6904 intel_crtc_needs_color_update(new_crtc_state))
6905 intel_color_load_luts(new_crtc_state);
6906
6907 intel_pre_plane_update(state, crtc);
6908
6909 if (intel_crtc_needs_fastset(new_crtc_state))
6910 intel_encoders_update_pipe(state, crtc);
6911
6912 if (DISPLAY_VER(i915) >= 11 &&
6913 intel_crtc_needs_fastset(new_crtc_state))
6914 icl_set_pipe_chicken(new_crtc_state);
6915
6916 if (vrr_params_changed(old_crtc_state, new_crtc_state) ||
6917 cmrr_params_changed(old_crtc_state, new_crtc_state))
6918 intel_vrr_set_transcoder_timings(new_crtc_state);
6919 }
6920
6921 intel_fbc_update(state, crtc);
6922
6923 drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
6924
6925 if (!modeset &&
6926 intel_crtc_needs_color_update(new_crtc_state))
6927 intel_color_commit_noarm(new_crtc_state);
6928
6929 intel_crtc_planes_update_noarm(NULL, state, crtc);
6930 }
6931
6932 static void intel_update_crtc(struct intel_atomic_state *state,
6933 struct intel_crtc *crtc)
6934 {
6935 const struct intel_crtc_state *old_crtc_state =
6936 intel_atomic_get_old_crtc_state(state, crtc);
6937 struct intel_crtc_state *new_crtc_state =
6938 intel_atomic_get_new_crtc_state(state, crtc);
6939
6940 /* Perform vblank evasion around commit operation */
6941 intel_pipe_update_start(state, crtc);
6942
6943 commit_pipe_pre_planes(state, crtc);
6944
6945 intel_crtc_planes_update_arm(NULL, state, crtc);
6946
6947 commit_pipe_post_planes(state, crtc);
6948
6949 intel_pipe_update_end(state, crtc);
6950
6951 /*
6952 * VRR/Seamless M/N update may need to update frame timings.
6953 *
6954 * FIXME Should be synchronized with the start of vblank somehow...
6955 */
6956 if (intel_crtc_vrr_enabling(state, crtc) ||
6957 new_crtc_state->update_m_n || new_crtc_state->update_lrr)
6958 intel_crtc_update_active_timings(new_crtc_state,
6959 new_crtc_state->vrr.enable);
6960
6961 /*
6962 * We usually enable FIFO underrun interrupts as part of the
6963 * CRTC enable sequence during modesets. But when we inherit a
6964 * valid pipe configuration from the BIOS we need to take care
6965 * of enabling them on the CRTC's first fastset.
6966 */
6967 if (intel_crtc_needs_fastset(new_crtc_state) &&
6968 old_crtc_state->inherited)
6969 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
6970 }
6971
6972 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
6973 struct intel_crtc *crtc)
6974 {
6975 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6976 const struct intel_crtc_state *old_crtc_state =
6977 intel_atomic_get_old_crtc_state(state, crtc);
6978 struct intel_crtc *pipe_crtc;
6979
6980 /*
6981 * We need to disable pipe CRC before disabling the pipe,
6982 * or we race against vblank off.
6983 */
6984 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
6985 intel_crtc_joined_pipe_mask(old_crtc_state))
6986 intel_crtc_disable_pipe_crc(pipe_crtc);
6987
6988 dev_priv->display.funcs.display->crtc_disable(state, crtc);
6989
6990 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
6991 intel_crtc_joined_pipe_mask(old_crtc_state)) {
6992 const struct intel_crtc_state *new_pipe_crtc_state =
6993 intel_atomic_get_new_crtc_state(state, pipe_crtc);
6994
6995 pipe_crtc->active = false;
6996 intel_fbc_disable(pipe_crtc);
6997
6998 if (!new_pipe_crtc_state->hw.active)
6999 intel_initial_watermarks(state, pipe_crtc);
7000 }
7001 }
7002
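/*
 * Disable all pipes undergoing a full modeset: planes first, then the
 * pipes themselves, taking down port sync and MST slaves before their
 * masters (see the ordering comment below).
 */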
7003 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
7004 {
7005 struct drm_i915_private *i915 = to_i915(state->base.dev);
7006 const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7007 struct intel_crtc *crtc;
7008 u8 disable_pipes = 0;
7009 int i;
7010
7011 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7012 new_crtc_state, i) {
7013 if (!intel_crtc_needs_modeset(new_crtc_state))
7014 continue;
7015
7016 /*
7017 * Needs to be done even for pipes
7018 * that weren't enabled previously.
7019 */
7020 intel_pre_plane_update(state, crtc);
7021
7022 if (!old_crtc_state->hw.active)
7023 continue;
7024
7025 disable_pipes |= BIT(crtc->pipe);
7026 }
7027
7028 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
7029 if ((disable_pipes & BIT(crtc->pipe)) == 0)
7030 continue;
7031
7032 intel_crtc_disable_planes(state, crtc);
7033
7034 drm_vblank_work_flush_all(&crtc->base);
7035 }
7036
7037 /* Only disable port sync and MST slaves */
7038 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
7039 if ((disable_pipes & BIT(crtc->pipe)) == 0)
7040 continue;
7041
7042 if (intel_crtc_is_joiner_secondary(old_crtc_state))
7043 continue;
7044
7045		/* In transcoder port sync mode, master and slave CRTCs can be
7046		 * assigned in any order, so we must make sure that slave CRTCs
7047		 * are disabled before the master CRTC, since slave vblanks
7048		 * remain masked until the master's vblanks are enabled.
7049		 */
7050 if (!is_trans_port_sync_slave(old_crtc_state) &&
7051 !intel_dp_mst_is_slave_trans(old_crtc_state))
7052 continue;
7053
7054 intel_old_crtc_state_disables(state, crtc);
7055
7056 disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
7057 }
7058
7059 /* Disable everything else left on */
7060 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
7061 if ((disable_pipes & BIT(crtc->pipe)) == 0)
7062 continue;
7063
7064 if (intel_crtc_is_joiner_secondary(old_crtc_state))
7065 continue;
7066
7067 intel_old_crtc_state_disables(state, crtc);
7068
7069 disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
7070 }
7071
7072 drm_WARN_ON(&i915->drm, disable_pipes);
7073 }
7074
7075 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
7076 {
7077 struct intel_crtc_state *new_crtc_state;
7078 struct intel_crtc *crtc;
7079 int i;
7080
7081 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7082 if (!new_crtc_state->hw.active)
7083 continue;
7084
7085 intel_enable_crtc(state, crtc);
7086 intel_pre_update_crtc(state, crtc);
7087 }
7088
7089 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7090 if (!new_crtc_state->hw.active)
7091 continue;
7092
7093 intel_update_crtc(state, crtc);
7094 }
7095 }
7096
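/*
 * An illustrative example (not taken from real state) of the DDB
 * ordering problem handled below: with pipes A and B both active, if
 * the new state shrinks B's DDB slice and grows A's into the vacated
 * space, B must be committed first and a vblank must pass before A is
 * committed, otherwise the two allocations would overlap within a
 * single frame and cause underruns.
 */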
7097 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
7098 {
7099 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7100 struct intel_crtc *crtc;
7101 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7102 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
7103 u8 update_pipes = 0, modeset_pipes = 0;
7104 int i;
7105
7106 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7107 enum pipe pipe = crtc->pipe;
7108
7109 if (!new_crtc_state->hw.active)
7110 continue;
7111
7112		/* Ignore allocations for crtcs that have been turned off. */
7113 if (!intel_crtc_needs_modeset(new_crtc_state)) {
7114 entries[pipe] = old_crtc_state->wm.skl.ddb;
7115 update_pipes |= BIT(pipe);
7116 } else {
7117 modeset_pipes |= BIT(pipe);
7118 }
7119 }
7120
7121 /*
7122 * Whenever the number of active pipes changes, we need to make sure we
7123 * update the pipes in the right order so that their ddb allocations
7124 * never overlap with each other between CRTC updates. Otherwise we'll
7125 * cause pipe underruns and other bad stuff.
7126 *
7127	 * So first let's enable all pipes that do not need a full modeset as
7128 * those don't have any external dependency.
7129 */
7130 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7131 enum pipe pipe = crtc->pipe;
7132
7133 if ((update_pipes & BIT(pipe)) == 0)
7134 continue;
7135
7136 intel_pre_update_crtc(state, crtc);
7137 }
7138
7139 intel_dbuf_mbus_pre_ddb_update(state);
7140
7141 while (update_pipes) {
7142 /*
7143		 * Commit in reverse order to make the joiner primary
7144		 * send the uapi events after the secondaries are done.
7145 */
7146 for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
7147 new_crtc_state, i) {
7148 enum pipe pipe = crtc->pipe;
7149
7150 if ((update_pipes & BIT(pipe)) == 0)
7151 continue;
7152
7153 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
7154 entries, I915_MAX_PIPES, pipe))
7155 continue;
7156
7157 entries[pipe] = new_crtc_state->wm.skl.ddb;
7158 update_pipes &= ~BIT(pipe);
7159
7160 intel_update_crtc(state, crtc);
7161
7162 /*
7163			 * If this is an already active pipe, its DDB changed,
7164			 * and this isn't the last pipe that needs updating,
7165			 * then we need to wait for a vblank to pass for the
7166 * new ddb allocation to take effect.
7167 */
7168 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
7169 &old_crtc_state->wm.skl.ddb) &&
7170 (update_pipes | modeset_pipes))
7171 intel_crtc_wait_for_next_vblank(crtc);
7172 }
7173 }
7174
7175 intel_dbuf_mbus_post_ddb_update(state);
7176
7177 update_pipes = modeset_pipes;
7178
7179 /*
7180	 * Enable all pipes that need a modeset and do not depend on other
7181	 * pipes.
7182 */
7183 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7184 enum pipe pipe = crtc->pipe;
7185
7186 if ((modeset_pipes & BIT(pipe)) == 0)
7187 continue;
7188
7189 if (intel_crtc_is_joiner_secondary(new_crtc_state))
7190 continue;
7191
7192 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
7193 is_trans_port_sync_master(new_crtc_state))
7194 continue;
7195
7196 modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
7197
7198 intel_enable_crtc(state, crtc);
7199 }
7200
7201 /*
7202 * Then we enable all remaining pipes that depend on other
7203 * pipes: MST slaves and port sync masters
7204 */
7205 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7206 enum pipe pipe = crtc->pipe;
7207
7208 if ((modeset_pipes & BIT(pipe)) == 0)
7209 continue;
7210
7211 if (intel_crtc_is_joiner_secondary(new_crtc_state))
7212 continue;
7213
7214 modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
7215
7216 intel_enable_crtc(state, crtc);
7217 }
7218
7219 /*
7220 * Finally we do the plane updates/etc. for all pipes that got enabled.
7221 */
7222 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7223 enum pipe pipe = crtc->pipe;
7224
7225 if ((update_pipes & BIT(pipe)) == 0)
7226 continue;
7227
7228 intel_pre_update_crtc(state, crtc);
7229 }
7230
7231 /*
7232	 * Commit in reverse order to make the joiner primary
7233	 * send the uapi events after the secondaries are done.
7234 */
7235 for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) {
7236 enum pipe pipe = crtc->pipe;
7237
7238 if ((update_pipes & BIT(pipe)) == 0)
7239 continue;
7240
7241 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
7242 entries, I915_MAX_PIPES, pipe));
7243
7244 entries[pipe] = new_crtc_state->wm.skl.ddb;
7245 update_pipes &= ~BIT(pipe);
7246
7247 intel_update_crtc(state, crtc);
7248 }
7249
7250 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
7251 drm_WARN_ON(&dev_priv->drm, update_pipes);
7252 }
7253
7254 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
7255 {
7256 struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
7257 struct drm_plane *plane;
7258 struct drm_plane_state *new_plane_state;
7259 int ret, i;
7260
7261 for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
7262 if (new_plane_state->fence) {
7263 ret = dma_fence_wait_timeout(new_plane_state->fence, false,
7264 i915_fence_timeout(i915));
7265 if (ret <= 0)
7266 break;
7267
7268 dma_fence_put(new_plane_state->fence);
7269 new_plane_state->fence = NULL;
7270 }
7271 }
7272 }
7273
7274 static void intel_atomic_cleanup_work(struct work_struct *work)
7275 {
7276 struct intel_atomic_state *state =
7277 container_of(work, struct intel_atomic_state, base.commit_work);
7278 struct drm_i915_private *i915 = to_i915(state->base.dev);
7279 struct intel_crtc_state *old_crtc_state;
7280 struct intel_crtc *crtc;
7281 int i;
7282
7283 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
7284 intel_color_cleanup_commit(old_crtc_state);
7285
7286 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
7287 drm_atomic_helper_commit_cleanup_done(&state->base);
7288 drm_atomic_state_put(&state->base);
7289 }
7290
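/*
 * For framebuffers using a render compression CCS layout with a fast
 * clear color plane, read the 8 byte native clear color value back
 * from the fb (see the layout description below) so the plane
 * registers can later be programmed with it.
 */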
7291 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
7292 {
7293 struct drm_i915_private *i915 = to_i915(state->base.dev);
7294 struct intel_plane *plane;
7295 struct intel_plane_state *plane_state;
7296 int i;
7297
7298 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7299 struct drm_framebuffer *fb = plane_state->hw.fb;
7300 int cc_plane;
7301 int ret;
7302
7303 if (!fb)
7304 continue;
7305
7306 cc_plane = intel_fb_rc_ccs_cc_plane(fb);
7307 if (cc_plane < 0)
7308 continue;
7309
7310 /*
7311 * The layout of the fast clear color value expected by HW
7312 * (the DRM ABI requiring this value to be located in fb at
7313		 * offset 0 of cc plane, plane #2 on previous generations or
7314 * plane #1 for flat ccs):
7315 * - 4 x 4 bytes per-channel value
7316 * (in surface type specific float/int format provided by the fb user)
7317 * - 8 bytes native color value used by the display
7318 * (converted/written by GPU during a fast clear operation using the
7319 * above per-channel values)
7320 *
7321 * The commit's FB prepare hook already ensured that FB obj is pinned and the
7322 * caller made sure that the object is synced wrt. the related color clear value
7323 * GPU write on it.
7324 */
7325 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
7326 fb->offsets[cc_plane] + 16,
7327 &plane_state->ccval,
7328 sizeof(plane_state->ccval));
7329 /* The above could only fail if the FB obj has an unexpected backing store type. */
7330 drm_WARN_ON(&i915->drm, ret);
7331 }
7332 }
7333
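/*
 * The tail of every commit, run either inline (blocking commits) or
 * from a worker (nonblocking commits). The new state has already been
 * swapped in by this point. Rough phase order, following the calls
 * below:
 *
 *	fence wait -> DC off -> modeset disables -> enables/fastsets ->
 *	flip done wait -> watermark optimization -> post plane update ->
 *	hw_done -> deferred cleanup work
 */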
7334 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
7335 {
7336 struct drm_device *dev = state->base.dev;
7337 struct drm_i915_private *dev_priv = to_i915(dev);
7338 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7339 struct intel_crtc *crtc;
7340 struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
7341 intel_wakeref_t wakeref = 0;
7342 int i;
7343
7344 intel_atomic_commit_fence_wait(state);
7345
7346 intel_td_flush(dev_priv);
7347
7348 drm_atomic_helper_wait_for_dependencies(&state->base);
7349 drm_dp_mst_atomic_wait_for_dependencies(&state->base);
7350 intel_atomic_global_state_wait_for_dependencies(state);
7351
7352 /*
7353 * During full modesets we write a lot of registers, wait
7354 * for PLLs, etc. Doing that while DC states are enabled
7355 * is not a good idea.
7356 *
7357 * During fastsets and other updates we also need to
7358 * disable DC states due to the following scenario:
7359 * 1. DC5 exit and PSR exit happen
7360 * 2. Some or all _noarm() registers are written
7361 * 3. Due to some long delay PSR is re-entered
7362 * 4. DC5 entry -> DMC saves the already written new
7363 * _noarm() registers and the old not yet written
7364 * _arm() registers
7365 * 5. DC5 exit -> DMC restores a mixture of old and
7366 * new register values and arms the update
7367 * 6. PSR exit -> hardware latches a mixture of old and
7368 * new register values -> corrupted frame, or worse
7369 * 7. New _arm() registers are finally written
7370 * 8. Hardware finally latches a complete set of new
7371 * register values, and subsequent frames will be OK again
7372 *
7373 * Also note that due to the pipe CSC hardware issues on
7374 * SKL/GLK DC states must remain off until the pipe CSC
7375 * state readout has happened. Otherwise we risk corrupting
7376 * the CSC latched register values with the readout (see
7377 * skl_read_csc() and skl_color_commit_noarm()).
7378 */
7379 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
7380
7381 intel_atomic_prepare_plane_clear_colors(state);
7382
7383 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7384 new_crtc_state, i) {
7385 if (intel_crtc_needs_modeset(new_crtc_state) ||
7386 intel_crtc_needs_fastset(new_crtc_state))
7387 intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
7388 }
7389
7390 intel_commit_modeset_disables(state);
7391
7392 intel_dp_tunnel_atomic_alloc_bw(state);
7393
7394 /* FIXME: Eventually get rid of our crtc->config pointer */
7395 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7396 crtc->config = new_crtc_state;
7397
7398 /*
7399 * In XE_LPD+ Pmdemand combines many parameters such as voltage index,
7400	 * plls, cdclk frequency, QGV point selection parameters, etc. The voltage
7401	 * index and cdclk/ddiclk frequencies are supposed to be configured before
7402 * the cdclk config is set.
7403 */
7404 intel_pmdemand_pre_plane_update(state);
7405
7406 if (state->modeset) {
7407 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
7408
7409 intel_set_cdclk_pre_plane_update(state);
7410
7411 intel_modeset_verify_disabled(state);
7412 }
7413
7414 intel_sagv_pre_plane_update(state);
7415
7416 /* Complete the events for pipes that have now been disabled */
7417 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7418 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7419
7420		/* Complete events for now-disabled pipes here. */
7421 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
7422 spin_lock_irq(&dev->event_lock);
7423 drm_crtc_send_vblank_event(&crtc->base,
7424 new_crtc_state->uapi.event);
7425 spin_unlock_irq(&dev->event_lock);
7426
7427 new_crtc_state->uapi.event = NULL;
7428 }
7429 }
7430
7431 intel_encoders_update_prepare(state);
7432
7433 intel_dbuf_pre_plane_update(state);
7434
7435 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7436 if (new_crtc_state->do_async_flip)
7437 intel_crtc_enable_flip_done(state, crtc);
7438 }
7439
7440 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
7441 dev_priv->display.funcs.display->commit_modeset_enables(state);
7442
7443 intel_wait_for_vblank_workers(state);
7444
7445 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
7446 * already, but still need the state for the delayed optimization. To
7447 * fix this:
7448 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
7449 * - schedule that vblank worker _before_ calling hw_done
7450	 * - at the start of commit_tail, cancel it _synchronously_
7451	 * - switch over to the vblank wait helper in the core after that since
7452	 *   we don't need our special handling any more.
7453 */
7454 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
7455
7456 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7457 if (new_crtc_state->do_async_flip)
7458 intel_crtc_disable_flip_done(state, crtc);
7459
7460 intel_color_wait_commit(new_crtc_state);
7461 }
7462
7463 /*
7464 * Now that the vblank has passed, we can go ahead and program the
7465 * optimal watermarks on platforms that need two-step watermark
7466 * programming.
7467 *
7468 * TODO: Move this (and other cleanup) to an async worker eventually.
7469 */
7470 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7471 new_crtc_state, i) {
7472 /*
7473 * Gen2 reports pipe underruns whenever all planes are disabled.
7474 * So re-enable underrun reporting after some planes get enabled.
7475 *
7476 * We do this before .optimize_watermarks() so that we have a
7477 * chance of catching underruns with the intermediate watermarks
7478 * vs. the new plane configuration.
7479 */
7480 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
7481 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
7482
7483 intel_optimize_watermarks(state, crtc);
7484 }
7485
7486 intel_dbuf_post_plane_update(state);
7487
7488 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7489 intel_post_plane_update(state, crtc);
7490
7491 intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
7492
7493 intel_modeset_verify_crtc(state, crtc);
7494
7495 /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
7496 hsw_ips_post_update(state, crtc);
7497
7498 /*
7499 * Activate DRRS after state readout to avoid
7500 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
7501 */
7502 intel_drrs_activate(new_crtc_state);
7503
7504 /*
7505		 * DSB cleanup is done in cleanup_work, in line with framebuffer
7506 * cleanup. So copy and reset the dsb structure to sync with
7507 * commit_done and later do dsb cleanup in cleanup_work.
7508 *
7509 * FIXME get rid of this funny new->old swapping
7510 */
7511 old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
7512 old_crtc_state->dsb_color_commit = fetch_and_zero(&new_crtc_state->dsb_color_commit);
7513 }
7514
7515 /* Underruns don't always raise interrupts, so check manually */
7516 intel_check_cpu_fifo_underruns(dev_priv);
7517 intel_check_pch_fifo_underruns(dev_priv);
7518
7519 if (state->modeset)
7520 intel_verify_planes(state);
7521
7522 intel_sagv_post_plane_update(state);
7523 if (state->modeset)
7524 intel_set_cdclk_post_plane_update(state);
7525 intel_pmdemand_post_plane_update(state);
7526
7527 drm_atomic_helper_commit_hw_done(&state->base);
7528 intel_atomic_global_state_commit_done(state);
7529
7530 if (state->modeset) {
7531 /* As one of the primary mmio accessors, KMS has a high
7532 * likelihood of triggering bugs in unclaimed access. After we
7533 * finish modesetting, see if an error has been flagged, and if
7534 * so enable debugging for the next modeset - and hope we catch
7535 * the culprit.
7536 */
7537 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
7538 }
7539 /*
7540 * Delay re-enabling DC states by 17 ms to avoid the off->on->off
7541 * toggling overhead at and above 60 FPS.
7542 */
7543 intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17);
7544 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7545
7546 /*
7547	 * Defer the cleanup of the old state to a separate worker to not
7548	 * impede the current task (userspace for blocking modesets) that
7549	 * is executed inline. For out-of-line asynchronous modesets/flips,
7550 * deferring to a new worker seems overkill, but we would place a
7551 * schedule point (cond_resched()) here anyway to keep latencies
7552 * down.
7553 */
7554 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
7555 queue_work(system_highpri_wq, &state->base.commit_work);
7556 }
7557
7558 static void intel_atomic_commit_work(struct work_struct *work)
7559 {
7560 struct intel_atomic_state *state =
7561 container_of(work, struct intel_atomic_state, base.commit_work);
7562
7563 intel_atomic_commit_tail(state);
7564 }
7565
7566 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
7567 {
7568 struct intel_plane_state *old_plane_state, *new_plane_state;
7569 struct intel_plane *plane;
7570 int i;
7571
7572 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7573 new_plane_state, i)
7574 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
7575 to_intel_frontbuffer(new_plane_state->hw.fb),
7576 plane->frontbuffer_bit);
7577 }
7578
7579 static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock)
7580 {
7581 int ret;
7582
7583 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
7584 if (ret)
7585 return ret;
7586
7587 ret = intel_atomic_global_state_setup_commit(state);
7588 if (ret)
7589 return ret;
7590
7591 return 0;
7592 }
7593
7594 static int intel_atomic_swap_state(struct intel_atomic_state *state)
7595 {
7596 int ret;
7597
7598 ret = drm_atomic_helper_swap_state(&state->base, true);
7599 if (ret)
7600 return ret;
7601
7602 intel_atomic_swap_global_state(state);
7603
7604 intel_shared_dpll_swap_state(state);
7605
7606 intel_atomic_track_fbs(state);
7607
7608 return 0;
7609 }
7610
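/*
 * Main atomic commit entry point (used as the drm_mode_config_funcs
 * .atomic_commit hook): prepare and swap in the new state, then either
 * queue the commit tail on a dedicated workqueue (nonblocking) or run
 * it inline (blocking).
 */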
7611 int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
7612 bool nonblock)
7613 {
7614 struct intel_atomic_state *state = to_intel_atomic_state(_state);
7615 struct drm_i915_private *dev_priv = to_i915(dev);
7616 int ret = 0;
7617
7618 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
7619
7620 /*
7621 * The intel_legacy_cursor_update() fast path takes care
7622 * of avoiding the vblank waits for simple cursor
7623 * movement and flips. For cursor on/off and size changes,
7624 * we want to perform the vblank waits so that watermark
7625 * updates happen during the correct frames. Gen9+ have
7626 * double buffered watermarks and so shouldn't need this.
7627 *
7628 * Unset state->legacy_cursor_update before the call to
7629 * drm_atomic_helper_setup_commit() because otherwise
7630 * drm_atomic_helper_wait_for_flip_done() is a noop and
7631 * we get FIFO underruns because we didn't wait
7632 * for vblank.
7633 *
7634 * FIXME doing watermarks and fb cleanup from a vblank worker
7635 * (assuming we had any) would solve these problems.
7636 */
7637 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
7638 struct intel_crtc_state *new_crtc_state;
7639 struct intel_crtc *crtc;
7640 int i;
7641
7642 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7643 if (new_crtc_state->wm.need_postvbl_update ||
7644 new_crtc_state->update_wm_post)
7645 state->base.legacy_cursor_update = false;
7646 }
7647
7648 ret = intel_atomic_prepare_commit(state);
7649 if (ret) {
7650 drm_dbg_atomic(&dev_priv->drm,
7651 "Preparing state failed with %i\n", ret);
7652 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7653 return ret;
7654 }
7655
7656 ret = intel_atomic_setup_commit(state, nonblock);
7657 if (!ret)
7658 ret = intel_atomic_swap_state(state);
7659
7660 if (ret) {
7661 struct intel_crtc_state *new_crtc_state;
7662 struct intel_crtc *crtc;
7663 int i;
7664
7665 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7666 intel_color_cleanup_commit(new_crtc_state);
7667
7668 drm_atomic_helper_unprepare_planes(dev, &state->base);
7669 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7670 return ret;
7671 }
7672
7673 drm_atomic_state_get(&state->base);
7674 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
7675
7676 if (nonblock && state->modeset) {
7677 queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
7678 } else if (nonblock) {
7679 queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
7680 } else {
7681 if (state->modeset)
7682 flush_workqueue(dev_priv->display.wq.modeset);
7683 intel_atomic_commit_tail(state);
7684 }
7685
7686 return 0;
7687 }
7688
7689 /**
7690 * intel_plane_destroy - destroy a plane
7691 * @plane: plane to destroy
7692 *
7693 * Common destruction function for all types of planes (primary, cursor,
7694 * sprite).
7695 */
7696 void intel_plane_destroy(struct drm_plane *plane)
7697 {
7698 drm_plane_cleanup(plane);
7699 kfree(to_intel_plane(plane));
7700 }
7701
7702 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
7703 struct drm_file *file)
7704 {
7705 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7706 struct drm_crtc *drmmode_crtc;
7707 struct intel_crtc *crtc;
7708
7709 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
7710 if (!drmmode_crtc)
7711 return -ENOENT;
7712
7713 crtc = to_intel_crtc(drmmode_crtc);
7714 pipe_from_crtc_id->pipe = crtc->pipe;
7715
7716 return 0;
7717 }
7718
7719 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
7720 {
7721 struct drm_device *dev = encoder->base.dev;
7722 struct intel_encoder *source_encoder;
7723 u32 possible_clones = 0;
7724
7725 for_each_intel_encoder(dev, source_encoder) {
7726 if (encoders_cloneable(encoder, source_encoder))
7727 possible_clones |= drm_encoder_mask(&source_encoder->base);
7728 }
7729
7730 return possible_clones;
7731 }
7732
7733 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
7734 {
7735 struct drm_device *dev = encoder->base.dev;
7736 struct intel_crtc *crtc;
7737 u32 possible_crtcs = 0;
7738
7739 for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
7740 possible_crtcs |= drm_crtc_mask(&crtc->base);
7741
7742 return possible_crtcs;
7743 }
7744
7745 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
7746 {
7747 if (!IS_MOBILE(dev_priv))
7748 return false;
7749
7750 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
7751 return false;
7752
7753 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
7754 return false;
7755
7756 return true;
7757 }
7758
7759 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
7760 {
7761 if (DISPLAY_VER(dev_priv) >= 9)
7762 return false;
7763
7764 if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
7765 return false;
7766
7767 if (HAS_PCH_LPT_H(dev_priv) &&
7768 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
7769 return false;
7770
7771 /* DDI E can't be used if DDI A requires 4 lanes */
7772 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
7773 return false;
7774
7775 if (!dev_priv->display.vbt.int_crt_support)
7776 return false;
7777
7778 return true;
7779 }
7780
7781 bool assert_port_valid(struct drm_i915_private *i915, enum port port)
7782 {
7783 return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)),
7784 "Platform does not support port %c\n", port_name(port));
7785 }
7786
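/*
 * Probe and register all display outputs for the platform: DDI based
 * hardware enumerates its ports from the VBT, while older platforms
 * check the various strap/detect bits (with the caveats noted in the
 * comments below) before registering each connector type.
 */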
7787 void intel_setup_outputs(struct drm_i915_private *dev_priv)
7788 {
7789 struct intel_display *display = &dev_priv->display;
7790 struct intel_encoder *encoder;
7791 bool dpd_is_edp = false;
7792
7793 intel_pps_unlock_regs_wa(display);
7794
7795 if (!HAS_DISPLAY(dev_priv))
7796 return;
7797
7798 if (HAS_DDI(dev_priv)) {
7799 if (intel_ddi_crt_present(dev_priv))
7800 intel_crt_init(dev_priv);
7801
7802 intel_bios_for_each_encoder(display, intel_ddi_init);
7803
7804 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
7805 vlv_dsi_init(dev_priv);
7806 } else if (HAS_PCH_SPLIT(dev_priv)) {
7807 int found;
7808
7809 /*
7810 * intel_edp_init_connector() depends on this completing first,
7811 * to prevent the registration of both eDP and LVDS and the
7812 * incorrect sharing of the PPS.
7813 */
7814 intel_lvds_init(dev_priv);
7815 intel_crt_init(dev_priv);
7816
7817 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
7818
7819 if (ilk_has_edp_a(dev_priv))
7820 g4x_dp_init(dev_priv, DP_A, PORT_A);
7821
7822 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
7823			/* PCH SDVOB is multiplexed with HDMIB */
7824 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
7825 if (!found)
7826 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
7827 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
7828 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
7829 }
7830
7831 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
7832 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
7833
7834 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
7835 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
7836
7837 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
7838 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
7839
7840 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
7841 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
7842 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7843 bool has_edp, has_port;
7844
7845 if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
7846 intel_crt_init(dev_priv);
7847
7848 /*
7849 * The DP_DETECTED bit is the latched state of the DDC
7850 * SDA pin at boot. However since eDP doesn't require DDC
7851 * (no way to plug in a DP->HDMI dongle) the DDC pins for
7852 * eDP ports may have been muxed to an alternate function.
7853 * Thus we can't rely on the DP_DETECTED bit alone to detect
7854 * eDP ports. Consult the VBT as well as DP_DETECTED to
7855 * detect eDP ports.
7856 *
7857 * Sadly the straps seem to be missing sometimes even for HDMI
7858		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
7859 * and VBT for the presence of the port. Additionally we can't
7860 * trust the port type the VBT declares as we've seen at least
7861 * HDMI ports that the VBT claim are DP or eDP.
7862 */
7863 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
7864 has_port = intel_bios_is_port_present(display, PORT_B);
7865 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
7866 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
7867 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
7868 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
7869
7870 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
7871 has_port = intel_bios_is_port_present(display, PORT_C);
7872 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
7873 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
7874 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
7875 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
7876
7877 if (IS_CHERRYVIEW(dev_priv)) {
7878 /*
7879 * eDP not supported on port D,
7880 * so no need to worry about it
7881 */
7882 has_port = intel_bios_is_port_present(display, PORT_D);
7883 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
7884 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
7885 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
7886 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
7887 }
7888
7889 vlv_dsi_init(dev_priv);
7890 } else if (IS_PINEVIEW(dev_priv)) {
7891 intel_lvds_init(dev_priv);
7892 intel_crt_init(dev_priv);
7893 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
7894 bool found = false;
7895
7896 if (IS_MOBILE(dev_priv))
7897 intel_lvds_init(dev_priv);
7898
7899 intel_crt_init(dev_priv);
7900
7901 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
7902 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
7903 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
7904 if (!found && IS_G4X(dev_priv)) {
7905 drm_dbg_kms(&dev_priv->drm,
7906 "probing HDMI on SDVOB\n");
7907 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
7908 }
7909
7910 if (!found && IS_G4X(dev_priv))
7911 g4x_dp_init(dev_priv, DP_B, PORT_B);
7912 }
7913
7914		/* Before G4X, SDVOC doesn't have its own detect register */
7915
7916 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
7917 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
7918 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
7919 }
7920
7921 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
7922
7923 if (IS_G4X(dev_priv)) {
7924 drm_dbg_kms(&dev_priv->drm,
7925 "probing HDMI on SDVOC\n");
7926 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
7927 }
7928 if (IS_G4X(dev_priv))
7929 g4x_dp_init(dev_priv, DP_C, PORT_C);
7930 }
7931
7932 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
7933 g4x_dp_init(dev_priv, DP_D, PORT_D);
7934
7935 if (SUPPORTS_TV(dev_priv))
7936 intel_tv_init(display);
7937 } else if (DISPLAY_VER(dev_priv) == 2) {
7938 if (IS_I85X(dev_priv))
7939 intel_lvds_init(dev_priv);
7940
7941 intel_crt_init(dev_priv);
7942 intel_dvo_init(dev_priv);
7943 }
7944
7945 for_each_intel_encoder(&dev_priv->drm, encoder) {
7946 encoder->base.possible_crtcs =
7947 intel_encoder_possible_crtcs(encoder);
7948 encoder->base.possible_clones =
7949 intel_encoder_possible_clones(encoder);
7950 }
7951
7952 intel_init_pch_refclk(dev_priv);
7953
7954 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
7955 }
7956
7957 static int max_dotclock(struct drm_i915_private *i915)
7958 {
7959 int max_dotclock = i915->display.cdclk.max_dotclk_freq;
7960
7961 /* icl+ might use joiner */
7962 if (DISPLAY_VER(i915) >= 11)
7963 max_dotclock *= 2;
7964
7965 return max_dotclock;
7966 }
7967
7968 enum drm_mode_status intel_mode_valid(struct drm_device *dev,
7969 const struct drm_display_mode *mode)
7970 {
7971 struct drm_i915_private *dev_priv = to_i915(dev);
7972 int hdisplay_max, htotal_max;
7973 int vdisplay_max, vtotal_max;
7974
7975 /*
7976 * Can't reject DBLSCAN here because Xorg ddxen can add piles
7977 * of DBLSCAN modes to the output's mode list when they detect
7978 * the scaling mode property on the connector. And they don't
7979 * ask the kernel to validate those modes in any way until
7980 * modeset time at which point the client gets a protocol error.
7981 * So in order to not upset those clients we silently ignore the
7982 * DBLSCAN flag on such connectors. For other connectors we will
7983 * reject modes with the DBLSCAN flag in encoder->compute_config().
7984 * And we always reject DBLSCAN modes in connector->mode_valid()
7985 * as we never want such modes on the connector's mode list.
7986 */
7987
7988 if (mode->vscan > 1)
7989 return MODE_NO_VSCAN;
7990
7991 if (mode->flags & DRM_MODE_FLAG_HSKEW)
7992 return MODE_H_ILLEGAL;
7993
7994 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
7995 DRM_MODE_FLAG_NCSYNC |
7996 DRM_MODE_FLAG_PCSYNC))
7997 return MODE_HSYNC;
7998
7999 if (mode->flags & (DRM_MODE_FLAG_BCAST |
8000 DRM_MODE_FLAG_PIXMUX |
8001 DRM_MODE_FLAG_CLKDIV2))
8002 return MODE_BAD;
8003
8004 /*
8005 * Reject clearly excessive dotclocks early to
8006 * avoid having to worry about huge integers later.
8007 */
8008 if (mode->clock > max_dotclock(dev_priv))
8009 return MODE_CLOCK_HIGH;
8010
8011 /* Transcoder timing limits */
8012 if (DISPLAY_VER(dev_priv) >= 11) {
8013 hdisplay_max = 16384;
8014 vdisplay_max = 8192;
8015 htotal_max = 16384;
8016 vtotal_max = 8192;
8017 } else if (DISPLAY_VER(dev_priv) >= 9 ||
8018 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
8019 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
8020 vdisplay_max = 4096;
8021 htotal_max = 8192;
8022 vtotal_max = 8192;
8023 } else if (DISPLAY_VER(dev_priv) >= 3) {
8024 hdisplay_max = 4096;
8025 vdisplay_max = 4096;
8026 htotal_max = 8192;
8027 vtotal_max = 8192;
8028 } else {
8029 hdisplay_max = 2048;
8030 vdisplay_max = 2048;
8031 htotal_max = 4096;
8032 vtotal_max = 4096;
8033 }
8034
8035 if (mode->hdisplay > hdisplay_max ||
8036 mode->hsync_start > htotal_max ||
8037 mode->hsync_end > htotal_max ||
8038 mode->htotal > htotal_max)
8039 return MODE_H_ILLEGAL;
8040
8041 if (mode->vdisplay > vdisplay_max ||
8042 mode->vsync_start > vtotal_max ||
8043 mode->vsync_end > vtotal_max ||
8044 mode->vtotal > vtotal_max)
8045 return MODE_V_ILLEGAL;
8046
8047 return MODE_OK;
8048 }
8049
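/**
 * intel_cpu_transcoder_mode_valid - validate a mode against CPU transcoder limits
 * @dev_priv: device private
 * @mode: display mode to validate
 *
 * Check @mode against the minimum hblank/vblank requirements of the
 * CPU transcoders. Does not cover the BXT/GLK DSI transcoders.
 */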
enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
						     const struct drm_display_mode *mode)
{
	/*
	 * Additional transcoder timing limits,
	 * excluding BXT/GLK DSI transcoders.
	 */
	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode,
				bool joiner)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(dev_priv) >= 11) {
		plane_width_max = 5120 << joiner;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

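/*
 * Per-platform display vtables; intel_init_display_hooks() below
 * picks the right one for the running hardware.
 */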
static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
	.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};

static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display.funcs.display = &skl_display_funcs;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.funcs.display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.funcs.display = &pch_split_display_funcs;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.funcs.display = &vlv_display_funcs;
	} else {
		dev_priv->display.funcs.display = &i9xx_display_funcs;
	}
}

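/*
 * Commit the state read out from the hardware at driver load: force a
 * LUT update on every active pipe, and force a full modeset on any
 * encoder whose initial fastset check fails.
 */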
int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

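/*
 * Force a pipe on with a fixed 640x480@60Hz mode. Used by the quirk
 * for i830, which apparently needs its pipes kept running even with
 * no output enabled.
 */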
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(dev_priv, PIPESRC(dev_priv, pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
		       dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
	udelay(150);

	/*
	 * The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

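/*
 * Counterpart to i830_enable_pipe(): turn a force-enabled pipe back
 * off. All planes and cursors must already be disabled.
 */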
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), 0);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
}

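/*
 * Cancel all connector work that the hotplug machinery may have
 * queued; a cancelled modeset retry work also drops the connector
 * reference that was taken when it was queued.
 */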
void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func &&
		    cancel_work_sync(&connector->modeset_retry_work))
			drm_connector_put(&connector->base);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

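/*
 * gen6+ display engines apparently prefetch past the end of the
 * scanout buffer, which can trip up VT-d; report whether callers
 * need to work around that.
 */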
bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
}
