/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary(struct drm_crtc *crtc);

typedef struct {
	int	min, max;
} intel_range_t;

typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

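/*
 * Derive a CCK-sourced clock rate from the HPLL.  The register's divider
 * field selects a ratio of (divider + 1) / 2, so the returned rate is
 * hpll_freq * 2 / (divider + 1), in kHz.
 */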
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	u32 val;
	int divider;

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}

int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

/* hrawclk is 1/4 the FSB frequency */
int intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!IS_VALLEYVIEW(dev_priv))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

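/*
 * The limit tables below bound the PLL divider values for each
 * platform/output combination.  The dividers combine roughly as
 *
 *   vco = refclk * m / n,  dot = vco / p,  with p = p1 * p2
 *
 * (modulo per-platform register offsets such as the +2 on N/M1/M2; see
 * the *_calc_dpll_params() helpers below).  Frequencies are in kHz.
 */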
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at the staged connector state in the
 * atomic transaction instead of the current encoder->crtc links.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	WARN_ON(num_connectors == 0);

	return false;
}

static const intel_limit_t *
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *
intel_g4x_limit(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (IS_BROXTON(dev))
		limit = &intel_limits_bxt;
	else if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc_state, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc_state);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_CHERRYVIEW(dev)) {
		limit = &intel_limits_chv;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

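/*
 * i9xx encodes the feedback divider as two register fields with a +2 bias
 * each: the effective divider is m = 5 * (m1 + 2) + (m2 + 2).
 */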
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

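/*
 * CHV stores m2 as a fixed-point value with 22 fractional bits (note the
 * 24 << 22 .. 175 << 22 m2 limits above), hence the 64-bit math and the
 * n << 22 scaling when computing the VCO rate.
 */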
int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

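/*
 * Choose the post divider (p2): LVDS follows the panel's current
 * single/dual channel state, all other outputs switch from p2_slow to
 * p2_fast once the target dot clock exceeds the table's dot_limit.
 */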
static int
i9xx_select_p2_div(const intel_limit_t *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

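/*
 * Exhaustively walk the m1/m2/n/p1 divider ranges (with m2 < m1, as the
 * hardware requires here) and keep the combination whose resulting dot
 * clock is closest to the target.
 */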
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. The calculated error is
 * returned in *error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const intel_clock_t *calculated_clock,
			       const intel_clock_t *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(dev)) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

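/*
 * VLV searches in the "fast clock" domain (5x the pixel rate, matching
 * the limit table above) and, rather than iterating m2, solves for it
 * directly from the target rate for each n/p1/p2/m1 combination.
 */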
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

static bool
chv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	intel_clock_t clock;
	uint64_t m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 is always
	 * set to 2.  If we ever need to support a 200MHz refclk, we will
	 * have to revisit this because n may no longer be 1.
	 */
	clock.n = 1;
	clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

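/*
 * BXT reuses the CHV search: its PLL has the same structure (n fixed at 1,
 * m1 fixed at 2, fixed-point m2), as reflected in intel_limits_bxt above.
 */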
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			intel_clock_t *best_clock)
{
	int refclk = i9xx_get_refclk(crtc_state, 0);

	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}

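/*
 * Sample the pipe's current scanline (PIPEDSL) twice, 5 ms apart; if the
 * value hasn't moved, the pipe has stopped scanning out.
 */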
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)

struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (crtc->config->shared_dpll < 0)
		return NULL;

	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
}

/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN(!pll,
		 "asserting DPLL %s with no DPLL\n", state_string(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     pll->name, state_string(state), state_string(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	if (!intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

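/*
 * Decode whether the given DP port register value is driving the given
 * pipe; the pipe/transcoder select encoding differs between CPT (stored
 * in TRANS_DP_CTL), CHV, and older platforms.
 */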
dp_pipe_enabled(struct drm_i915_private * dev_priv,enum pipe pipe,u32 port_sel,u32 val)1477 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1478 			    enum pipe pipe, u32 port_sel, u32 val)
1479 {
1480 	if ((val & DP_PORT_EN) == 0)
1481 		return false;
1482 
1483 	if (HAS_PCH_CPT(dev_priv->dev)) {
1484 		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1485 		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1486 		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1487 			return false;
1488 	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1489 		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1490 			return false;
1491 	} else {
1492 		if ((val & DP_PIPE_MASK) != (pipe << 30))
1493 			return false;
1494 	}
1495 	return true;
1496 }
1497 
hdmi_pipe_enabled(struct drm_i915_private * dev_priv,enum pipe pipe,u32 val)1498 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1499 			      enum pipe pipe, u32 val)
1500 {
1501 	if ((val & SDVO_ENABLE) == 0)
1502 		return false;
1503 
1504 	if (HAS_PCH_CPT(dev_priv->dev)) {
1505 		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1506 			return false;
1507 	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1508 		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1509 			return false;
1510 	} else {
1511 		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1512 			return false;
1513 	}
1514 	return true;
1515 }
1516 
lvds_pipe_enabled(struct drm_i915_private * dev_priv,enum pipe pipe,u32 val)1517 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1518 			      enum pipe pipe, u32 val)
1519 {
1520 	if ((val & LVDS_PORT_EN) == 0)
1521 		return false;
1522 
1523 	if (HAS_PCH_CPT(dev_priv->dev)) {
1524 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1525 			return false;
1526 	} else {
1527 		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1528 			return false;
1529 	}
1530 	return true;
1531 }
1532 
adpa_pipe_enabled(struct drm_i915_private * dev_priv,enum pipe pipe,u32 val)1533 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1534 			      enum pipe pipe, u32 val)
1535 {
1536 	if ((val & ADPA_DAC_ENABLE) == 0)
1537 		return false;
1538 	if (HAS_PCH_CPT(dev_priv->dev)) {
1539 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1540 			return false;
1541 	} else {
1542 		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1543 			return false;
1544 	}
1545 	return true;
1546 }
1547 
assert_pch_dp_disabled(struct drm_i915_private * dev_priv,enum pipe pipe,int reg,u32 port_sel)1548 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1549 				   enum pipe pipe, int reg, u32 port_sel)
1550 {
1551 	u32 val = I915_READ(reg);
1552 	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1553 	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1554 	     reg, pipe_name(pipe));
1555 
1556 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1557 	     && (val & DP_PIPEB_SELECT),
1558 	     "IBX PCH dp port still using transcoder B\n");
1559 }
1560 
assert_pch_hdmi_disabled(struct drm_i915_private * dev_priv,enum pipe pipe,int reg)1561 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1562 				     enum pipe pipe, int reg)
1563 {
1564 	u32 val = I915_READ(reg);
1565 	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1566 	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1567 	     reg, pipe_name(pipe));
1568 
1569 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1570 	     && (val & SDVO_PIPE_B_SELECT),
1571 	     "IBX PCH hdmi port still using transcoder B\n");
1572 }
1573 
1574 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1575 				      enum pipe pipe)
1576 {
1577 	u32 val;
1578 
1579 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1580 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1581 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1582 
1583 	val = I915_READ(PCH_ADPA);
1584 	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1585 	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1586 	     pipe_name(pipe));
1587 
1588 	val = I915_READ(PCH_LVDS);
1589 	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1590 	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1591 	     pipe_name(pipe));
1592 
1593 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1594 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1595 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1596 }
1597 
1598 static void vlv_enable_pll(struct intel_crtc *crtc,
1599 			   const struct intel_crtc_state *pipe_config)
1600 {
1601 	struct drm_device *dev = crtc->base.dev;
1602 	struct drm_i915_private *dev_priv = dev->dev_private;
1603 	int reg = DPLL(crtc->pipe);
1604 	u32 dpll = pipe_config->dpll_hw_state.dpll;
1605 
1606 	assert_pipe_disabled(dev_priv, crtc->pipe);
1607 
1608 	/* No really, not for ILK+ */
1609 	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1610 
1611 	/* PLL is protected by panel, make sure we can write it */
1612 	if (IS_MOBILE(dev_priv->dev))
1613 		assert_panel_unlocked(dev_priv, crtc->pipe);
1614 
1615 	I915_WRITE(reg, dpll);
1616 	POSTING_READ(reg);
1617 	udelay(150);
1618 
1619 	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1620 		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1621 
1622 	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
1623 	POSTING_READ(DPLL_MD(crtc->pipe));
1624 
1625 	/* We do this three times for luck */
1626 	I915_WRITE(reg, dpll);
1627 	POSTING_READ(reg);
1628 	udelay(150); /* wait for warmup */
1629 	I915_WRITE(reg, dpll);
1630 	POSTING_READ(reg);
1631 	udelay(150); /* wait for warmup */
1632 	I915_WRITE(reg, dpll);
1633 	POSTING_READ(reg);
1634 	udelay(150); /* wait for warmup */
1635 }
1636 
1637 static void chv_enable_pll(struct intel_crtc *crtc,
1638 			   const struct intel_crtc_state *pipe_config)
1639 {
1640 	struct drm_device *dev = crtc->base.dev;
1641 	struct drm_i915_private *dev_priv = dev->dev_private;
1642 	int pipe = crtc->pipe;
1643 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1644 	u32 tmp;
1645 
1646 	assert_pipe_disabled(dev_priv, crtc->pipe);
1647 
1648 	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1649 
1650 	mutex_lock(&dev_priv->sb_lock);
1651 
1652 	/* Re-enable the 10bit clock to the display controller */
1653 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1654 	tmp |= DPIO_DCLKP_EN;
1655 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1656 
1657 	mutex_unlock(&dev_priv->sb_lock);
1658 
1659 	/*
1660 	 * Need to wait > 100ns between setting the dclkp enable bit and enabling the PLL.
1661 	 */
1662 	udelay(1);
1663 
1664 	/* Enable PLL */
1665 	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1666 
1667 	/* Check PLL is locked */
1668 	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1669 		DRM_ERROR("PLL %d failed to lock\n", pipe);
1670 
1671 	/* not sure when this should be written */
1672 	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1673 	POSTING_READ(DPLL_MD(pipe));
1674 }
1675 
1676 static int intel_num_dvo_pipes(struct drm_device *dev)
1677 {
1678 	struct intel_crtc *crtc;
1679 	int count = 0;
1680 
1681 	for_each_intel_crtc(dev, crtc)
1682 		count += crtc->base.state->active &&
1683 			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1684 
1685 	return count;
1686 }
1687 
1688 static void i9xx_enable_pll(struct intel_crtc *crtc)
1689 {
1690 	struct drm_device *dev = crtc->base.dev;
1691 	struct drm_i915_private *dev_priv = dev->dev_private;
1692 	int reg = DPLL(crtc->pipe);
1693 	u32 dpll = crtc->config->dpll_hw_state.dpll;
1694 
1695 	assert_pipe_disabled(dev_priv, crtc->pipe);
1696 
1697 	/* No really, not for ILK+ */
1698 	BUG_ON(INTEL_INFO(dev)->gen >= 5);
1699 
1700 	/* PLL is protected by panel, make sure we can write it */
1701 	if (IS_MOBILE(dev) && !IS_I830(dev))
1702 		assert_panel_unlocked(dev_priv, crtc->pipe);
1703 
1704 	/* Enable DVO 2x clock on both PLLs if necessary */
1705 	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1706 		/*
1707 		 * It appears to be important that we don't enable this
1708 		 * for the current pipe before otherwise configuring the
1709 		 * PLL. No idea how this should be handled if multiple
1710 		 * DVO outputs are enabled simultaneously.
1711 		 */
1712 		dpll |= DPLL_DVO_2X_MODE;
1713 		I915_WRITE(DPLL(!crtc->pipe),
1714 			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1715 	}
1716 
1717 	/*
1718 	 * Apparently we need to have VGA mode enabled prior to changing
1719 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1720 	 * dividers, even though the register value does change.
1721 	 */
1722 	I915_WRITE(reg, 0);
1723 
1724 	I915_WRITE(reg, dpll);
1725 
1726 	/* Wait for the clocks to stabilize. */
1727 	POSTING_READ(reg);
1728 	udelay(150);
1729 
1730 	if (INTEL_INFO(dev)->gen >= 4) {
1731 		I915_WRITE(DPLL_MD(crtc->pipe),
1732 			   crtc->config->dpll_hw_state.dpll_md);
1733 	} else {
1734 		/* The pixel multiplier can only be updated once the
1735 		 * DPLL is enabled and the clocks are stable.
1736 		 *
1737 		 * So write it again.
1738 		 */
1739 		I915_WRITE(reg, dpll);
1740 	}
1741 
1742 	/* We do this three times for luck */
1743 	I915_WRITE(reg, dpll);
1744 	POSTING_READ(reg);
1745 	udelay(150); /* wait for warmup */
1746 	I915_WRITE(reg, dpll);
1747 	POSTING_READ(reg);
1748 	udelay(150); /* wait for warmup */
1749 	I915_WRITE(reg, dpll);
1750 	POSTING_READ(reg);
1751 	udelay(150); /* wait for warmup */
1752 }
1753 
1754 /**
1755  * i9xx_disable_pll - disable a PLL
1756  * @crtc: crtc whose pipe PLL should be disabled
1757  *
1758  * Disable the PLL for @crtc's pipe, making sure the pipe
1759  * is off first.
1760  *
1761  * Note!  This is for pre-ILK only.
1762  */
1763 static void i9xx_disable_pll(struct intel_crtc *crtc)
1764 {
1765 	struct drm_device *dev = crtc->base.dev;
1766 	struct drm_i915_private *dev_priv = dev->dev_private;
1767 	enum pipe pipe = crtc->pipe;
1768 
1769 	/* Disable DVO 2x clock on both PLLs if necessary */
1770 	if (IS_I830(dev) &&
1771 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1772 	    !intel_num_dvo_pipes(dev)) {
1773 		I915_WRITE(DPLL(PIPE_B),
1774 			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1775 		I915_WRITE(DPLL(PIPE_A),
1776 			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1777 	}
1778 
1779 	/* Don't disable pipe or pipe PLLs if needed */
1780 	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1781 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1782 		return;
1783 
1784 	/* Make sure the pipe isn't still relying on us */
1785 	assert_pipe_disabled(dev_priv, pipe);
1786 
1787 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1788 	POSTING_READ(DPLL(pipe));
1789 }
1790 
1791 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1792 {
1793 	u32 val;
1794 
1795 	/* Make sure the pipe isn't still relying on us */
1796 	assert_pipe_disabled(dev_priv, pipe);
1797 
1798 	/*
1799 	 * Leave integrated clock source and reference clock enabled for pipe B.
1800 	 * The latter is needed for VGA hotplug / manual detection.
1801 	 */
1802 	val = DPLL_VGA_MODE_DIS;
1803 	if (pipe == PIPE_B)
1804 		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
1805 	I915_WRITE(DPLL(pipe), val);
1806 	POSTING_READ(DPLL(pipe));
1808 }
1809 
1810 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1811 {
1812 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1813 	u32 val;
1814 
1815 	/* Make sure the pipe isn't still relying on us */
1816 	assert_pipe_disabled(dev_priv, pipe);
1817 
1818 	/* Set PLL en = 0 */
1819 	val = DPLL_SSC_REF_CLK_CHV |
1820 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1821 	if (pipe != PIPE_A)
1822 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1823 	I915_WRITE(DPLL(pipe), val);
1824 	POSTING_READ(DPLL(pipe));
1825 
1826 	mutex_lock(&dev_priv->sb_lock);
1827 
1828 	/* Disable 10bit clock to display controller */
1829 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1830 	val &= ~DPIO_DCLKP_EN;
1831 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1832 
1833 	mutex_unlock(&dev_priv->sb_lock);
1834 }
1835 
1836 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1837 			 struct intel_digital_port *dport,
1838 			 unsigned int expected_mask)
1839 {
1840 	u32 port_mask;
1841 	int dpll_reg;
1842 
1843 	switch (dport->port) {
1844 	case PORT_B:
1845 		port_mask = DPLL_PORTB_READY_MASK;
1846 		dpll_reg = DPLL(0);
1847 		break;
1848 	case PORT_C:
1849 		port_mask = DPLL_PORTC_READY_MASK;
1850 		dpll_reg = DPLL(0);
1851 		expected_mask <<= 4;
1852 		break;
1853 	case PORT_D:
1854 		port_mask = DPLL_PORTD_READY_MASK;
1855 		dpll_reg = DPIO_PHY_STATUS;
1856 		break;
1857 	default:
1858 		BUG();
1859 	}
1860 
1861 	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1862 		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1863 		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1864 }
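
/*
 * Worked example for the PORT_C case above (a sketch inferred from the
 * expected_mask <<= 4, not from documentation): the port C ready bits
 * appear to sit 4 bit positions above the port B ones in DPLL(0).  So a
 * caller waiting for expected_mask 0x3 on port B polls for
 * (I915_READ(DPLL(0)) & DPLL_PORTB_READY_MASK) == 0x3, while the same
 * wait on port C polls for
 * (I915_READ(DPLL(0)) & DPLL_PORTC_READY_MASK) == 0x30.
 */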
1865 
1866 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1867 {
1868 	struct drm_device *dev = crtc->base.dev;
1869 	struct drm_i915_private *dev_priv = dev->dev_private;
1870 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1871 
1872 	if (WARN_ON(pll == NULL))
1873 		return;
1874 
1875 	WARN_ON(!pll->config.crtc_mask);
1876 	if (pll->active == 0) {
1877 		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1878 		WARN_ON(pll->on);
1879 		assert_shared_dpll_disabled(dev_priv, pll);
1880 
1881 		pll->mode_set(dev_priv, pll);
1882 	}
1883 }
1884 
1885 /**
1886  * intel_enable_shared_dpll - enable PCH PLL
1887  * @crtc: crtc using the shared DPLL to be enabled
1888  * (looked up via intel_crtc_to_shared_dpll())
1889  *
1890  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1891  * drives the transcoder clock.
1892  */
1893 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1894 {
1895 	struct drm_device *dev = crtc->base.dev;
1896 	struct drm_i915_private *dev_priv = dev->dev_private;
1897 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1898 
1899 	if (WARN_ON(pll == NULL))
1900 		return;
1901 
1902 	if (WARN_ON(pll->config.crtc_mask == 0))
1903 		return;
1904 
1905 	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1906 		      pll->name, pll->active, pll->on,
1907 		      crtc->base.base.id);
1908 
1909 	if (pll->active++) {
1910 		WARN_ON(!pll->on);
1911 		assert_shared_dpll_enabled(dev_priv, pll);
1912 		return;
1913 	}
1914 	WARN_ON(pll->on);
1915 
1916 	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1917 
1918 	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1919 	pll->enable(dev_priv, pll);
1920 	pll->on = true;
1921 }
1922 
1923 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1924 {
1925 	struct drm_device *dev = crtc->base.dev;
1926 	struct drm_i915_private *dev_priv = dev->dev_private;
1927 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1928 
1929 	/* PCH only available on ILK+ */
1930 	if (INTEL_INFO(dev)->gen < 5)
1931 		return;
1932 
1933 	if (pll == NULL)
1934 		return;
1935 
1936 	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
1937 		return;
1938 
1939 	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1940 		      pll->name, pll->active, pll->on,
1941 		      crtc->base.base.id);
1942 
1943 	if (WARN_ON(pll->active == 0)) {
1944 		assert_shared_dpll_disabled(dev_priv, pll);
1945 		return;
1946 	}
1947 
1948 	assert_shared_dpll_enabled(dev_priv, pll);
1949 	WARN_ON(!pll->on);
1950 	if (--pll->active)
1951 		return;
1952 
1953 	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1954 	pll->disable(dev_priv, pll);
1955 	pll->on = false;
1956 
1957 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1958 }
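
/*
 * In short, shared DPLLs are refcounted via pll->active:
 * intel_enable_shared_dpll() only touches the hardware on the 0 -> 1
 * transition and intel_disable_shared_dpll() only turns the PLL off
 * again on the final 1 -> 0 transition.  A rough usage sketch for one
 * crtc (illustrative only; the real call sites live in the modeset
 * paths):
 *
 *	intel_prepare_shared_dpll(crtc);  // program dividers while off
 *	intel_enable_shared_dpll(crtc);   // 0 -> 1: pll->enable()
 *	...                               // transcoder runs off the PLL
 *	intel_disable_shared_dpll(crtc);  // 1 -> 0: pll->disable()
 */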
1959 
1960 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1961 					   enum pipe pipe)
1962 {
1963 	struct drm_device *dev = dev_priv->dev;
1964 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1965 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1966 	uint32_t reg, val, pipeconf_val;
1967 
1968 	/* PCH only available on ILK+ */
1969 	BUG_ON(!HAS_PCH_SPLIT(dev));
1970 
1971 	/* Make sure PCH DPLL is enabled */
1972 	assert_shared_dpll_enabled(dev_priv,
1973 				   intel_crtc_to_shared_dpll(intel_crtc));
1974 
1975 	/* FDI must be feeding us bits for PCH ports */
1976 	assert_fdi_tx_enabled(dev_priv, pipe);
1977 	assert_fdi_rx_enabled(dev_priv, pipe);
1978 
1979 	if (HAS_PCH_CPT(dev)) {
1980 		/* Workaround: Set the timing override bit before enabling the
1981 		 * pch transcoder. */
1982 		reg = TRANS_CHICKEN2(pipe);
1983 		val = I915_READ(reg);
1984 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1985 		I915_WRITE(reg, val);
1986 	}
1987 
1988 	reg = PCH_TRANSCONF(pipe);
1989 	val = I915_READ(reg);
1990 	pipeconf_val = I915_READ(PIPECONF(pipe));
1991 
1992 	if (HAS_PCH_IBX(dev_priv->dev)) {
1993 		/*
1994 		 * Make the BPC of the transcoder consistent with
1995 		 * that of the pipeconf reg. For HDMI we must use
1996 		 * 8bpc here for both 8bpc and 12bpc modes.
1997 		 */
1998 		val &= ~PIPECONF_BPC_MASK;
1999 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
2000 			val |= PIPECONF_8BPC;
2001 		else
2002 			val |= pipeconf_val & PIPECONF_BPC_MASK;
2003 	}
2004 
2005 	val &= ~TRANS_INTERLACE_MASK;
2006 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
2007 		if (HAS_PCH_IBX(dev_priv->dev) &&
2008 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
2009 			val |= TRANS_LEGACY_INTERLACED_ILK;
2010 		else
2011 			val |= TRANS_INTERLACED;
2012 	} else
2013 		val |= TRANS_PROGRESSIVE;
2014 
2015 	I915_WRITE(reg, val | TRANS_ENABLE);
2016 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2017 		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2018 }
2019 
2020 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2021 				      enum transcoder cpu_transcoder)
2022 {
2023 	u32 val, pipeconf_val;
2024 
2025 	/* PCH only available on ILK+ */
2026 	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
2027 
2028 	/* FDI must be feeding us bits for PCH ports */
2029 	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
2030 	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
2031 
2032 	/* Workaround: set timing override bit. */
2033 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2034 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
2035 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2036 
2037 	val = TRANS_ENABLE;
2038 	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
2039 
2040 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
2041 	    PIPECONF_INTERLACED_ILK)
2042 		val |= TRANS_INTERLACED;
2043 	else
2044 		val |= TRANS_PROGRESSIVE;
2045 
2046 	I915_WRITE(LPT_TRANSCONF, val);
2047 	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
2048 		DRM_ERROR("Failed to enable PCH transcoder\n");
2049 }
2050 
2051 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2052 					    enum pipe pipe)
2053 {
2054 	struct drm_device *dev = dev_priv->dev;
2055 	uint32_t reg, val;
2056 
2057 	/* FDI relies on the transcoder */
2058 	assert_fdi_tx_disabled(dev_priv, pipe);
2059 	assert_fdi_rx_disabled(dev_priv, pipe);
2060 
2061 	/* Ports must be off as well */
2062 	assert_pch_ports_disabled(dev_priv, pipe);
2063 
2064 	reg = PCH_TRANSCONF(pipe);
2065 	val = I915_READ(reg);
2066 	val &= ~TRANS_ENABLE;
2067 	I915_WRITE(reg, val);
2068 	/* wait for PCH transcoder off, transcoder state */
2069 	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2070 		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
2071 
2072 	if (!HAS_PCH_IBX(dev)) {
2073 		/* Workaround: Clear the timing override chicken bit again. */
2074 		reg = TRANS_CHICKEN2(pipe);
2075 		val = I915_READ(reg);
2076 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2077 		I915_WRITE(reg, val);
2078 	}
2079 }
2080 
2081 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
2082 {
2083 	u32 val;
2084 
2085 	val = I915_READ(LPT_TRANSCONF);
2086 	val &= ~TRANS_ENABLE;
2087 	I915_WRITE(LPT_TRANSCONF, val);
2088 	/* wait for PCH transcoder off, transcoder state */
2089 	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
2090 		DRM_ERROR("Failed to disable PCH transcoder\n");
2091 
2092 	/* Workaround: clear timing override bit. */
2093 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2094 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2095 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2096 }
2097 
2098 /**
2099  * intel_enable_pipe - enable a pipe, asserting requirements
2100  * @crtc: crtc responsible for the pipe
2101  *
2102  * Enable @crtc's pipe, making sure that various hardware specific requirements
2103  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2104  */
2105 static void intel_enable_pipe(struct intel_crtc *crtc)
2106 {
2107 	struct drm_device *dev = crtc->base.dev;
2108 	struct drm_i915_private *dev_priv = dev->dev_private;
2109 	enum pipe pipe = crtc->pipe;
2110 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2111 								      pipe);
2112 	enum pipe pch_transcoder;
2113 	int reg;
2114 	u32 val;
2115 
2116 	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2117 
2118 	assert_planes_disabled(dev_priv, pipe);
2119 	assert_cursor_disabled(dev_priv, pipe);
2120 	assert_sprites_disabled(dev_priv, pipe);
2121 
2122 	if (HAS_PCH_LPT(dev_priv->dev))
2123 		pch_transcoder = TRANSCODER_A;
2124 	else
2125 		pch_transcoder = pipe;
2126 
2127 	/*
2128 	 * A pipe without a PLL won't actually be able to drive bits from
2129 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2130 	 * need the check.
2131 	 */
2132 	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
2133 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
2134 			assert_dsi_pll_enabled(dev_priv);
2135 		else
2136 			assert_pll_enabled(dev_priv, pipe);
2137 	} else {
2138 		if (crtc->config->has_pch_encoder) {
2139 			/* if driving the PCH, we need FDI enabled */
2140 			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2141 			assert_fdi_tx_pll_enabled(dev_priv,
2142 						  (enum pipe) cpu_transcoder);
2143 		}
2144 		/* FIXME: assert CPU port conditions for SNB+ */
2145 	}
2146 
2147 	reg = PIPECONF(cpu_transcoder);
2148 	val = I915_READ(reg);
2149 	if (val & PIPECONF_ENABLE) {
2150 		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2151 			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2152 		return;
2153 	}
2154 
2155 	I915_WRITE(reg, val | PIPECONF_ENABLE);
2156 	POSTING_READ(reg);
2157 }
2158 
2159 /**
2160  * intel_disable_pipe - disable a pipe, asserting requirements
2161  * @crtc: crtc whose pipe is to be disabled
2162  *
2163  * Disable the pipe of @crtc, making sure that various hardware
2164  * specific requirements are met, if applicable, e.g. plane
2165  * disabled, panel fitter off, etc.
2166  *
2167  * Will wait until the pipe has shut down before returning.
2168  */
2169 static void intel_disable_pipe(struct intel_crtc *crtc)
2170 {
2171 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2172 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2173 	enum pipe pipe = crtc->pipe;
2174 	int reg;
2175 	u32 val;
2176 
2177 	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2178 
2179 	/*
2180 	 * Make sure planes won't keep trying to pump pixels to us,
2181 	 * or we might hang the display.
2182 	 */
2183 	assert_planes_disabled(dev_priv, pipe);
2184 	assert_cursor_disabled(dev_priv, pipe);
2185 	assert_sprites_disabled(dev_priv, pipe);
2186 
2187 	reg = PIPECONF(cpu_transcoder);
2188 	val = I915_READ(reg);
2189 	if ((val & PIPECONF_ENABLE) == 0)
2190 		return;
2191 
2192 	/*
2193 	 * Double wide has implications for planes
2194 	 * so best keep it disabled when not needed.
2195 	 */
2196 	if (crtc->config->double_wide)
2197 		val &= ~PIPECONF_DOUBLE_WIDE;
2198 
2199 	/* Don't disable pipe or pipe PLLs if needed */
2200 	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2201 	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2202 		val &= ~PIPECONF_ENABLE;
2203 
2204 	I915_WRITE(reg, val);
2205 	if ((val & PIPECONF_ENABLE) == 0)
2206 		intel_wait_for_pipe_off(crtc);
2207 }
2208 
2209 static bool need_vtd_wa(struct drm_device *dev)
2210 {
2211 #ifdef CONFIG_INTEL_IOMMU
2212 	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2213 		return true;
2214 #endif
2215 	return false;
2216 }
2217 
2218 unsigned int
2219 intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
2220 		  uint64_t fb_format_modifier, unsigned int plane)
2221 {
2222 	unsigned int tile_height;
2223 	uint32_t pixel_bytes;
2224 
2225 	switch (fb_format_modifier) {
2226 	case DRM_FORMAT_MOD_NONE:
2227 		tile_height = 1;
2228 		break;
2229 	case I915_FORMAT_MOD_X_TILED:
2230 		tile_height = IS_GEN2(dev) ? 16 : 8;
2231 		break;
2232 	case I915_FORMAT_MOD_Y_TILED:
2233 		tile_height = 32;
2234 		break;
2235 	case I915_FORMAT_MOD_Yf_TILED:
2236 		pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
2237 		switch (pixel_bytes) {
2238 		default:
2239 		case 1:
2240 			tile_height = 64;
2241 			break;
2242 		case 2:
2243 		case 4:
2244 			tile_height = 32;
2245 			break;
2246 		case 8:
2247 			tile_height = 16;
2248 			break;
2249 		case 16:
2250 			WARN_ONCE(1,
2251 				  "128-bit pixels are not supported for display!");
2252 			tile_height = 16;
2253 			break;
2254 		}
2255 		break;
2256 	default:
2257 		MISSING_CASE(fb_format_modifier);
2258 		tile_height = 1;
2259 		break;
2260 	}
2261 
2262 	return tile_height;
2263 }
2264 
2265 unsigned int
2266 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2267 		      uint32_t pixel_format, uint64_t fb_format_modifier)
2268 {
2269 	return ALIGN(height, intel_tile_height(dev, pixel_format,
2270 					       fb_format_modifier, 0));
2271 }
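
/*
 * Example based on the tile heights above: a Y-tiled framebuffer uses
 * 32-line tiles, so a 1080-line framebuffer is padded to
 * ALIGN(1080, 32) = 1088 lines, while a linear framebuffer
 * (tile_height 1) keeps its height unchanged.
 */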
2272 
2273 static int
2274 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2275 			const struct drm_plane_state *plane_state)
2276 {
2277 	struct intel_rotation_info *info = &view->rotation_info;
2278 	unsigned int tile_height, tile_pitch;
2279 
2280 	*view = i915_ggtt_view_normal;
2281 
2282 	if (!plane_state)
2283 		return 0;
2284 
2285 	if (!intel_rotation_90_or_270(plane_state->rotation))
2286 		return 0;
2287 
2288 	*view = i915_ggtt_view_rotated;
2289 
2290 	info->height = fb->height;
2291 	info->pixel_format = fb->pixel_format;
2292 	info->pitch = fb->pitches[0];
2293 	info->uv_offset = fb->offsets[1];
2294 	info->fb_modifier = fb->modifier[0];
2295 
2296 	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
2297 					fb->modifier[0], 0);
2298 	tile_pitch = PAGE_SIZE / tile_height;
2299 	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
2300 	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
2301 	info->size = info->width_pages * info->height_pages * PAGE_SIZE;
2302 
2303 	if (info->pixel_format == DRM_FORMAT_NV12) {
2304 		tile_height = intel_tile_height(fb->dev, fb->pixel_format,
2305 						fb->modifier[0], 1);
2306 		tile_pitch = PAGE_SIZE / tile_height;
2307 		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
2308 		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
2309 						     tile_height);
2310 		info->size_uv = info->width_pages_uv * info->height_pages_uv *
2311 				PAGE_SIZE;
2312 	}
2313 
2314 	return 0;
2315 }
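
/*
 * Worked example for the rotated-view sizing above (illustrative
 * numbers, assuming 4 KiB pages): a 1920x1080 XRGB8888 Y-tiled fb with
 * pitches[0] = 7680 gives tile_height = 32 and
 * tile_pitch = 4096 / 32 = 128 bytes, hence
 * width_pages = DIV_ROUND_UP(7680, 128) = 60 and
 * height_pages = DIV_ROUND_UP(1080, 32) = 34, i.e. the rotated view
 * occupies 60 * 34 = 2040 pages (just under 8 MiB) of GGTT space.
 */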
2316 
2317 static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2318 {
2319 	if (INTEL_INFO(dev_priv)->gen >= 9)
2320 		return 256 * 1024;
2321 	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2322 		 IS_VALLEYVIEW(dev_priv))
2323 		return 128 * 1024;
2324 	else if (INTEL_INFO(dev_priv)->gen >= 4)
2325 		return 4 * 1024;
2326 	else
2327 		return 0;
2328 }
2329 
2330 int
2331 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2332 			   struct drm_framebuffer *fb,
2333 			   const struct drm_plane_state *plane_state,
2334 			   struct intel_engine_cs *pipelined,
2335 			   struct drm_i915_gem_request **pipelined_request)
2336 {
2337 	struct drm_device *dev = fb->dev;
2338 	struct drm_i915_private *dev_priv = dev->dev_private;
2339 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2340 	struct i915_ggtt_view view;
2341 	u32 alignment;
2342 	int ret;
2343 
2344 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2345 
2346 	switch (fb->modifier[0]) {
2347 	case DRM_FORMAT_MOD_NONE:
2348 		alignment = intel_linear_alignment(dev_priv);
2349 		break;
2350 	case I915_FORMAT_MOD_X_TILED:
2351 		if (INTEL_INFO(dev)->gen >= 9)
2352 			alignment = 256 * 1024;
2353 		else {
2354 			/* pin() will align the object as required by fence */
2355 			alignment = 0;
2356 		}
2357 		break;
2358 	case I915_FORMAT_MOD_Y_TILED:
2359 	case I915_FORMAT_MOD_Yf_TILED:
2360 		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
2361 			  "Y tiling bo slipped through, driver bug!\n"))
2362 			return -EINVAL;
2363 		alignment = 1 * 1024 * 1024;
2364 		break;
2365 	default:
2366 		MISSING_CASE(fb->modifier[0]);
2367 		return -EINVAL;
2368 	}
2369 
2370 	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
2371 	if (ret)
2372 		return ret;
2373 
2374 	/* Note that the w/a also requires 64 PTE of padding following the
2375 	 * bo. We currently fill all unused PTE with the shadow page and so
2376 	 * we should always have valid PTE following the scanout preventing
2377 	 * the VT-d warning.
2378 	 */
2379 	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2380 		alignment = 256 * 1024;
2381 
2382 	/*
2383 	 * Global gtt pte registers are special registers which actually forward
2384 	 * writes to a chunk of system memory, which means that there is no risk
2385 	 * that the register values disappear as soon as we call
2386 	 * intel_runtime_pm_put(), so it is correct to wrap only the
2387 	 * pin/unpin/fence and not more.
2388 	 */
2389 	intel_runtime_pm_get(dev_priv);
2390 
2391 	dev_priv->mm.interruptible = false;
2392 	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
2393 						   pipelined_request, &view);
2394 	if (ret)
2395 		goto err_interruptible;
2396 
2397 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2398 	 * fence, whereas 965+ only requires a fence if using
2399 	 * framebuffer compression.  For simplicity, we always install
2400 	 * a fence as the cost is not that onerous.
2401 	 */
2402 	if (view.type == I915_GGTT_VIEW_NORMAL) {
2403 		ret = i915_gem_object_get_fence(obj);
2404 		if (ret == -EDEADLK) {
2405 			/*
2406 			 * -EDEADLK means there are no free fences and
2407 			 * no pending flips.
2408 			 *
2409 			 * This is propagated to atomic, but it uses
2410 			 * -EDEADLK to force a locking recovery, so
2411 			 * change the returned error to -EBUSY.
2412 			 */
2413 			ret = -EBUSY;
2414 			goto err_unpin;
2415 		} else if (ret)
2416 			goto err_unpin;
2417 
2418 		i915_gem_object_pin_fence(obj);
2419 	}
2420 
2421 	dev_priv->mm.interruptible = true;
2422 	intel_runtime_pm_put(dev_priv);
2423 	return 0;
2424 
2425 err_unpin:
2426 	i915_gem_object_unpin_from_display_plane(obj, &view);
2427 err_interruptible:
2428 	dev_priv->mm.interruptible = true;
2429 	intel_runtime_pm_put(dev_priv);
2430 	return ret;
2431 }
2432 
2433 static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2434 			       const struct drm_plane_state *plane_state)
2435 {
2436 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2437 	struct i915_ggtt_view view;
2438 	int ret;
2439 
2440 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2441 
2442 	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
2443 	WARN_ONCE(ret, "Couldn't get view from plane state!");
2444 
2445 	if (view.type == I915_GGTT_VIEW_NORMAL)
2446 		i915_gem_object_unpin_fence(obj);
2447 
2448 	i915_gem_object_unpin_from_display_plane(obj, &view);
2449 }
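
/*
 * Note the symmetry with intel_pin_and_fence_fb_obj() above: both
 * derive the same GGTT view from the plane state, so a fence taken for
 * the normal view at pin time is reliably dropped here at unpin time.
 */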
2450 
2451 /* Computes the linear offset to the base tile and adjusts x, y. The bytes
2452  * per pixel value is assumed to be a power of two. */
2453 unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
2454 					     int *x, int *y,
2455 					     unsigned int tiling_mode,
2456 					     unsigned int cpp,
2457 					     unsigned int pitch)
2458 {
2459 	if (tiling_mode != I915_TILING_NONE) {
2460 		unsigned int tile_rows, tiles;
2461 
2462 		tile_rows = *y / 8;
2463 		*y %= 8;
2464 
2465 		tiles = *x / (512/cpp);
2466 		*x %= 512/cpp;
2467 
2468 		return tile_rows * pitch * 8 + tiles * 4096;
2469 	} else {
2470 		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
2471 		unsigned int offset;
2472 
2473 		offset = *y * pitch + *x * cpp;
2474 		*y = (offset & alignment) / pitch;
2475 		*x = ((offset & alignment) - *y * pitch) / cpp;
2476 		return offset & ~alignment;
2477 	}
2478 }
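
/*
 * Worked example for the tiled branch (illustrative numbers, assuming
 * X tiling with 4 KiB tiles of 512 bytes x 8 rows): with cpp = 4,
 * pitch = 8192 and (x, y) = (1000, 100) we get
 * tile_rows = 100 / 8 = 12 (y becomes 4) and
 * tiles = 1000 / (512 / 4) = 7 (x becomes 104), so the function
 * returns 12 * 8192 * 8 + 7 * 4096 = 815104, the byte offset of the
 * tile containing the pixel.
 */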
2479 
2480 static int i9xx_format_to_fourcc(int format)
2481 {
2482 	switch (format) {
2483 	case DISPPLANE_8BPP:
2484 		return DRM_FORMAT_C8;
2485 	case DISPPLANE_BGRX555:
2486 		return DRM_FORMAT_XRGB1555;
2487 	case DISPPLANE_BGRX565:
2488 		return DRM_FORMAT_RGB565;
2489 	default:
2490 	case DISPPLANE_BGRX888:
2491 		return DRM_FORMAT_XRGB8888;
2492 	case DISPPLANE_RGBX888:
2493 		return DRM_FORMAT_XBGR8888;
2494 	case DISPPLANE_BGRX101010:
2495 		return DRM_FORMAT_XRGB2101010;
2496 	case DISPPLANE_RGBX101010:
2497 		return DRM_FORMAT_XBGR2101010;
2498 	}
2499 }
2500 
2501 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2502 {
2503 	switch (format) {
2504 	case PLANE_CTL_FORMAT_RGB_565:
2505 		return DRM_FORMAT_RGB565;
2506 	default:
2507 	case PLANE_CTL_FORMAT_XRGB_8888:
2508 		if (rgb_order) {
2509 			if (alpha)
2510 				return DRM_FORMAT_ABGR8888;
2511 			else
2512 				return DRM_FORMAT_XBGR8888;
2513 		} else {
2514 			if (alpha)
2515 				return DRM_FORMAT_ARGB8888;
2516 			else
2517 				return DRM_FORMAT_XRGB8888;
2518 		}
2519 	case PLANE_CTL_FORMAT_XRGB_2101010:
2520 		if (rgb_order)
2521 			return DRM_FORMAT_XBGR2101010;
2522 		else
2523 			return DRM_FORMAT_XRGB2101010;
2524 	}
2525 }
2526 
2527 static bool
2528 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2529 			      struct intel_initial_plane_config *plane_config)
2530 {
2531 	struct drm_device *dev = crtc->base.dev;
2532 	struct drm_i915_private *dev_priv = to_i915(dev);
2533 	struct drm_i915_gem_object *obj = NULL;
2534 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2535 	struct drm_framebuffer *fb = &plane_config->fb->base;
2536 	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2537 	u32 size_aligned = round_up(plane_config->base + plane_config->size,
2538 				    PAGE_SIZE);
2539 
2540 	size_aligned -= base_aligned;
2541 
2542 	if (plane_config->size == 0)
2543 		return false;
2544 
2545 	/* If the FB is too big, just don't use it since fbdev is not very
2546 	 * important and we should probably use that space with FBC or other
2547 	 * features. */
2548 	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
2549 		return false;
2550 
2551 	obj = i915_gem_object_create_stolen_for_preallocated(dev,
2552 							     base_aligned,
2553 							     base_aligned,
2554 							     size_aligned);
2555 	if (!obj)
2556 		return false;
2557 
2558 	obj->tiling_mode = plane_config->tiling;
2559 	if (obj->tiling_mode == I915_TILING_X)
2560 		obj->stride = fb->pitches[0];
2561 
2562 	mode_cmd.pixel_format = fb->pixel_format;
2563 	mode_cmd.width = fb->width;
2564 	mode_cmd.height = fb->height;
2565 	mode_cmd.pitches[0] = fb->pitches[0];
2566 	mode_cmd.modifier[0] = fb->modifier[0];
2567 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2568 
2569 	mutex_lock(&dev->struct_mutex);
2570 	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2571 				   &mode_cmd, obj)) {
2572 		DRM_DEBUG_KMS("intel fb init failed\n");
2573 		goto out_unref_obj;
2574 	}
2575 	mutex_unlock(&dev->struct_mutex);
2576 
2577 	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2578 	return true;
2579 
2580 out_unref_obj:
2581 	drm_gem_object_unreference(&obj->base);
2582 	mutex_unlock(&dev->struct_mutex);
2583 	return false;
2584 }
2585 
2586 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2587 static void
2588 update_state_fb(struct drm_plane *plane)
2589 {
2590 	if (plane->fb == plane->state->fb)
2591 		return;
2592 
2593 	if (plane->state->fb)
2594 		drm_framebuffer_unreference(plane->state->fb);
2595 	plane->state->fb = plane->fb;
2596 	if (plane->state->fb)
2597 		drm_framebuffer_reference(plane->state->fb);
2598 }
2599 
2600 static void
2601 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2602 			     struct intel_initial_plane_config *plane_config)
2603 {
2604 	struct drm_device *dev = intel_crtc->base.dev;
2605 	struct drm_i915_private *dev_priv = dev->dev_private;
2606 	struct drm_crtc *c;
2607 	struct intel_crtc *i;
2608 	struct drm_i915_gem_object *obj;
2609 	struct drm_plane *primary = intel_crtc->base.primary;
2610 	struct drm_plane_state *plane_state = primary->state;
2611 	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2612 	struct intel_plane *intel_plane = to_intel_plane(primary);
2613 	struct drm_framebuffer *fb;
2614 
2615 	if (!plane_config->fb)
2616 		return;
2617 
2618 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2619 		fb = &plane_config->fb->base;
2620 		goto valid_fb;
2621 	}
2622 
2623 	kfree(plane_config->fb);
2624 
2625 	/*
2626 	 * Failed to alloc the obj, check to see if we should share
2627 	 * an fb with another CRTC instead
2628 	 */
2629 	for_each_crtc(dev, c) {
2630 		i = to_intel_crtc(c);
2631 
2632 		if (c == &intel_crtc->base)
2633 			continue;
2634 
2635 		if (!i->active)
2636 			continue;
2637 
2638 		fb = c->primary->fb;
2639 		if (!fb)
2640 			continue;
2641 
2642 		obj = intel_fb_obj(fb);
2643 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2644 			drm_framebuffer_reference(fb);
2645 			goto valid_fb;
2646 		}
2647 	}
2648 
2649 	/*
2650 	 * We've failed to reconstruct the BIOS FB.  Current display state
2651 	 * indicates that the primary plane is visible, but has a NULL FB,
2652 	 * which will lead to problems later if we don't fix it up.  The
2653 	 * simplest solution is to just disable the primary plane now and
2654 	 * pretend the BIOS never had it enabled.
2655 	 */
2656 	to_intel_plane_state(plane_state)->visible = false;
2657 	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2658 	intel_pre_disable_primary(&intel_crtc->base);
2659 	intel_plane->disable_plane(primary, &intel_crtc->base);
2660 
2661 	return;
2662 
2663 valid_fb:
2664 	plane_state->src_x = 0;
2665 	plane_state->src_y = 0;
2666 	plane_state->src_w = fb->width << 16;
2667 	plane_state->src_h = fb->height << 16;
2668 
2669 	plane_state->crtc_x = 0;
2670 	plane_state->crtc_y = 0;
2671 	plane_state->crtc_w = fb->width;
2672 	plane_state->crtc_h = fb->height;
2673 
2674 	obj = intel_fb_obj(fb);
2675 	if (obj->tiling_mode != I915_TILING_NONE)
2676 		dev_priv->preserve_bios_swizzle = true;
2677 
2678 	drm_framebuffer_reference(fb);
2679 	primary->fb = primary->state->fb = fb;
2680 	primary->crtc = primary->state->crtc = &intel_crtc->base;
2681 	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
2682 	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
2683 }
2684 
2685 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2686 				      struct drm_framebuffer *fb,
2687 				      int x, int y)
2688 {
2689 	struct drm_device *dev = crtc->dev;
2690 	struct drm_i915_private *dev_priv = dev->dev_private;
2691 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2692 	struct drm_plane *primary = crtc->primary;
2693 	bool visible = to_intel_plane_state(primary->state)->visible;
2694 	struct drm_i915_gem_object *obj;
2695 	int plane = intel_crtc->plane;
2696 	unsigned long linear_offset;
2697 	u32 dspcntr;
2698 	u32 reg = DSPCNTR(plane);
2699 	int pixel_size;
2700 
2701 	if (!visible || !fb) {
2702 		I915_WRITE(reg, 0);
2703 		if (INTEL_INFO(dev)->gen >= 4)
2704 			I915_WRITE(DSPSURF(plane), 0);
2705 		else
2706 			I915_WRITE(DSPADDR(plane), 0);
2707 		POSTING_READ(reg);
2708 		return;
2709 	}
2710 
2711 	obj = intel_fb_obj(fb);
2712 	if (WARN_ON(obj == NULL))
2713 		return;
2714 
2715 	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2716 
2717 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2718 
2719 	dspcntr |= DISPLAY_PLANE_ENABLE;
2720 
2721 	if (INTEL_INFO(dev)->gen < 4) {
2722 		if (intel_crtc->pipe == PIPE_B)
2723 			dspcntr |= DISPPLANE_SEL_PIPE_B;
2724 
2725 		/* pipesrc and dspsize control the size that is scaled from,
2726 		 * which should always be the user's requested size.
2727 		 */
2728 		I915_WRITE(DSPSIZE(plane),
2729 			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
2730 			   (intel_crtc->config->pipe_src_w - 1));
2731 		I915_WRITE(DSPPOS(plane), 0);
2732 	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2733 		I915_WRITE(PRIMSIZE(plane),
2734 			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
2735 			   (intel_crtc->config->pipe_src_w - 1));
2736 		I915_WRITE(PRIMPOS(plane), 0);
2737 		I915_WRITE(PRIMCNSTALPHA(plane), 0);
2738 	}
2739 
2740 	switch (fb->pixel_format) {
2741 	case DRM_FORMAT_C8:
2742 		dspcntr |= DISPPLANE_8BPP;
2743 		break;
2744 	case DRM_FORMAT_XRGB1555:
2745 		dspcntr |= DISPPLANE_BGRX555;
2746 		break;
2747 	case DRM_FORMAT_RGB565:
2748 		dspcntr |= DISPPLANE_BGRX565;
2749 		break;
2750 	case DRM_FORMAT_XRGB8888:
2751 		dspcntr |= DISPPLANE_BGRX888;
2752 		break;
2753 	case DRM_FORMAT_XBGR8888:
2754 		dspcntr |= DISPPLANE_RGBX888;
2755 		break;
2756 	case DRM_FORMAT_XRGB2101010:
2757 		dspcntr |= DISPPLANE_BGRX101010;
2758 		break;
2759 	case DRM_FORMAT_XBGR2101010:
2760 		dspcntr |= DISPPLANE_RGBX101010;
2761 		break;
2762 	default:
2763 		BUG();
2764 	}
2765 
2766 	if (INTEL_INFO(dev)->gen >= 4 &&
2767 	    obj->tiling_mode != I915_TILING_NONE)
2768 		dspcntr |= DISPPLANE_TILED;
2769 
2770 	if (IS_G4X(dev))
2771 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2772 
2773 	linear_offset = y * fb->pitches[0] + x * pixel_size;
2774 
2775 	if (INTEL_INFO(dev)->gen >= 4) {
2776 		intel_crtc->dspaddr_offset =
2777 			intel_gen4_compute_page_offset(dev_priv,
2778 						       &x, &y, obj->tiling_mode,
2779 						       pixel_size,
2780 						       fb->pitches[0]);
2781 		linear_offset -= intel_crtc->dspaddr_offset;
2782 	} else {
2783 		intel_crtc->dspaddr_offset = linear_offset;
2784 	}
2785 
2786 	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
2787 		dspcntr |= DISPPLANE_ROTATE_180;
2788 
2789 		x += (intel_crtc->config->pipe_src_w - 1);
2790 		y += (intel_crtc->config->pipe_src_h - 1);
2791 
2792 		/* Find the last pixel of the last line of the display
2793 		 * data and add its offset to linear_offset. */
2794 		linear_offset +=
2795 			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2796 			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
2797 	}
2798 
2799 	intel_crtc->adjusted_x = x;
2800 	intel_crtc->adjusted_y = y;
2801 
2802 	I915_WRITE(reg, dspcntr);
2803 
2804 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2805 	if (INTEL_INFO(dev)->gen >= 4) {
2806 		I915_WRITE(DSPSURF(plane),
2807 			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2808 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2809 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2810 	} else
2811 		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2812 	POSTING_READ(reg);
2813 }
2814 
2815 static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2816 					  struct drm_framebuffer *fb,
2817 					  int x, int y)
2818 {
2819 	struct drm_device *dev = crtc->dev;
2820 	struct drm_i915_private *dev_priv = dev->dev_private;
2821 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2822 	struct drm_plane *primary = crtc->primary;
2823 	bool visible = to_intel_plane_state(primary->state)->visible;
2824 	struct drm_i915_gem_object *obj;
2825 	int plane = intel_crtc->plane;
2826 	unsigned long linear_offset;
2827 	u32 dspcntr;
2828 	u32 reg = DSPCNTR(plane);
2829 	int pixel_size;
2830 
2831 	if (!visible || !fb) {
2832 		I915_WRITE(reg, 0);
2833 		I915_WRITE(DSPSURF(plane), 0);
2834 		POSTING_READ(reg);
2835 		return;
2836 	}
2837 
2838 	obj = intel_fb_obj(fb);
2839 	if (WARN_ON(obj == NULL))
2840 		return;
2841 
2842 	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2843 
2844 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2845 
2846 	dspcntr |= DISPLAY_PLANE_ENABLE;
2847 
2848 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2849 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2850 
2851 	switch (fb->pixel_format) {
2852 	case DRM_FORMAT_C8:
2853 		dspcntr |= DISPPLANE_8BPP;
2854 		break;
2855 	case DRM_FORMAT_RGB565:
2856 		dspcntr |= DISPPLANE_BGRX565;
2857 		break;
2858 	case DRM_FORMAT_XRGB8888:
2859 		dspcntr |= DISPPLANE_BGRX888;
2860 		break;
2861 	case DRM_FORMAT_XBGR8888:
2862 		dspcntr |= DISPPLANE_RGBX888;
2863 		break;
2864 	case DRM_FORMAT_XRGB2101010:
2865 		dspcntr |= DISPPLANE_BGRX101010;
2866 		break;
2867 	case DRM_FORMAT_XBGR2101010:
2868 		dspcntr |= DISPPLANE_RGBX101010;
2869 		break;
2870 	default:
2871 		BUG();
2872 	}
2873 
2874 	if (obj->tiling_mode != I915_TILING_NONE)
2875 		dspcntr |= DISPPLANE_TILED;
2876 
2877 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2878 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2879 
2880 	linear_offset = y * fb->pitches[0] + x * pixel_size;
2881 	intel_crtc->dspaddr_offset =
2882 		intel_gen4_compute_page_offset(dev_priv,
2883 					       &x, &y, obj->tiling_mode,
2884 					       pixel_size,
2885 					       fb->pitches[0]);
2886 	linear_offset -= intel_crtc->dspaddr_offset;
2887 	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
2888 		dspcntr |= DISPPLANE_ROTATE_180;
2889 
2890 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2891 			x += (intel_crtc->config->pipe_src_w - 1);
2892 			y += (intel_crtc->config->pipe_src_h - 1);
2893 
2894 			/* Find the last pixel of the last line of the display
2895 			 * data and add its offset to linear_offset. */
2896 			linear_offset +=
2897 				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2898 				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
2899 		}
2900 	}
2901 
2902 	intel_crtc->adjusted_x = x;
2903 	intel_crtc->adjusted_y = y;
2904 
2905 	I915_WRITE(reg, dspcntr);
2906 
2907 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2908 	I915_WRITE(DSPSURF(plane),
2909 		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2910 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2911 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2912 	} else {
2913 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2914 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2915 	}
2916 	POSTING_READ(reg);
2917 }
2918 
2919 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2920 			      uint32_t pixel_format)
2921 {
2922 	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2923 
2924 	/*
2925 	 * The stride is expressed either as a multiple of 64-byte
2926 	 * chunks for linear buffers or as a number of tiles for
2927 	 * tiled buffers.
2928 	 */
2929 	switch (fb_modifier) {
2930 	case DRM_FORMAT_MOD_NONE:
2931 		return 64;
2932 	case I915_FORMAT_MOD_X_TILED:
2933 		if (INTEL_INFO(dev)->gen == 2)
2934 			return 128;
2935 		return 512;
2936 	case I915_FORMAT_MOD_Y_TILED:
2937 		/* No need to check for old gens and Y tiling since this is
2938 		 * about the display engine and those will be blocked before
2939 		 * we get here.
2940 		 */
2941 		return 128;
2942 	case I915_FORMAT_MOD_Yf_TILED:
2943 		if (bits_per_pixel == 8)
2944 			return 64;
2945 		else
2946 			return 128;
2947 	default:
2948 		MISSING_CASE(fb_modifier);
2949 		return 64;
2950 	}
2951 }
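
/*
 * Example of how this feeds the stride register (see
 * skylake_update_primary_plane() below): a gen9 X-tiled fb with
 * pitches[0] = 7680 bytes uses stride_div = 512, so PLANE_STRIDE is
 * programmed as 7680 / 512 = 15 tiles; a linear fb would instead be
 * programmed in 64-byte units.
 */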
2952 
2953 u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2954 			   struct drm_i915_gem_object *obj,
2955 			   unsigned int plane)
2956 {
2957 	const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
2958 	struct i915_vma *vma;
2959 	u64 offset;
2960 
2961 	if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
2962 		view = &i915_ggtt_view_rotated;
2963 
2964 	vma = i915_gem_obj_to_ggtt_view(obj, view);
2965 	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2966 		view->type))
2967 		return -1;
2968 
2969 	offset = vma->node.start;
2970 
2971 	if (plane == 1) {
2972 		offset += vma->ggtt_view.rotation_info.uv_start_page *
2973 			  PAGE_SIZE;
2974 	}
2975 
2976 	WARN_ON(upper_32_bits(offset));
2977 
2978 	return lower_32_bits(offset);
2979 }
2980 
2981 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2982 {
2983 	struct drm_device *dev = intel_crtc->base.dev;
2984 	struct drm_i915_private *dev_priv = dev->dev_private;
2985 
2986 	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2987 	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2988 	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2989 }
2990 
2991 /*
2992  * This function detaches (i.e. unbinds) any scalers that are not in use
2993  */
2994 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2995 {
2996 	struct intel_crtc_scaler_state *scaler_state;
2997 	int i;
2998 
2999 	scaler_state = &intel_crtc->config->scaler_state;
3000 
3001 	/* loop through and disable scalers that aren't in use */
3002 	for (i = 0; i < intel_crtc->num_scalers; i++) {
3003 		if (!scaler_state->scalers[i].in_use)
3004 			skl_detach_scaler(intel_crtc, i);
3005 	}
3006 }
3007 
3008 u32 skl_plane_ctl_format(uint32_t pixel_format)
3009 {
3010 	switch (pixel_format) {
3011 	case DRM_FORMAT_C8:
3012 		return PLANE_CTL_FORMAT_INDEXED;
3013 	case DRM_FORMAT_RGB565:
3014 		return PLANE_CTL_FORMAT_RGB_565;
3015 	case DRM_FORMAT_XBGR8888:
3016 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3017 	case DRM_FORMAT_XRGB8888:
3018 		return PLANE_CTL_FORMAT_XRGB_8888;
3019 	/*
3020 	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
3021 	 * to be already pre-multiplied. We need to add a knob (or a different
3022 	 * DRM_FORMAT) for user-space to configure that.
3023 	 */
3024 	case DRM_FORMAT_ABGR8888:
3025 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
3026 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3027 	case DRM_FORMAT_ARGB8888:
3028 		return PLANE_CTL_FORMAT_XRGB_8888 |
3029 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3030 	case DRM_FORMAT_XRGB2101010:
3031 		return PLANE_CTL_FORMAT_XRGB_2101010;
3032 	case DRM_FORMAT_XBGR2101010:
3033 		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3034 	case DRM_FORMAT_YUYV:
3035 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3036 	case DRM_FORMAT_YVYU:
3037 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3038 	case DRM_FORMAT_UYVY:
3039 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3040 	case DRM_FORMAT_VYUY:
3041 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3042 	default:
3043 		MISSING_CASE(pixel_format);
3044 	}
3045 
3046 	return 0;
3047 }
3048 
3049 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3050 {
3051 	switch (fb_modifier) {
3052 	case DRM_FORMAT_MOD_NONE:
3053 		break;
3054 	case I915_FORMAT_MOD_X_TILED:
3055 		return PLANE_CTL_TILED_X;
3056 	case I915_FORMAT_MOD_Y_TILED:
3057 		return PLANE_CTL_TILED_Y;
3058 	case I915_FORMAT_MOD_Yf_TILED:
3059 		return PLANE_CTL_TILED_YF;
3060 	default:
3061 		MISSING_CASE(fb_modifier);
3062 	}
3063 
3064 	return 0;
3065 }
3066 
3067 u32 skl_plane_ctl_rotation(unsigned int rotation)
3068 {
3069 	switch (rotation) {
3070 	case BIT(DRM_ROTATE_0):
3071 		break;
3072 	/*
3073 	 * DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
3074 	 * while i915 HW rotation is clockwise; that's why the values are swapped.
3075 	 */
3076 	case BIT(DRM_ROTATE_90):
3077 		return PLANE_CTL_ROTATE_270;
3078 	case BIT(DRM_ROTATE_180):
3079 		return PLANE_CTL_ROTATE_180;
3080 	case BIT(DRM_ROTATE_270):
3081 		return PLANE_CTL_ROTATE_90;
3082 	default:
3083 		MISSING_CASE(rotation);
3084 	}
3085 
3086 	return 0;
3087 }
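
/*
 * E.g. a user-space request for BIT(DRM_ROTATE_90) (counter-clockwise)
 * is programmed as PLANE_CTL_ROTATE_270 in hardware; see the plane_ctl
 * composition in skylake_update_primary_plane() below for how these
 * bits are combined with the format and tiling bits.
 */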
3088 
3089 static void skylake_update_primary_plane(struct drm_crtc *crtc,
3090 					 struct drm_framebuffer *fb,
3091 					 int x, int y)
3092 {
3093 	struct drm_device *dev = crtc->dev;
3094 	struct drm_i915_private *dev_priv = dev->dev_private;
3095 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3096 	struct drm_plane *plane = crtc->primary;
3097 	bool visible = to_intel_plane_state(plane->state)->visible;
3098 	struct drm_i915_gem_object *obj;
3099 	int pipe = intel_crtc->pipe;
3100 	u32 plane_ctl, stride_div, stride;
3101 	u32 tile_height, plane_offset, plane_size;
3102 	unsigned int rotation;
3103 	int x_offset, y_offset;
3104 	u32 surf_addr;
3105 	struct intel_crtc_state *crtc_state = intel_crtc->config;
3106 	struct intel_plane_state *plane_state;
3107 	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
3108 	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
3109 	int scaler_id = -1;
3110 
3111 	plane_state = to_intel_plane_state(plane->state);
3112 
3113 	if (!visible || !fb) {
3114 		I915_WRITE(PLANE_CTL(pipe, 0), 0);
3115 		I915_WRITE(PLANE_SURF(pipe, 0), 0);
3116 		POSTING_READ(PLANE_CTL(pipe, 0));
3117 		return;
3118 	}
3119 
3120 	plane_ctl = PLANE_CTL_ENABLE |
3121 		    PLANE_CTL_PIPE_GAMMA_ENABLE |
3122 		    PLANE_CTL_PIPE_CSC_ENABLE;
3123 
3124 	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
3125 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
3126 	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3127 
3128 	rotation = plane->state->rotation;
3129 	plane_ctl |= skl_plane_ctl_rotation(rotation);
3130 
3131 	obj = intel_fb_obj(fb);
3132 	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
3133 					       fb->pixel_format);
3134 	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3135 
3136 	WARN_ON(drm_rect_width(&plane_state->src) == 0);
3137 
3138 	scaler_id = plane_state->scaler_id;
3139 	src_x = plane_state->src.x1 >> 16;
3140 	src_y = plane_state->src.y1 >> 16;
3141 	src_w = drm_rect_width(&plane_state->src) >> 16;
3142 	src_h = drm_rect_height(&plane_state->src) >> 16;
3143 	dst_x = plane_state->dst.x1;
3144 	dst_y = plane_state->dst.y1;
3145 	dst_w = drm_rect_width(&plane_state->dst);
3146 	dst_h = drm_rect_height(&plane_state->dst);
3147 
3148 	WARN_ON(x != src_x || y != src_y);
3149 
3150 	if (intel_rotation_90_or_270(rotation)) {
3151 		/* stride = Surface height in tiles */
3152 		tile_height = intel_tile_height(dev, fb->pixel_format,
3153 						fb->modifier[0], 0);
3154 		stride = DIV_ROUND_UP(fb->height, tile_height);
3155 		x_offset = stride * tile_height - y - src_h;
3156 		y_offset = x;
3157 		plane_size = (src_w - 1) << 16 | (src_h - 1);
3158 	} else {
3159 		stride = fb->pitches[0] / stride_div;
3160 		x_offset = x;
3161 		y_offset = y;
3162 		plane_size = (src_h - 1) << 16 | (src_w - 1);
3163 	}
3164 	plane_offset = y_offset << 16 | x_offset;
3165 
3166 	intel_crtc->adjusted_x = x_offset;
3167 	intel_crtc->adjusted_y = y_offset;
3168 
3169 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3170 	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3171 	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
3172 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
3173 
3174 	if (scaler_id >= 0) {
3175 		uint32_t ps_ctrl = 0;
3176 
3177 		WARN_ON(!dst_w || !dst_h);
3178 		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
3179 			crtc_state->scaler_state.scalers[scaler_id].mode;
3180 		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
3181 		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
3182 		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
3183 		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
3184 		I915_WRITE(PLANE_POS(pipe, 0), 0);
3185 	} else {
3186 		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
3187 	}
3188 
3189 	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3190 
3191 	POSTING_READ(PLANE_SURF(pipe, 0));
3192 }
3193 
3194 /* Assume fb object is pinned & idle & fenced and just update base pointers */
3195 static int
3196 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3197 			   int x, int y, enum mode_set_atomic state)
3198 {
3199 	struct drm_device *dev = crtc->dev;
3200 	struct drm_i915_private *dev_priv = dev->dev_private;
3201 
3202 	if (dev_priv->fbc.disable_fbc)
3203 		dev_priv->fbc.disable_fbc(dev_priv);
3204 
3205 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
3206 
3207 	return 0;
3208 }
3209 
3210 static void intel_complete_page_flips(struct drm_device *dev)
3211 {
3212 	struct drm_crtc *crtc;
3213 
3214 	for_each_crtc(dev, crtc) {
3215 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3216 		enum plane plane = intel_crtc->plane;
3217 
3218 		intel_prepare_page_flip(dev, plane);
3219 		intel_finish_page_flip_plane(dev, plane);
3220 	}
3221 }
3222 
3223 static void intel_update_primary_planes(struct drm_device *dev)
3224 {
3225 	struct drm_crtc *crtc;
3226 
3227 	for_each_crtc(dev, crtc) {
3228 		struct intel_plane *plane = to_intel_plane(crtc->primary);
3229 		struct intel_plane_state *plane_state;
3230 
3231 		drm_modeset_lock_crtc(crtc, &plane->base);
3232 
3233 		plane_state = to_intel_plane_state(plane->base.state);
3234 
3235 		if (plane_state->base.fb)
3236 			plane->commit_plane(&plane->base, plane_state);
3237 
3238 		drm_modeset_unlock_crtc(crtc);
3239 	}
3240 }
3241 
3242 void intel_prepare_reset(struct drm_device *dev)
3243 {
3244 	/* no reset support for gen2 */
3245 	if (IS_GEN2(dev))
3246 		return;
3247 
3248 	/* reset doesn't touch the display */
3249 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3250 		return;
3251 
3252 	drm_modeset_lock_all(dev);
3253 	/*
3254 	 * Disabling the crtcs gracefully seems nicer. Also the
3255 	 * g33 docs say we should at least disable all the planes.
3256 	 */
3257 	intel_display_suspend(dev);
3258 }
3259 
3260 void intel_finish_reset(struct drm_device *dev)
3261 {
3262 	struct drm_i915_private *dev_priv = to_i915(dev);
3263 
3264 	/*
3265 	 * Flips in the rings will be nuked by the reset,
3266 	 * so complete all pending flips so that user space
3267 	 * will get its events and not get stuck.
3268 	 */
3269 	intel_complete_page_flips(dev);
3270 
3271 	/* no reset support for gen2 */
3272 	if (IS_GEN2(dev))
3273 		return;
3274 
3275 	/* reset doesn't touch the display */
3276 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
3277 		/*
3278 		 * Flips in the rings have been nuked by the reset,
3279 		 * so update the base address of all primary
3280 		 * planes to the last fb to make sure we're
3281 		 * showing the correct fb after a reset.
3282 		 *
3283 		 * FIXME: Atomic will make this obsolete since we won't schedule
3284 		 * CS-based flips (which might get lost in gpu resets) any more.
3285 		 */
3286 		intel_update_primary_planes(dev);
3287 		return;
3288 	}
3289 
3290 	/*
3291 	 * The display has been reset as well,
3292 	 * so need a full re-initialization.
3293 	 */
3294 	intel_runtime_pm_disable_interrupts(dev_priv);
3295 	intel_runtime_pm_enable_interrupts(dev_priv);
3296 
3297 	intel_modeset_init_hw(dev);
3298 
3299 	spin_lock_irq(&dev_priv->irq_lock);
3300 	if (dev_priv->display.hpd_irq_setup)
3301 		dev_priv->display.hpd_irq_setup(dev);
3302 	spin_unlock_irq(&dev_priv->irq_lock);
3303 
3304 	intel_display_resume(dev);
3305 
3306 	intel_hpd_init(dev_priv);
3307 
3308 	drm_modeset_unlock_all(dev);
3309 }
3310 
3311 static void
3312 intel_finish_fb(struct drm_framebuffer *old_fb)
3313 {
3314 	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
3315 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3316 	bool was_interruptible = dev_priv->mm.interruptible;
3317 	int ret;
3318 
3319 	/* Big Hammer, we also need to ensure that any pending
3320 	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
3321 	 * current scanout is retired before unpinning the old
3322 	 * framebuffer. Note that we rely on userspace rendering
3323 	 * into the buffer attached to the pipe they are waiting
3324 	 * on. If not, userspace generates a GPU hang with IPEHR
3325 	 * point to the MI_WAIT_FOR_EVENT.
3326 	 *
3327 	 * This should only fail upon a hung GPU, in which case we
3328 	 * can safely continue.
3329 	 */
3330 	dev_priv->mm.interruptible = false;
3331 	ret = i915_gem_object_wait_rendering(obj, true);
3332 	dev_priv->mm.interruptible = was_interruptible;
3333 
3334 	WARN_ON(ret);
3335 }
3336 
3337 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3338 {
3339 	struct drm_device *dev = crtc->dev;
3340 	struct drm_i915_private *dev_priv = dev->dev_private;
3341 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3342 	bool pending;
3343 
3344 	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3345 	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3346 		return false;
3347 
3348 	spin_lock_irq(&dev->event_lock);
3349 	pending = to_intel_crtc(crtc)->unpin_work != NULL;
3350 	spin_unlock_irq(&dev->event_lock);
3351 
3352 	return pending;
3353 }
3354 
3355 static void intel_update_pipe_config(struct intel_crtc *crtc,
3356 				     struct intel_crtc_state *old_crtc_state)
3357 {
3358 	struct drm_device *dev = crtc->base.dev;
3359 	struct drm_i915_private *dev_priv = dev->dev_private;
3360 	struct intel_crtc_state *pipe_config =
3361 		to_intel_crtc_state(crtc->base.state);
3362 
3363 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3364 	crtc->base.mode = crtc->base.state->mode;
3365 
3366 	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3367 		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3368 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3369 
3370 	if (HAS_DDI(dev))
3371 		intel_set_pipe_csc(&crtc->base);
3372 
3373 	/*
3374 	 * Update pipe size and adjust fitter if needed: the reason for this is
3375 	 * that in compute_mode_changes we check the native mode (not the pfit
3376 	 * mode) to see if we can flip rather than do a full mode set. In the
3377 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
3378 	 * pfit state, we'll end up with a big fb scanned out into the wrong
3379 	 * sized surface.
3380 	 */
3381 
3382 	I915_WRITE(PIPESRC(crtc->pipe),
3383 		   ((pipe_config->pipe_src_w - 1) << 16) |
3384 		   (pipe_config->pipe_src_h - 1));
3385 
3386 	/* on skylake this is done by detaching scalers */
3387 	if (INTEL_INFO(dev)->gen >= 9) {
3388 		skl_detach_scalers(crtc);
3389 
3390 		if (pipe_config->pch_pfit.enabled)
3391 			skylake_pfit_enable(crtc);
3392 	} else if (HAS_PCH_SPLIT(dev)) {
3393 		if (pipe_config->pch_pfit.enabled)
3394 			ironlake_pfit_enable(crtc);
3395 		else if (old_crtc_state->pch_pfit.enabled)
3396 			ironlake_pfit_disable(crtc, true);
3397 	}
3398 }
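
/*
 * Illustrative example: the PIPESRC write above packs a zero-based width
 * into bits 31:16 and a zero-based height into bits 15:0, so a 1920x1080
 * source becomes ((1920 - 1) << 16) | (1080 - 1) == 0x077f0437.
 */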
3399 
3400 static void intel_fdi_normal_train(struct drm_crtc *crtc)
3401 {
3402 	struct drm_device *dev = crtc->dev;
3403 	struct drm_i915_private *dev_priv = dev->dev_private;
3404 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3405 	int pipe = intel_crtc->pipe;
3406 	u32 reg, temp;
3407 
3408 	/* enable normal train */
3409 	reg = FDI_TX_CTL(pipe);
3410 	temp = I915_READ(reg);
3411 	if (IS_IVYBRIDGE(dev)) {
3412 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3413 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3414 	} else {
3415 		temp &= ~FDI_LINK_TRAIN_NONE;
3416 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3417 	}
3418 	I915_WRITE(reg, temp);
3419 
3420 	reg = FDI_RX_CTL(pipe);
3421 	temp = I915_READ(reg);
3422 	if (HAS_PCH_CPT(dev)) {
3423 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3424 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3425 	} else {
3426 		temp &= ~FDI_LINK_TRAIN_NONE;
3427 		temp |= FDI_LINK_TRAIN_NONE;
3428 	}
3429 	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3430 
3431 	/* wait one idle pattern time */
3432 	POSTING_READ(reg);
3433 	udelay(1000);
3434 
3435 	/* IVB wants error correction enabled */
3436 	if (IS_IVYBRIDGE(dev))
3437 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3438 			   FDI_FE_ERRC_ENABLE);
3439 }
3440 
3441 /* The FDI link training functions for ILK/Ibexpeak. */
3442 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3443 {
3444 	struct drm_device *dev = crtc->dev;
3445 	struct drm_i915_private *dev_priv = dev->dev_private;
3446 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3447 	int pipe = intel_crtc->pipe;
3448 	u32 reg, temp, tries;
3449 
3450 	/* FDI needs bits from pipe first */
3451 	assert_pipe_enabled(dev_priv, pipe);
3452 
3453 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3454 	   for train result */
3455 	reg = FDI_RX_IMR(pipe);
3456 	temp = I915_READ(reg);
3457 	temp &= ~FDI_RX_SYMBOL_LOCK;
3458 	temp &= ~FDI_RX_BIT_LOCK;
3459 	I915_WRITE(reg, temp);
3460 	I915_READ(reg);
3461 	udelay(150);
3462 
3463 	/* enable CPU FDI TX and PCH FDI RX */
3464 	reg = FDI_TX_CTL(pipe);
3465 	temp = I915_READ(reg);
3466 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3467 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3468 	temp &= ~FDI_LINK_TRAIN_NONE;
3469 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3470 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3471 
3472 	reg = FDI_RX_CTL(pipe);
3473 	temp = I915_READ(reg);
3474 	temp &= ~FDI_LINK_TRAIN_NONE;
3475 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3476 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3477 
3478 	POSTING_READ(reg);
3479 	udelay(150);
3480 
3481 	/* Ironlake workaround, enable clock pointer after FDI enable */
3482 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3483 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3484 		   FDI_RX_PHASE_SYNC_POINTER_EN);
3485 
3486 	reg = FDI_RX_IIR(pipe);
3487 	for (tries = 0; tries < 5; tries++) {
3488 		temp = I915_READ(reg);
3489 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3490 
3491 		if ((temp & FDI_RX_BIT_LOCK)) {
3492 			DRM_DEBUG_KMS("FDI train 1 done.\n");
3493 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3494 			break;
3495 		}
3496 	}
3497 	if (tries == 5)
3498 		DRM_ERROR("FDI train 1 fail!\n");
3499 
3500 	/* Train 2 */
3501 	reg = FDI_TX_CTL(pipe);
3502 	temp = I915_READ(reg);
3503 	temp &= ~FDI_LINK_TRAIN_NONE;
3504 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3505 	I915_WRITE(reg, temp);
3506 
3507 	reg = FDI_RX_CTL(pipe);
3508 	temp = I915_READ(reg);
3509 	temp &= ~FDI_LINK_TRAIN_NONE;
3510 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3511 	I915_WRITE(reg, temp);
3512 
3513 	POSTING_READ(reg);
3514 	udelay(150);
3515 
3516 	reg = FDI_RX_IIR(pipe);
3517 	for (tries = 0; tries < 5; tries++) {
3518 		temp = I915_READ(reg);
3519 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3520 
3521 		if (temp & FDI_RX_SYMBOL_LOCK) {
3522 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3523 			DRM_DEBUG_KMS("FDI train 2 done.\n");
3524 			break;
3525 		}
3526 	}
3527 	if (tries == 5)
3528 		DRM_ERROR("FDI train 2 fail!\n");
3529 
3530 	DRM_DEBUG_KMS("FDI train done\n");
3531 
3532 }
3533 
3534 static const int snb_b_fdi_train_param[] = {
3535 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3536 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3537 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3538 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3539 };
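
/*
 * Judging by the register names, each entry above selects one voltage-swing/
 * pre-emphasis combination (400mV/0dB, 400mV/6dB, 600mV/3.5dB, 800mV/0dB);
 * the training loops below step through them until the receiver reports
 * bit/symbol lock.
 */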
3540 
3541 /* The FDI link training functions for SNB/Cougarpoint. */
3542 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3543 {
3544 	struct drm_device *dev = crtc->dev;
3545 	struct drm_i915_private *dev_priv = dev->dev_private;
3546 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3547 	int pipe = intel_crtc->pipe;
3548 	u32 reg, temp, i, retry;
3549 
3550 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3551 	   for train result */
3552 	reg = FDI_RX_IMR(pipe);
3553 	temp = I915_READ(reg);
3554 	temp &= ~FDI_RX_SYMBOL_LOCK;
3555 	temp &= ~FDI_RX_BIT_LOCK;
3556 	I915_WRITE(reg, temp);
3557 
3558 	POSTING_READ(reg);
3559 	udelay(150);
3560 
3561 	/* enable CPU FDI TX and PCH FDI RX */
3562 	reg = FDI_TX_CTL(pipe);
3563 	temp = I915_READ(reg);
3564 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3565 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3566 	temp &= ~FDI_LINK_TRAIN_NONE;
3567 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3568 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3569 	/* SNB-B */
3570 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3571 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3572 
3573 	I915_WRITE(FDI_RX_MISC(pipe),
3574 		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3575 
3576 	reg = FDI_RX_CTL(pipe);
3577 	temp = I915_READ(reg);
3578 	if (HAS_PCH_CPT(dev)) {
3579 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3580 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3581 	} else {
3582 		temp &= ~FDI_LINK_TRAIN_NONE;
3583 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3584 	}
3585 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3586 
3587 	POSTING_READ(reg);
3588 	udelay(150);
3589 
3590 	for (i = 0; i < 4; i++) {
3591 		reg = FDI_TX_CTL(pipe);
3592 		temp = I915_READ(reg);
3593 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3594 		temp |= snb_b_fdi_train_param[i];
3595 		I915_WRITE(reg, temp);
3596 
3597 		POSTING_READ(reg);
3598 		udelay(500);
3599 
3600 		for (retry = 0; retry < 5; retry++) {
3601 			reg = FDI_RX_IIR(pipe);
3602 			temp = I915_READ(reg);
3603 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3604 			if (temp & FDI_RX_BIT_LOCK) {
3605 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3606 				DRM_DEBUG_KMS("FDI train 1 done.\n");
3607 				break;
3608 			}
3609 			udelay(50);
3610 		}
3611 		if (retry < 5)
3612 			break;
3613 	}
3614 	if (i == 4)
3615 		DRM_ERROR("FDI train 1 fail!\n");
3616 
3617 	/* Train 2 */
3618 	reg = FDI_TX_CTL(pipe);
3619 	temp = I915_READ(reg);
3620 	temp &= ~FDI_LINK_TRAIN_NONE;
3621 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3622 	if (IS_GEN6(dev)) {
3623 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3624 		/* SNB-B */
3625 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3626 	}
3627 	I915_WRITE(reg, temp);
3628 
3629 	reg = FDI_RX_CTL(pipe);
3630 	temp = I915_READ(reg);
3631 	if (HAS_PCH_CPT(dev)) {
3632 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3633 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3634 	} else {
3635 		temp &= ~FDI_LINK_TRAIN_NONE;
3636 		temp |= FDI_LINK_TRAIN_PATTERN_2;
3637 	}
3638 	I915_WRITE(reg, temp);
3639 
3640 	POSTING_READ(reg);
3641 	udelay(150);
3642 
3643 	for (i = 0; i < 4; i++) {
3644 		reg = FDI_TX_CTL(pipe);
3645 		temp = I915_READ(reg);
3646 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3647 		temp |= snb_b_fdi_train_param[i];
3648 		I915_WRITE(reg, temp);
3649 
3650 		POSTING_READ(reg);
3651 		udelay(500);
3652 
3653 		for (retry = 0; retry < 5; retry++) {
3654 			reg = FDI_RX_IIR(pipe);
3655 			temp = I915_READ(reg);
3656 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3657 			if (temp & FDI_RX_SYMBOL_LOCK) {
3658 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3659 				DRM_DEBUG_KMS("FDI train 2 done.\n");
3660 				break;
3661 			}
3662 			udelay(50);
3663 		}
3664 		if (retry < 5)
3665 			break;
3666 	}
3667 	if (i == 4)
3668 		DRM_ERROR("FDI train 2 fail!\n");
3669 
3670 	DRM_DEBUG_KMS("FDI train done.\n");
3671 }
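
/*
 * Note the two-level retry structure above: the outer loop steps through
 * the four snb_b_fdi_train_param drive settings, and for each setting the
 * inner loop polls FDI_RX_IIR up to five times for lock before escalating
 * to the next setting.
 */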
3672 
3673 /* Manual link training for Ivy Bridge A0 parts */
3674 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3675 {
3676 	struct drm_device *dev = crtc->dev;
3677 	struct drm_i915_private *dev_priv = dev->dev_private;
3678 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3679 	int pipe = intel_crtc->pipe;
3680 	u32 reg, temp, i, j;
3681 
3682 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3683 	   for train result */
3684 	reg = FDI_RX_IMR(pipe);
3685 	temp = I915_READ(reg);
3686 	temp &= ~FDI_RX_SYMBOL_LOCK;
3687 	temp &= ~FDI_RX_BIT_LOCK;
3688 	I915_WRITE(reg, temp);
3689 
3690 	POSTING_READ(reg);
3691 	udelay(150);
3692 
3693 	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3694 		      I915_READ(FDI_RX_IIR(pipe)));
3695 
3696 	/* Try each vswing and preemphasis setting twice before moving on */
3697 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3698 		/* disable first in case we need to retry */
3699 		reg = FDI_TX_CTL(pipe);
3700 		temp = I915_READ(reg);
3701 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3702 		temp &= ~FDI_TX_ENABLE;
3703 		I915_WRITE(reg, temp);
3704 
3705 		reg = FDI_RX_CTL(pipe);
3706 		temp = I915_READ(reg);
3707 		temp &= ~FDI_LINK_TRAIN_AUTO;
3708 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3709 		temp &= ~FDI_RX_ENABLE;
3710 		I915_WRITE(reg, temp);
3711 
3712 		/* enable CPU FDI TX and PCH FDI RX */
3713 		reg = FDI_TX_CTL(pipe);
3714 		temp = I915_READ(reg);
3715 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
3716 		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3717 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3718 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
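		/* j/2 maps two consecutive iterations onto the same table
		 * entry, giving each drive setting two attempts */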
3719 		temp |= snb_b_fdi_train_param[j/2];
3720 		temp |= FDI_COMPOSITE_SYNC;
3721 		I915_WRITE(reg, temp | FDI_TX_ENABLE);
3722 
3723 		I915_WRITE(FDI_RX_MISC(pipe),
3724 			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3725 
3726 		reg = FDI_RX_CTL(pipe);
3727 		temp = I915_READ(reg);
3728 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3729 		temp |= FDI_COMPOSITE_SYNC;
3730 		I915_WRITE(reg, temp | FDI_RX_ENABLE);
3731 
3732 		POSTING_READ(reg);
3733 		udelay(1); /* should be 0.5us */
3734 
3735 		for (i = 0; i < 4; i++) {
3736 			reg = FDI_RX_IIR(pipe);
3737 			temp = I915_READ(reg);
3738 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3739 
3740 			if (temp & FDI_RX_BIT_LOCK ||
3741 			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3742 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3743 				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3744 					      i);
3745 				break;
3746 			}
3747 			udelay(1); /* should be 0.5us */
3748 		}
3749 		if (i == 4) {
3750 			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3751 			continue;
3752 		}
3753 
3754 		/* Train 2 */
3755 		reg = FDI_TX_CTL(pipe);
3756 		temp = I915_READ(reg);
3757 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3758 		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3759 		I915_WRITE(reg, temp);
3760 
3761 		reg = FDI_RX_CTL(pipe);
3762 		temp = I915_READ(reg);
3763 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3764 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3765 		I915_WRITE(reg, temp);
3766 
3767 		POSTING_READ(reg);
3768 		udelay(2); /* should be 1.5us */
3769 
3770 		for (i = 0; i < 4; i++) {
3771 			reg = FDI_RX_IIR(pipe);
3772 			temp = I915_READ(reg);
3773 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3774 
3775 			if (temp & FDI_RX_SYMBOL_LOCK ||
3776 			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3777 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3778 				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3779 					      i);
3780 				goto train_done;
3781 			}
3782 			udelay(2); /* should be 1.5us */
3783 		}
3784 		if (i == 4)
3785 			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3786 	}
3787 
3788 train_done:
3789 	DRM_DEBUG_KMS("FDI train done.\n");
3790 }
3791 
3792 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3793 {
3794 	struct drm_device *dev = intel_crtc->base.dev;
3795 	struct drm_i915_private *dev_priv = dev->dev_private;
3796 	int pipe = intel_crtc->pipe;
3797 	u32 reg, temp;
3798 
3799 
3800 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3801 	reg = FDI_RX_CTL(pipe);
3802 	temp = I915_READ(reg);
3803 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3804 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3805 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3806 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3807 
3808 	POSTING_READ(reg);
3809 	udelay(200);
3810 
3811 	/* Switch from Rawclk to PCDclk */
3812 	temp = I915_READ(reg);
3813 	I915_WRITE(reg, temp | FDI_PCDCLK);
3814 
3815 	POSTING_READ(reg);
3816 	udelay(200);
3817 
3818 	/* Enable CPU FDI TX PLL, always on for Ironlake */
3819 	reg = FDI_TX_CTL(pipe);
3820 	temp = I915_READ(reg);
3821 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3822 		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3823 
3824 		POSTING_READ(reg);
3825 		udelay(100);
3826 	}
3827 }
3828 
3829 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3830 {
3831 	struct drm_device *dev = intel_crtc->base.dev;
3832 	struct drm_i915_private *dev_priv = dev->dev_private;
3833 	int pipe = intel_crtc->pipe;
3834 	u32 reg, temp;
3835 
3836 	/* Switch from PCDclk to Rawclk */
3837 	reg = FDI_RX_CTL(pipe);
3838 	temp = I915_READ(reg);
3839 	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3840 
3841 	/* Disable CPU FDI TX PLL */
3842 	reg = FDI_TX_CTL(pipe);
3843 	temp = I915_READ(reg);
3844 	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3845 
3846 	POSTING_READ(reg);
3847 	udelay(100);
3848 
3849 	reg = FDI_RX_CTL(pipe);
3850 	temp = I915_READ(reg);
3851 	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3852 
3853 	/* Wait for the clocks to turn off. */
3854 	POSTING_READ(reg);
3855 	udelay(100);
3856 }
3857 
3858 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3859 {
3860 	struct drm_device *dev = crtc->dev;
3861 	struct drm_i915_private *dev_priv = dev->dev_private;
3862 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3863 	int pipe = intel_crtc->pipe;
3864 	u32 reg, temp;
3865 
3866 	/* disable CPU FDI tx and PCH FDI rx */
3867 	reg = FDI_TX_CTL(pipe);
3868 	temp = I915_READ(reg);
3869 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3870 	POSTING_READ(reg);
3871 
3872 	reg = FDI_RX_CTL(pipe);
3873 	temp = I915_READ(reg);
3874 	temp &= ~(0x7 << 16);
3875 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3876 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3877 
3878 	POSTING_READ(reg);
3879 	udelay(100);
3880 
3881 	/* Ironlake workaround, disable clock pointer after downing FDI */
3882 	if (HAS_PCH_IBX(dev))
3883 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3884 
3885 	/* still set train pattern 1 */
3886 	reg = FDI_TX_CTL(pipe);
3887 	temp = I915_READ(reg);
3888 	temp &= ~FDI_LINK_TRAIN_NONE;
3889 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3890 	I915_WRITE(reg, temp);
3891 
3892 	reg = FDI_RX_CTL(pipe);
3893 	temp = I915_READ(reg);
3894 	if (HAS_PCH_CPT(dev)) {
3895 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3896 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3897 	} else {
3898 		temp &= ~FDI_LINK_TRAIN_NONE;
3899 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3900 	}
3901 	/* BPC in FDI rx is consistent with that in PIPECONF */
3902 	temp &= ~(0x07 << 16);
3903 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3904 	I915_WRITE(reg, temp);
3905 
3906 	POSTING_READ(reg);
3907 	udelay(100);
3908 }
3909 
3910 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3911 {
3912 	struct intel_crtc *crtc;
3913 
3914 	/* Note that we don't need to be called with mode_config.lock here
3915 	 * as our list of CRTC objects is static for the lifetime of the
3916 	 * device and so cannot disappear as we iterate. Similarly, we can
3917 	 * happily treat the predicates as racy, atomic checks as userspace
3918 	 * cannot claim and pin a new fb without at least acquiring the
3919 	 * struct_mutex and so serialising with us.
3920 	 */
3921 	for_each_intel_crtc(dev, crtc) {
3922 		if (atomic_read(&crtc->unpin_work_count) == 0)
3923 			continue;
3924 
3925 		if (crtc->unpin_work)
3926 			intel_wait_for_vblank(dev, crtc->pipe);
3927 
3928 		return true;
3929 	}
3930 
3931 	return false;
3932 }
3933 
3934 static void page_flip_completed(struct intel_crtc *intel_crtc)
3935 {
3936 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3937 	struct intel_unpin_work *work = intel_crtc->unpin_work;
3938 
3939 	/* ensure that the unpin work is consistent wrt ->pending. */
3940 	smp_rmb();
3941 	intel_crtc->unpin_work = NULL;
3942 
3943 	if (work->event)
3944 		drm_send_vblank_event(intel_crtc->base.dev,
3945 				      intel_crtc->pipe,
3946 				      work->event);
3947 
3948 	drm_crtc_vblank_put(&intel_crtc->base);
3949 
3950 	wake_up_all(&dev_priv->pending_flip_queue);
3951 	trace_i915_flip_complete(intel_crtc->plane,
3952 				 work->pending_flip_obj);
3953 
3954 	queue_work(dev_priv->wq, &work->work);
3955 }
3956 
3957 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3958 {
3959 	struct drm_device *dev = crtc->dev;
3960 	struct drm_i915_private *dev_priv = dev->dev_private;
3961 
3962 	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3963 	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3964 				       !intel_crtc_has_pending_flip(crtc),
3965 				       60*HZ) == 0)) {
3966 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3967 
3968 		spin_lock_irq(&dev->event_lock);
3969 		if (intel_crtc->unpin_work) {
3970 			WARN_ONCE(1, "Removing stuck page flip\n");
3971 			page_flip_completed(intel_crtc);
3972 		}
3973 		spin_unlock_irq(&dev->event_lock);
3974 	}
3975 
3976 	if (crtc->primary->fb) {
3977 		mutex_lock(&dev->struct_mutex);
3978 		intel_finish_fb(crtc->primary->fb);
3979 		mutex_unlock(&dev->struct_mutex);
3980 	}
3981 }
3982 
3983 /* Program iCLKIP clock to the desired frequency */
3984 static void lpt_program_iclkip(struct drm_crtc *crtc)
3985 {
3986 	struct drm_device *dev = crtc->dev;
3987 	struct drm_i915_private *dev_priv = dev->dev_private;
3988 	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3989 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3990 	u32 temp;
3991 
3992 	mutex_lock(&dev_priv->sb_lock);
3993 
3994 	/* It is necessary to ungate the pixclk gate prior to programming
3995 	 * the divisors, and gate it back when it is done.
3996 	 */
3997 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3998 
3999 	/* Disable SSCCTL */
4000 	intel_sbi_write(dev_priv, SBI_SSCCTL6,
4001 			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
4002 				SBI_SSCCTL_DISABLE,
4003 			SBI_ICLK);
4004 
4005 	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
4006 	if (clock == 20000) {
4007 		auxdiv = 1;
4008 		divsel = 0x41;
4009 		phaseinc = 0x20;
4010 	} else {
4011 		/* The iCLK virtual clock root frequency is in MHz,
4012 		 * but the adjusted_mode->crtc_clock is in KHz. To get the
4013 		 * divisors, it is necessary to divide one by another, so we
4014 		 * convert the virtual clock precision to KHz here for higher
4015 		 * precision.
4016 		 */
4017 		u32 iclk_virtual_root_freq = 172800 * 1000;
4018 		u32 iclk_pi_range = 64;
4019 		u32 desired_divisor, msb_divisor_value, pi_value;
4020 
4021 		desired_divisor = (iclk_virtual_root_freq / clock);
4022 		msb_divisor_value = desired_divisor / iclk_pi_range;
4023 		pi_value = desired_divisor % iclk_pi_range;
4024 
4025 		auxdiv = 0;
4026 		divsel = msb_divisor_value - 2;
4027 		phaseinc = pi_value;
4028 	}
4029 
4030 	/* This should not happen with any sane values */
4031 	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4032 		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4033 	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4034 		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4035 
4036 	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4037 			clock,
4038 			auxdiv,
4039 			divsel,
4040 			phasedir,
4041 			phaseinc);
4042 
4043 	/* Program SSCDIVINTPHASE6 */
4044 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4045 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4046 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4047 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4048 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4049 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4050 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
4051 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
4052 
4053 	/* Program SSCAUXDIV */
4054 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4055 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4056 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
4057 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
4058 
4059 	/* Enable modulator and associated divider */
4060 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4061 	temp &= ~SBI_SSCCTL_DISABLE;
4062 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4063 
4064 	/* Wait for initialization time */
4065 	udelay(24);
4066 
4067 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
4068 
4069 	mutex_unlock(&dev_priv->sb_lock);
4070 }
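
/*
 * Worked example (numbers are illustrative): for a 108000 kHz clock the
 * divisor fields above come out as
 *
 *	desired_divisor   = 172800000 / 108000;	// 1600
 *	msb_divisor_value = 1600 / 64;		// 25 -> divsel = 23
 *	pi_value          = 1600 % 64;		// 0  -> phaseinc = 0
 *
 * with auxdiv = 0; the integer divider and the 64-step phase interpolator
 * together realize a fractional divide of the iCLK virtual root clock.
 */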
4071 
4072 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4073 						enum pipe pch_transcoder)
4074 {
4075 	struct drm_device *dev = crtc->base.dev;
4076 	struct drm_i915_private *dev_priv = dev->dev_private;
4077 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
4078 
4079 	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4080 		   I915_READ(HTOTAL(cpu_transcoder)));
4081 	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4082 		   I915_READ(HBLANK(cpu_transcoder)));
4083 	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4084 		   I915_READ(HSYNC(cpu_transcoder)));
4085 
4086 	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4087 		   I915_READ(VTOTAL(cpu_transcoder)));
4088 	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4089 		   I915_READ(VBLANK(cpu_transcoder)));
4090 	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4091 		   I915_READ(VSYNC(cpu_transcoder)));
4092 	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4093 		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
4094 }
4095 
4096 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4097 {
4098 	struct drm_i915_private *dev_priv = dev->dev_private;
4099 	uint32_t temp;
4100 
4101 	temp = I915_READ(SOUTH_CHICKEN1);
4102 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4103 		return;
4104 
4105 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4106 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4107 
4108 	temp &= ~FDI_BC_BIFURCATION_SELECT;
4109 	if (enable)
4110 		temp |= FDI_BC_BIFURCATION_SELECT;
4111 
4112 	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4113 	I915_WRITE(SOUTH_CHICKEN1, temp);
4114 	POSTING_READ(SOUTH_CHICKEN1);
4115 }
4116 
4117 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4118 {
4119 	struct drm_device *dev = intel_crtc->base.dev;
4120 
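	/*
	 * FDI B and FDI C share one set of four lanes, and the bifurcation
	 * bit splits that set 2/2: pipe B needs the whole set when it uses
	 * more than two lanes, while pipe C can only ever drive the
	 * bifurcated half, hence the per-pipe handling below.
	 */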
4121 	switch (intel_crtc->pipe) {
4122 	case PIPE_A:
4123 		break;
4124 	case PIPE_B:
4125 		if (intel_crtc->config->fdi_lanes > 2)
4126 			cpt_set_fdi_bc_bifurcation(dev, false);
4127 		else
4128 			cpt_set_fdi_bc_bifurcation(dev, true);
4129 
4130 		break;
4131 	case PIPE_C:
4132 		cpt_set_fdi_bc_bifurcation(dev, true);
4133 
4134 		break;
4135 	default:
4136 		BUG();
4137 	}
4138 }
4139 
4140 /*
4141  * Enable PCH resources required for PCH ports:
4142  *   - PCH PLLs
4143  *   - FDI training & RX/TX
4144  *   - update transcoder timings
4145  *   - DP transcoding bits
4146  *   - transcoder
4147  */
4148 static void ironlake_pch_enable(struct drm_crtc *crtc)
4149 {
4150 	struct drm_device *dev = crtc->dev;
4151 	struct drm_i915_private *dev_priv = dev->dev_private;
4152 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4153 	int pipe = intel_crtc->pipe;
4154 	u32 reg, temp;
4155 
4156 	assert_pch_transcoder_disabled(dev_priv, pipe);
4157 
4158 	if (IS_IVYBRIDGE(dev))
4159 		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4160 
4161 	/* Write the TU size bits before fdi link training, so that error
4162 	 * detection works. */
4163 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
4164 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4165 
4166 	/* For PCH output, training FDI link */
4167 	dev_priv->display.fdi_link_train(crtc);
4168 
4169 	/* We need to program the right clock selection before writing the pixel
4170 	 * multiplier into the DPLL. */
4171 	if (HAS_PCH_CPT(dev)) {
4172 		u32 sel;
4173 
4174 		temp = I915_READ(PCH_DPLL_SEL);
4175 		temp |= TRANS_DPLL_ENABLE(pipe);
4176 		sel = TRANS_DPLLB_SEL(pipe);
4177 		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
4178 			temp |= sel;
4179 		else
4180 			temp &= ~sel;
4181 		I915_WRITE(PCH_DPLL_SEL, temp);
4182 	}
4183 
4184 	/* XXX: pch pll's can be enabled any time before we enable the PCH
4185 	 * transcoder, and we actually should do this to not upset any PCH
4186 	 * transcoder that already uses the clock when we share it.
4187 	 *
4188 	 * Note that enable_shared_dpll tries to do the right thing, but
4189 	 * get_shared_dpll unconditionally resets the pll - we need that to have
4190 	 * the right LVDS enable sequence. */
4191 	intel_enable_shared_dpll(intel_crtc);
4192 
4193 	/* set transcoder timing, panel must allow it */
4194 	assert_panel_unlocked(dev_priv, pipe);
4195 	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4196 
4197 	intel_fdi_normal_train(crtc);
4198 
4199 	/* For PCH DP, enable TRANS_DP_CTL */
4200 	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4201 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4202 		reg = TRANS_DP_CTL(pipe);
4203 		temp = I915_READ(reg);
4204 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
4205 			  TRANS_DP_SYNC_MASK |
4206 			  TRANS_DP_BPC_MASK);
4207 		temp |= TRANS_DP_OUTPUT_ENABLE;
4208 		temp |= bpc << 9; /* same format but at 11:9 */
4209 
4210 		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
4211 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4212 		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
4213 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4214 
4215 		switch (intel_trans_dp_port_sel(crtc)) {
4216 		case PCH_DP_B:
4217 			temp |= TRANS_DP_PORT_SEL_B;
4218 			break;
4219 		case PCH_DP_C:
4220 			temp |= TRANS_DP_PORT_SEL_C;
4221 			break;
4222 		case PCH_DP_D:
4223 			temp |= TRANS_DP_PORT_SEL_D;
4224 			break;
4225 		default:
4226 			BUG();
4227 		}
4228 
4229 		I915_WRITE(reg, temp);
4230 	}
4231 
4232 	ironlake_enable_pch_transcoder(dev_priv, pipe);
4233 }
4234 
4235 static void lpt_pch_enable(struct drm_crtc *crtc)
4236 {
4237 	struct drm_device *dev = crtc->dev;
4238 	struct drm_i915_private *dev_priv = dev->dev_private;
4239 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4240 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4241 
4242 	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4243 
4244 	lpt_program_iclkip(crtc);
4245 
4246 	/* Set transcoder timing. */
4247 	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4248 
4249 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4250 }
4251 
4252 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4253 						struct intel_crtc_state *crtc_state)
4254 {
4255 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
4256 	struct intel_shared_dpll *pll;
4257 	struct intel_shared_dpll_config *shared_dpll;
4258 	enum intel_dpll_id i;
4259 	int max = dev_priv->num_shared_dpll;
4260 
4261 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4262 
4263 	if (HAS_PCH_IBX(dev_priv->dev)) {
4264 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4265 		i = (enum intel_dpll_id) crtc->pipe;
4266 		pll = &dev_priv->shared_dplls[i];
4267 
4268 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4269 			      crtc->base.base.id, pll->name);
4270 
4271 		WARN_ON(shared_dpll[i].crtc_mask);
4272 
4273 		goto found;
4274 	}
4275 
4276 	if (IS_BROXTON(dev_priv->dev)) {
4277 		/* PLL is attached to port in bxt */
4278 		struct intel_encoder *encoder;
4279 		struct intel_digital_port *intel_dig_port;
4280 
4281 		encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4282 		if (WARN_ON(!encoder))
4283 			return NULL;
4284 
4285 		intel_dig_port = enc_to_dig_port(&encoder->base);
4286 		/* 1:1 mapping between ports and PLLs */
4287 		i = (enum intel_dpll_id)intel_dig_port->port;
4288 		pll = &dev_priv->shared_dplls[i];
4289 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4290 			crtc->base.base.id, pll->name);
4291 		WARN_ON(shared_dpll[i].crtc_mask);
4292 
4293 		goto found;
4294 	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4295 		/* Do not consider SPLL */
4296 		max = 2;
4297 
4298 	for (i = 0; i < max; i++) {
4299 		pll = &dev_priv->shared_dplls[i];
4300 
4301 		/* Only want to check enabled timings first */
4302 		if (shared_dpll[i].crtc_mask == 0)
4303 			continue;
4304 
4305 		if (memcmp(&crtc_state->dpll_hw_state,
4306 			   &shared_dpll[i].hw_state,
4307 			   sizeof(crtc_state->dpll_hw_state)) == 0) {
4308 			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %d)\n",
4309 				      crtc->base.base.id, pll->name,
4310 				      shared_dpll[i].crtc_mask,
4311 				      pll->active);
4312 			goto found;
4313 		}
4314 	}
4315 
4316 	/* Ok no matching timings, maybe there's a free one? */
4317 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4318 		pll = &dev_priv->shared_dplls[i];
4319 		if (shared_dpll[i].crtc_mask == 0) {
4320 			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4321 				      crtc->base.base.id, pll->name);
4322 			goto found;
4323 		}
4324 	}
4325 
4326 	return NULL;
4327 
4328 found:
4329 	if (shared_dpll[i].crtc_mask == 0)
4330 		shared_dpll[i].hw_state =
4331 			crtc_state->dpll_hw_state;
4332 
4333 	crtc_state->shared_dpll = i;
4334 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4335 			 pipe_name(crtc->pipe));
4336 
4337 	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
4338 
4339 	return pll;
4340 }
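
/*
 * Sketch of the allocation policy above, with the platform special cases
 * stripped out (illustrative pseudo-code, not driver code):
 *
 *	for each in-use pll:
 *		if (pll hw_state == requested hw_state)
 *			share it;
 *	for each pll:
 *		if (unused)
 *			take it and stage the new hw_state;
 *	return NULL;
 *
 * Exact-state sharing is preferred over allocating a fresh PLL, and the
 * staged crtc_mask in the atomic state tracks which pipes hold which PLL.
 */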
4341 
4342 static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4343 {
4344 	struct drm_i915_private *dev_priv = to_i915(state->dev);
4345 	struct intel_shared_dpll_config *shared_dpll;
4346 	struct intel_shared_dpll *pll;
4347 	enum intel_dpll_id i;
4348 
4349 	if (!to_intel_atomic_state(state)->dpll_set)
4350 		return;
4351 
4352 	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
4353 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4354 		pll = &dev_priv->shared_dplls[i];
4355 		pll->config = shared_dpll[i];
4356 	}
4357 }
4358 
4359 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4360 {
4361 	struct drm_i915_private *dev_priv = dev->dev_private;
4362 	int dslreg = PIPEDSL(pipe);
4363 	u32 temp;
4364 
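	/*
	 * PIPEDSL is the pipe's current-scanline counter; if it still reads
	 * the same value after the delays below, the pipe never started
	 * scanning out and the mode set did not take.
	 */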
4365 	temp = I915_READ(dslreg);
4366 	udelay(500);
4367 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4368 		if (wait_for(I915_READ(dslreg) != temp, 5))
4369 			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4370 	}
4371 }
4372 
4373 static int
4374 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4375 		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
4376 		  int src_w, int src_h, int dst_w, int dst_h)
4377 {
4378 	struct intel_crtc_scaler_state *scaler_state =
4379 		&crtc_state->scaler_state;
4380 	struct intel_crtc *intel_crtc =
4381 		to_intel_crtc(crtc_state->base.crtc);
4382 	int need_scaling;
4383 
4384 	need_scaling = intel_rotation_90_or_270(rotation) ?
4385 		(src_h != dst_w || src_w != dst_h):
4386 		(src_w != dst_w || src_h != dst_h);
4387 
4388 	/*
4389 	 * if the plane is being disabled, the scaler is no longer required,
4390 	 * or a detach is forced:
4391 	 *  - free the scaler bound to this plane/crtc
4392 	 *  - to do this, update scaler_state->scaler_users
4393 	 *
4394 	 * The scaler state in crtc_state is freed so the scaler can be assigned
4395 	 * to another user; the actual register update that frees it is done in
4396 	 * plane/panel-fit programming, which is why scaler_id isn't reset here.
4397 	 */
4398 	if (force_detach || !need_scaling) {
4399 		if (*scaler_id >= 0) {
4400 			scaler_state->scaler_users &= ~(1 << scaler_user);
4401 			scaler_state->scalers[*scaler_id].in_use = 0;
4402 
4403 			DRM_DEBUG_KMS("scaler_user index %u.%u: "
4404 				"Staged freeing scaler id %d scaler_users = 0x%x\n",
4405 				intel_crtc->pipe, scaler_user, *scaler_id,
4406 				scaler_state->scaler_users);
4407 			*scaler_id = -1;
4408 		}
4409 		return 0;
4410 	}
4411 
4412 	/* range checks */
4413 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4414 		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4415 
4416 		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4417 		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4418 		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4419 			"size is out of scaler range\n",
4420 			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4421 		return -EINVAL;
4422 	}
4423 
4424 	/* mark this plane as a scaler user in crtc_state */
4425 	scaler_state->scaler_users |= (1 << scaler_user);
4426 	DRM_DEBUG_KMS("scaler_user index %u.%u: "
4427 		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4428 		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4429 		scaler_state->scaler_users);
4430 
4431 	return 0;
4432 }
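
/*
 * Illustrative example: the rotation-aware check above compares the source
 * transposed for 90/270 rotation, so a 1080x1920 source rotated onto a
 * 1920x1080 destination needs no scaler (src_h == dst_w && src_w == dst_h),
 * whereas the same source unrotated would need one.
 */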
4433 
4434 /**
4435  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4436  *
4437  * @state: crtc's scaler state
4438  *
4439  * Return
4440  *     0 - scaler_usage updated successfully
4441  *    error - requested scaling cannot be supported or other error condition
4442  */
4443 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4444 {
4445 	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4446 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4447 
4448 	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4449 		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4450 
4451 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4452 		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4453 		state->pipe_src_w, state->pipe_src_h,
4454 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4455 }
4456 
4457 /**
4458  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4459  *
4460  * @crtc_state: crtc's scaler state
4461  * @plane_state: atomic plane state to update
4462  *
4463  * Return
4464  *     0 - scaler_usage updated successfully
4465  *    error - requested scaling cannot be supported or other error condition
4466  */
4467 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4468 				   struct intel_plane_state *plane_state)
4469 {
4470 
4471 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4472 	struct intel_plane *intel_plane =
4473 		to_intel_plane(plane_state->base.plane);
4474 	struct drm_framebuffer *fb = plane_state->base.fb;
4475 	int ret;
4476 
4477 	bool force_detach = !fb || !plane_state->visible;
4478 
4479 	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4480 		      intel_plane->base.base.id, intel_crtc->pipe,
4481 		      drm_plane_index(&intel_plane->base));
4482 
4483 	ret = skl_update_scaler(crtc_state, force_detach,
4484 				drm_plane_index(&intel_plane->base),
4485 				&plane_state->scaler_id,
4486 				plane_state->base.rotation,
4487 				drm_rect_width(&plane_state->src) >> 16,
4488 				drm_rect_height(&plane_state->src) >> 16,
4489 				drm_rect_width(&plane_state->dst),
4490 				drm_rect_height(&plane_state->dst));
4491 
4492 	if (ret || plane_state->scaler_id < 0)
4493 		return ret;
4494 
4495 	/* check colorkey */
4496 	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4497 		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4498 			      intel_plane->base.base.id);
4499 		return -EINVAL;
4500 	}
4501 
4502 	/* Check src format */
4503 	switch (fb->pixel_format) {
4504 	case DRM_FORMAT_RGB565:
4505 	case DRM_FORMAT_XBGR8888:
4506 	case DRM_FORMAT_XRGB8888:
4507 	case DRM_FORMAT_ABGR8888:
4508 	case DRM_FORMAT_ARGB8888:
4509 	case DRM_FORMAT_XRGB2101010:
4510 	case DRM_FORMAT_XBGR2101010:
4511 	case DRM_FORMAT_YUYV:
4512 	case DRM_FORMAT_YVYU:
4513 	case DRM_FORMAT_UYVY:
4514 	case DRM_FORMAT_VYUY:
4515 		break;
4516 	default:
4517 		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4518 			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4519 		return -EINVAL;
4520 	}
4521 
4522 	return 0;
4523 }
4524 
4525 static void skylake_scaler_disable(struct intel_crtc *crtc)
4526 {
4527 	int i;
4528 
4529 	for (i = 0; i < crtc->num_scalers; i++)
4530 		skl_detach_scaler(crtc, i);
4531 }
4532 
4533 static void skylake_pfit_enable(struct intel_crtc *crtc)
4534 {
4535 	struct drm_device *dev = crtc->base.dev;
4536 	struct drm_i915_private *dev_priv = dev->dev_private;
4537 	int pipe = crtc->pipe;
4538 	struct intel_crtc_scaler_state *scaler_state =
4539 		&crtc->config->scaler_state;
4540 
4541 	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4542 
4543 	if (crtc->config->pch_pfit.enabled) {
4544 		int id;
4545 
4546 		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
4547 			DRM_ERROR("Requesting pfit without getting a scaler first\n");
4548 			return;
4549 		}
4550 
4551 		id = scaler_state->scaler_id;
4552 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4553 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4554 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4555 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4556 
4557 		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4558 	}
4559 }
4560 
4561 static void ironlake_pfit_enable(struct intel_crtc *crtc)
4562 {
4563 	struct drm_device *dev = crtc->base.dev;
4564 	struct drm_i915_private *dev_priv = dev->dev_private;
4565 	int pipe = crtc->pipe;
4566 
4567 	if (crtc->config->pch_pfit.enabled) {
4568 		/* Force use of hard-coded filter coefficients
4569 		 * as some pre-programmed values are broken,
4570 		 * e.g. x201.
4571 		 */
4572 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4573 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4574 						 PF_PIPE_SEL_IVB(pipe));
4575 		else
4576 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4577 		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4578 		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4579 	}
4580 }
4581 
4582 void hsw_enable_ips(struct intel_crtc *crtc)
4583 {
4584 	struct drm_device *dev = crtc->base.dev;
4585 	struct drm_i915_private *dev_priv = dev->dev_private;
4586 
4587 	if (!crtc->config->ips_enabled)
4588 		return;
4589 
4590 	/* We can only enable IPS after we enable a plane and wait for a vblank */
4591 	intel_wait_for_vblank(dev, crtc->pipe);
4592 
4593 	assert_plane_enabled(dev_priv, crtc->plane);
4594 	if (IS_BROADWELL(dev)) {
4595 		mutex_lock(&dev_priv->rps.hw_lock);
4596 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4597 		mutex_unlock(&dev_priv->rps.hw_lock);
4598 		/* Quoting Art Runyan: "it's not safe to expect any particular
4599 		 * value in IPS_CTL bit 31 after enabling IPS through the
4600 		 * mailbox." Moreover, the mailbox may return a bogus state,
4601 		 * so we need to just enable it and continue on.
4602 		 */
4603 	} else {
4604 		I915_WRITE(IPS_CTL, IPS_ENABLE);
4605 		/* The bit only becomes 1 in the next vblank, so this wait here
4606 		 * is essentially intel_wait_for_vblank. If we don't have this
4607 		 * and don't wait for vblanks until the end of crtc_enable, then
4608 		 * the HW state readout code will complain that the expected
4609 		 * IPS_CTL value is not the one we read. */
4610 		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4611 			DRM_ERROR("Timed out waiting for IPS enable\n");
4612 	}
4613 }
4614 
4615 void hsw_disable_ips(struct intel_crtc *crtc)
4616 {
4617 	struct drm_device *dev = crtc->base.dev;
4618 	struct drm_i915_private *dev_priv = dev->dev_private;
4619 
4620 	if (!crtc->config->ips_enabled)
4621 		return;
4622 
4623 	assert_plane_enabled(dev_priv, crtc->plane);
4624 	if (IS_BROADWELL(dev)) {
4625 		mutex_lock(&dev_priv->rps.hw_lock);
4626 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4627 		mutex_unlock(&dev_priv->rps.hw_lock);
4628 		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4629 		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4630 			DRM_ERROR("Timed out waiting for IPS disable\n");
4631 	} else {
4632 		I915_WRITE(IPS_CTL, 0);
4633 		POSTING_READ(IPS_CTL);
4634 	}
4635 
4636 	/* We need to wait for a vblank before we can disable the plane. */
4637 	intel_wait_for_vblank(dev, crtc->pipe);
4638 }
4639 
4640 /** Loads the palette/gamma unit for the CRTC with the prepared values */
4641 static void intel_crtc_load_lut(struct drm_crtc *crtc)
4642 {
4643 	struct drm_device *dev = crtc->dev;
4644 	struct drm_i915_private *dev_priv = dev->dev_private;
4645 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4646 	enum pipe pipe = intel_crtc->pipe;
4647 	int i;
4648 	bool reenable_ips = false;
4649 
4650 	/* The clocks have to be on to load the palette. */
4651 	if (!crtc->state->active)
4652 		return;
4653 
4654 	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
4655 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
4656 			assert_dsi_pll_enabled(dev_priv);
4657 		else
4658 			assert_pll_enabled(dev_priv, pipe);
4659 	}
4660 
4661 	/* Workaround : Do not read or write the pipe palette/gamma data while
4662 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
4663 	 */
4664 	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
4665 	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
4666 	     GAMMA_MODE_MODE_SPLIT)) {
4667 		hsw_disable_ips(intel_crtc);
4668 		reenable_ips = true;
4669 	}
4670 
4671 	for (i = 0; i < 256; i++) {
4672 		u32 palreg;
4673 
4674 		if (HAS_GMCH_DISPLAY(dev))
4675 			palreg = PALETTE(pipe, i);
4676 		else
4677 			palreg = LGC_PALETTE(pipe, i);
4678 
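		/* each entry packs 8-bit R/G/B as R[23:16], G[15:8], B[7:0];
		 * e.g. white is 0x00ffffff */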
4679 		I915_WRITE(palreg,
4680 			   (intel_crtc->lut_r[i] << 16) |
4681 			   (intel_crtc->lut_g[i] << 8) |
4682 			   intel_crtc->lut_b[i]);
4683 	}
4684 
4685 	if (reenable_ips)
4686 		hsw_enable_ips(intel_crtc);
4687 }
4688 
4689 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4690 {
4691 	if (intel_crtc->overlay) {
4692 		struct drm_device *dev = intel_crtc->base.dev;
4693 		struct drm_i915_private *dev_priv = dev->dev_private;
4694 
4695 		mutex_lock(&dev->struct_mutex);
4696 		dev_priv->mm.interruptible = false;
4697 		(void) intel_overlay_switch_off(intel_crtc->overlay);
4698 		dev_priv->mm.interruptible = true;
4699 		mutex_unlock(&dev->struct_mutex);
4700 	}
4701 
4702 	/* Let userspace switch the overlay on again. In most cases userspace
4703 	 * has to recompute where to put it anyway.
4704 	 */
4705 }
4706 
4707 /**
4708  * intel_post_enable_primary - Perform operations after enabling primary plane
4709  * @crtc: the CRTC whose primary plane was just enabled
4710  *
4711  * Performs potentially sleeping operations that must be done after the primary
4712  * plane is enabled, such as updating FBC and IPS.  Note that this may be
4713  * called due to an explicit primary plane update, or due to an implicit
4714  * re-enable that is caused when a sprite plane is updated to no longer
4715  * completely hide the primary plane.
4716  */
4717 static void
4718 intel_post_enable_primary(struct drm_crtc *crtc)
4719 {
4720 	struct drm_device *dev = crtc->dev;
4721 	struct drm_i915_private *dev_priv = dev->dev_private;
4722 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4723 	int pipe = intel_crtc->pipe;
4724 
4725 	/*
4726 	 * BDW signals flip done immediately if the plane
4727 	 * is disabled, even if the plane enable is already
4728 	 * armed to occur at the next vblank :(
4729 	 */
4730 	if (IS_BROADWELL(dev))
4731 		intel_wait_for_vblank(dev, pipe);
4732 
4733 	/*
4734 	 * FIXME IPS should be fine as long as one plane is
4735 	 * enabled, but in practice it seems to have problems
4736 	 * when going from primary only to sprite only and vice
4737 	 * versa.
4738 	 */
4739 	hsw_enable_ips(intel_crtc);
4740 
4741 	/*
4742 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4743 	 * So don't enable underrun reporting before at least some planes
4744 	 * are enabled.
4745 	 * FIXME: Need to fix the logic to work when we turn off all planes
4746 	 * but leave the pipe running.
4747 	 */
4748 	if (IS_GEN2(dev))
4749 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4750 
4751 	/* Underruns don't raise interrupts, so check manually. */
4752 	if (HAS_GMCH_DISPLAY(dev))
4753 		i9xx_check_fifo_underruns(dev_priv);
4754 }
4755 
4756 /**
4757  * intel_pre_disable_primary - Perform operations before disabling primary plane
4758  * @crtc: the CRTC whose primary plane is to be disabled
4759  *
4760  * Performs potentially sleeping operations that must be done before the
4761  * primary plane is disabled, such as updating FBC and IPS.  Note that this may
4762  * be called due to an explicit primary plane update, or due to an implicit
4763  * disable that is caused when a sprite plane completely hides the primary
4764  * plane.
4765  */
4766 static void
4767 intel_pre_disable_primary(struct drm_crtc *crtc)
4768 {
4769 	struct drm_device *dev = crtc->dev;
4770 	struct drm_i915_private *dev_priv = dev->dev_private;
4771 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4772 	int pipe = intel_crtc->pipe;
4773 
4774 	/*
4775 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4776 	 * So disable underrun reporting before all the planes get disabled.
4777 	 * FIXME: Need to fix the logic to work when we turn off all planes
4778 	 * but leave the pipe running.
4779 	 */
4780 	if (IS_GEN2(dev))
4781 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4782 
4783 	/*
4784 	 * Vblank time updates from the shadow to live plane control register
4785 	 * are blocked if the memory self-refresh mode is active at that
4786 	 * moment. So to make sure the plane gets truly disabled, disable
4787 	 * first the self-refresh mode. The self-refresh enable bit in turn
4788 	 * will be checked/applied by the HW only at the next frame start
4789 	 * event which is after the vblank start event, so we need to have a
4790 	 * wait-for-vblank between disabling the plane and the pipe.
4791 	 */
4792 	if (HAS_GMCH_DISPLAY(dev)) {
4793 		intel_set_memory_cxsr(dev_priv, false);
4794 		dev_priv->wm.vlv.cxsr = false;
4795 		intel_wait_for_vblank(dev, pipe);
4796 	}
4797 
4798 	/*
4799 	 * FIXME IPS should be fine as long as one plane is
4800 	 * enabled, but in practice it seems to have problems
4801 	 * when going from primary only to sprite only and vice
4802 	 * versa.
4803 	 */
4804 	hsw_disable_ips(intel_crtc);
4805 }
4806 
4807 static void intel_post_plane_update(struct intel_crtc *crtc)
4808 {
4809 	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4810 	struct drm_device *dev = crtc->base.dev;
4811 	struct drm_i915_private *dev_priv = dev->dev_private;
4812 	struct drm_plane *plane;
4813 
4814 	if (atomic->wait_vblank)
4815 		intel_wait_for_vblank(dev, crtc->pipe);
4816 
4817 	intel_frontbuffer_flip(dev, atomic->fb_bits);
4818 
4819 	if (atomic->disable_cxsr)
4820 		crtc->wm.cxsr_allowed = true;
4821 
4822 	if (crtc->atomic.update_wm_post)
4823 		intel_update_watermarks(&crtc->base);
4824 
4825 	if (atomic->update_fbc)
4826 		intel_fbc_update(dev_priv);
4827 
4828 	if (atomic->post_enable_primary)
4829 		intel_post_enable_primary(&crtc->base);
4830 
4831 	drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
4832 		intel_update_sprite_watermarks(plane, &crtc->base,
4833 					       0, 0, 0, false, false);
4834 
4835 	memset(atomic, 0, sizeof(*atomic));
4836 }
4837 
4838 static void intel_pre_plane_update(struct intel_crtc *crtc)
4839 {
4840 	struct drm_device *dev = crtc->base.dev;
4841 	struct drm_i915_private *dev_priv = dev->dev_private;
4842 	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4843 	struct drm_plane *p;
4844 
4845 	/* Track fb's for any planes being disabled */
4846 	drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
4847 		struct intel_plane *plane = to_intel_plane(p);
4848 
4849 		mutex_lock(&dev->struct_mutex);
4850 		i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
4851 				  plane->frontbuffer_bit);
4852 		mutex_unlock(&dev->struct_mutex);
4853 	}
4854 
4855 	if (atomic->wait_for_flips)
4856 		intel_crtc_wait_for_pending_flips(&crtc->base);
4857 
4858 	if (atomic->disable_fbc)
4859 		intel_fbc_disable_crtc(crtc);
4860 
4861 	if (crtc->atomic.disable_ips)
4862 		hsw_disable_ips(crtc);
4863 
4864 	if (atomic->pre_disable_primary)
4865 		intel_pre_disable_primary(&crtc->base);
4866 
4867 	if (atomic->disable_cxsr) {
4868 		crtc->wm.cxsr_allowed = false;
4869 		intel_set_memory_cxsr(dev_priv, false);
4870 	}
4871 }
4872 
4873 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4874 {
4875 	struct drm_device *dev = crtc->dev;
4876 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4877 	struct drm_plane *p;
4878 	int pipe = intel_crtc->pipe;
4879 
4880 	intel_crtc_dpms_overlay_disable(intel_crtc);
4881 
4882 	drm_for_each_plane_mask(p, dev, plane_mask)
4883 		to_intel_plane(p)->disable_plane(p, crtc);
4884 
4885 	/*
4886 	 * FIXME: Once we grow proper nuclear flip support out of this we need
4887 	 * to compute the mask of flip planes precisely. For the time being
4888 	 * consider this a flip to a NULL plane.
4889 	 */
4890 	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4891 }
4892 
4893 static void ironlake_crtc_enable(struct drm_crtc *crtc)
4894 {
4895 	struct drm_device *dev = crtc->dev;
4896 	struct drm_i915_private *dev_priv = dev->dev_private;
4897 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4898 	struct intel_encoder *encoder;
4899 	int pipe = intel_crtc->pipe;
4900 
4901 	if (WARN_ON(intel_crtc->active))
4902 		return;
4903 
4904 	if (intel_crtc->config->has_pch_encoder)
4905 		intel_prepare_shared_dpll(intel_crtc);
4906 
4907 	if (intel_crtc->config->has_dp_encoder)
4908 		intel_dp_set_m_n(intel_crtc, M1_N1);
4909 
4910 	intel_set_pipe_timings(intel_crtc);
4911 
4912 	if (intel_crtc->config->has_pch_encoder) {
4913 		intel_cpu_transcoder_set_m_n(intel_crtc,
4914 				     &intel_crtc->config->fdi_m_n, NULL);
4915 	}
4916 
4917 	ironlake_set_pipeconf(crtc);
4918 
4919 	intel_crtc->active = true;
4920 
4921 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4922 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4923 
4924 	for_each_encoder_on_crtc(dev, crtc, encoder)
4925 		if (encoder->pre_enable)
4926 			encoder->pre_enable(encoder);
4927 
4928 	if (intel_crtc->config->has_pch_encoder) {
4929 		/* Note: FDI PLL enabling _must_ be done before we enable the
4930 		 * cpu pipes, hence this is separate from all the other fdi/pch
4931 		 * enabling. */
4932 		ironlake_fdi_pll_enable(intel_crtc);
4933 	} else {
4934 		assert_fdi_tx_disabled(dev_priv, pipe);
4935 		assert_fdi_rx_disabled(dev_priv, pipe);
4936 	}
4937 
4938 	ironlake_pfit_enable(intel_crtc);
4939 
4940 	/*
4941 	 * On ILK+ LUT must be loaded before the pipe is running but with
4942 	 * clocks enabled
4943 	 */
4944 	intel_crtc_load_lut(crtc);
4945 
4946 	intel_update_watermarks(crtc);
4947 	intel_enable_pipe(intel_crtc);
4948 
4949 	if (intel_crtc->config->has_pch_encoder)
4950 		ironlake_pch_enable(crtc);
4951 
4952 	assert_vblank_disabled(crtc);
4953 	drm_crtc_vblank_on(crtc);
4954 
4955 	for_each_encoder_on_crtc(dev, crtc, encoder)
4956 		encoder->enable(encoder);
4957 
4958 	if (HAS_PCH_CPT(dev))
4959 		cpt_verify_modeset(dev, intel_crtc->pipe);
4960 }
4961 
4962 /* IPS only exists on ULT machines and is tied to pipe A. */
4963 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4964 {
4965 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4966 }
4967 
4968 static void haswell_crtc_enable(struct drm_crtc *crtc)
4969 {
4970 	struct drm_device *dev = crtc->dev;
4971 	struct drm_i915_private *dev_priv = dev->dev_private;
4972 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4973 	struct intel_encoder *encoder;
4974 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4975 	struct intel_crtc_state *pipe_config =
4976 		to_intel_crtc_state(crtc->state);
4977 	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
4978 
4979 	if (WARN_ON(intel_crtc->active))
4980 		return;
4981 
4982 	if (intel_crtc_to_shared_dpll(intel_crtc))
4983 		intel_enable_shared_dpll(intel_crtc);
4984 
4985 	if (intel_crtc->config->has_dp_encoder)
4986 		intel_dp_set_m_n(intel_crtc, M1_N1);
4987 
4988 	intel_set_pipe_timings(intel_crtc);
4989 
4990 	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
4991 		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
4992 			   intel_crtc->config->pixel_multiplier - 1);
4993 	}
4994 
4995 	if (intel_crtc->config->has_pch_encoder) {
4996 		intel_cpu_transcoder_set_m_n(intel_crtc,
4997 				     &intel_crtc->config->fdi_m_n, NULL);
4998 	}
4999 
5000 	haswell_set_pipeconf(crtc);
5001 
5002 	intel_set_pipe_csc(crtc);
5003 
5004 	intel_crtc->active = true;
5005 
5006 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5007 	for_each_encoder_on_crtc(dev, crtc, encoder) {
5008 		if (encoder->pre_pll_enable)
5009 			encoder->pre_pll_enable(encoder);
5010 		if (encoder->pre_enable)
5011 			encoder->pre_enable(encoder);
5012 	}
5013 
5014 	if (intel_crtc->config->has_pch_encoder) {
5015 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5016 						      true);
5017 		dev_priv->display.fdi_link_train(crtc);
5018 	}
5019 
5020 	if (!is_dsi)
5021 		intel_ddi_enable_pipe_clock(intel_crtc);
5022 
5023 	if (INTEL_INFO(dev)->gen >= 9)
5024 		skylake_pfit_enable(intel_crtc);
5025 	else
5026 		ironlake_pfit_enable(intel_crtc);
5027 
5028 	/*
5029 	 * On ILK+ LUT must be loaded before the pipe is running but with
5030 	 * clocks enabled
5031 	 */
5032 	intel_crtc_load_lut(crtc);
5033 
5034 	intel_ddi_set_pipe_settings(crtc);
5035 	if (!is_dsi)
5036 		intel_ddi_enable_transcoder_func(crtc);
5037 
5038 	intel_update_watermarks(crtc);
5039 	intel_enable_pipe(intel_crtc);
5040 
5041 	if (intel_crtc->config->has_pch_encoder)
5042 		lpt_pch_enable(crtc);
5043 
5044 	if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
5045 		intel_ddi_set_vc_payload_alloc(crtc, true);
5046 
5047 	assert_vblank_disabled(crtc);
5048 	drm_crtc_vblank_on(crtc);
5049 
5050 	for_each_encoder_on_crtc(dev, crtc, encoder) {
5051 		encoder->enable(encoder);
5052 		intel_opregion_notify_encoder(encoder, true);
5053 	}
5054 
5055 	/* If we change the relative order between pipe/planes enabling, we need
5056 	 * to change the workaround. */
5057 	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
5058 	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
5059 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
5060 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
5061 	}
5062 }
5063 
5064 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5065 {
5066 	struct drm_device *dev = crtc->base.dev;
5067 	struct drm_i915_private *dev_priv = dev->dev_private;
5068 	int pipe = crtc->pipe;
5069 
5070 	/* To avoid upsetting the power well on Haswell, only disable the pfit if
5071 	 * it's in use. The hw state code will make sure we get this right. */
5072 	if (force || crtc->config->pch_pfit.enabled) {
5073 		I915_WRITE(PF_CTL(pipe), 0);
5074 		I915_WRITE(PF_WIN_POS(pipe), 0);
5075 		I915_WRITE(PF_WIN_SZ(pipe), 0);
5076 	}
5077 }
5078 
5079 static void ironlake_crtc_disable(struct drm_crtc *crtc)
5080 {
5081 	struct drm_device *dev = crtc->dev;
5082 	struct drm_i915_private *dev_priv = dev->dev_private;
5083 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5084 	struct intel_encoder *encoder;
5085 	int pipe = intel_crtc->pipe;
5086 	u32 reg, temp;
5087 
5088 	for_each_encoder_on_crtc(dev, crtc, encoder)
5089 		encoder->disable(encoder);
5090 
5091 	drm_crtc_vblank_off(crtc);
5092 	assert_vblank_disabled(crtc);
5093 
5094 	if (intel_crtc->config->has_pch_encoder)
5095 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5096 
5097 	intel_disable_pipe(intel_crtc);
5098 
5099 	ironlake_pfit_disable(intel_crtc, false);
5100 
5101 	if (intel_crtc->config->has_pch_encoder)
5102 		ironlake_fdi_disable(crtc);
5103 
5104 	for_each_encoder_on_crtc(dev, crtc, encoder)
5105 		if (encoder->post_disable)
5106 			encoder->post_disable(encoder);
5107 
5108 	if (intel_crtc->config->has_pch_encoder) {
5109 		ironlake_disable_pch_transcoder(dev_priv, pipe);
5110 
5111 		if (HAS_PCH_CPT(dev)) {
5112 			/* disable TRANS_DP_CTL */
5113 			reg = TRANS_DP_CTL(pipe);
5114 			temp = I915_READ(reg);
5115 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5116 				  TRANS_DP_PORT_SEL_MASK);
5117 			temp |= TRANS_DP_PORT_SEL_NONE;
5118 			I915_WRITE(reg, temp);
5119 
5120 			/* disable DPLL_SEL */
5121 			temp = I915_READ(PCH_DPLL_SEL);
5122 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
5123 			I915_WRITE(PCH_DPLL_SEL, temp);
5124 		}
5125 
5126 		ironlake_fdi_pll_disable(intel_crtc);
5127 	}
5128 }
5129 
5130 static void haswell_crtc_disable(struct drm_crtc *crtc)
5131 {
5132 	struct drm_device *dev = crtc->dev;
5133 	struct drm_i915_private *dev_priv = dev->dev_private;
5134 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5135 	struct intel_encoder *encoder;
5136 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5137 	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
5138 
5139 	for_each_encoder_on_crtc(dev, crtc, encoder) {
5140 		intel_opregion_notify_encoder(encoder, false);
5141 		encoder->disable(encoder);
5142 	}
5143 
5144 	drm_crtc_vblank_off(crtc);
5145 	assert_vblank_disabled(crtc);
5146 
5147 	if (intel_crtc->config->has_pch_encoder)
5148 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5149 						      false);
5150 	intel_disable_pipe(intel_crtc);
5151 
5152 	if (intel_crtc->config->dp_encoder_is_mst)
5153 		intel_ddi_set_vc_payload_alloc(crtc, false);
5154 
5155 	if (!is_dsi)
5156 		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5157 
5158 	if (INTEL_INFO(dev)->gen >= 9)
5159 		skylake_scaler_disable(intel_crtc);
5160 	else
5161 		ironlake_pfit_disable(intel_crtc, false);
5162 
5163 	if (!is_dsi)
5164 		intel_ddi_disable_pipe_clock(intel_crtc);
5165 
5166 	if (intel_crtc->config->has_pch_encoder) {
5167 		lpt_disable_pch_transcoder(dev_priv);
5168 		intel_ddi_fdi_disable(crtc);
5169 	}
5170 
5171 	for_each_encoder_on_crtc(dev, crtc, encoder)
5172 		if (encoder->post_disable)
5173 			encoder->post_disable(encoder);
5174 }
5175 
5176 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5177 {
5178 	struct drm_device *dev = crtc->base.dev;
5179 	struct drm_i915_private *dev_priv = dev->dev_private;
5180 	struct intel_crtc_state *pipe_config = crtc->config;
5181 
5182 	if (!pipe_config->gmch_pfit.control)
5183 		return;
5184 
5185 	/*
5186 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
5187 	 * according to register description and PRM.
5188 	 */
5189 	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5190 	assert_pipe_disabled(dev_priv, crtc->pipe);
5191 
5192 	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5193 	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5194 
5195 	/* Border color in case we don't scale up to the full screen. Black by
5196 	 * default, change to something else for debugging. */
5197 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
5198 }
5199 
5200 static enum intel_display_power_domain port_to_power_domain(enum port port)
5201 {
5202 	switch (port) {
5203 	case PORT_A:
5204 		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
5205 	case PORT_B:
5206 		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
5207 	case PORT_C:
5208 		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
5209 	case PORT_D:
5210 		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
5211 	case PORT_E:
5212 		return POWER_DOMAIN_PORT_DDI_E_2_LANES;
5213 	default:
5214 		MISSING_CASE(port);
5215 		return POWER_DOMAIN_PORT_OTHER;
5216 	}
5217 }
5218 
5219 static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5220 {
5221 	switch (port) {
5222 	case PORT_A:
5223 		return POWER_DOMAIN_AUX_A;
5224 	case PORT_B:
5225 		return POWER_DOMAIN_AUX_B;
5226 	case PORT_C:
5227 		return POWER_DOMAIN_AUX_C;
5228 	case PORT_D:
5229 		return POWER_DOMAIN_AUX_D;
5230 	case PORT_E:
5231 		/* FIXME: Check VBT for actual wiring of PORT E */
5232 		return POWER_DOMAIN_AUX_D;
5233 	default:
5234 		MISSING_CASE(port);
5235 		return POWER_DOMAIN_AUX_A;
5236 	}
5237 }
5238 
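/*
 * Iterate over the power domains whose bits are set in @mask; e.g.
 * for_each_power_domain(domain, BIT(POWER_DOMAIN_PIPE_A)) visits only
 * POWER_DOMAIN_PIPE_A.
 */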
5239 #define for_each_power_domain(domain, mask)				\
5240 	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
5241 		if ((1 << (domain)) & (mask))
5242 
5243 enum intel_display_power_domain
5244 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5245 {
5246 	struct drm_device *dev = intel_encoder->base.dev;
5247 	struct intel_digital_port *intel_dig_port;
5248 
5249 	switch (intel_encoder->type) {
5250 	case INTEL_OUTPUT_UNKNOWN:
5251 		/* Only DDI platforms should ever use this output type */
5252 		WARN_ON_ONCE(!HAS_DDI(dev));
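		/* fallthrough */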
5253 	case INTEL_OUTPUT_DISPLAYPORT:
5254 	case INTEL_OUTPUT_HDMI:
5255 	case INTEL_OUTPUT_EDP:
5256 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5257 		return port_to_power_domain(intel_dig_port->port);
5258 	case INTEL_OUTPUT_DP_MST:
5259 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5260 		return port_to_power_domain(intel_dig_port->port);
5261 	case INTEL_OUTPUT_ANALOG:
5262 		return POWER_DOMAIN_PORT_CRT;
5263 	case INTEL_OUTPUT_DSI:
5264 		return POWER_DOMAIN_PORT_DSI;
5265 	default:
5266 		return POWER_DOMAIN_PORT_OTHER;
5267 	}
5268 }
5269 
5270 enum intel_display_power_domain
5271 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5272 {
5273 	struct drm_device *dev = intel_encoder->base.dev;
5274 	struct intel_digital_port *intel_dig_port;
5275 
5276 	switch (intel_encoder->type) {
5277 	case INTEL_OUTPUT_UNKNOWN:
5278 	case INTEL_OUTPUT_HDMI:
5279 		/*
5280 		 * Only DDI platforms should ever use these output types.
5281 		 * We can get here after the HDMI detect code has already set
5282 		 * the type of the shared encoder. Since we can't be sure
5283 		 * what's the status of the given connectors, play safe and
5284 		 * run the DP detection too.
5285 		 */
5286 		WARN_ON_ONCE(!HAS_DDI(dev));
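		/* fallthrough */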
5287 	case INTEL_OUTPUT_DISPLAYPORT:
5288 	case INTEL_OUTPUT_EDP:
5289 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5290 		return port_to_aux_power_domain(intel_dig_port->port);
5291 	case INTEL_OUTPUT_DP_MST:
5292 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5293 		return port_to_aux_power_domain(intel_dig_port->port);
5294 	default:
5295 		MISSING_CASE(intel_encoder->type);
5296 		return POWER_DOMAIN_AUX_A;
5297 	}
5298 }
5299 
5300 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
5301 {
5302 	struct drm_device *dev = crtc->dev;
5303 	struct intel_encoder *intel_encoder;
5304 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5305 	enum pipe pipe = intel_crtc->pipe;
5306 	unsigned long mask;
5307 	enum transcoder transcoder;
5308 
5309 	if (!crtc->state->active)
5310 		return 0;
5311 
5312 	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
5313 
5314 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
5315 	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5316 	if (intel_crtc->config->pch_pfit.enabled ||
5317 	    intel_crtc->config->pch_pfit.force_thru)
5318 		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5319 
5320 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5321 		mask |= BIT(intel_display_port_power_domain(intel_encoder));
5322 
5323 	return mask;
5324 }
5325 
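/*
 * Grab references on the domains the new CRTC state needs and hand back
 * the now-stale ones; the caller drops those only after the modeset, so
 * the hardware never loses power mid-update.
 */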
5326 static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
5327 {
5328 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5329 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5330 	enum intel_display_power_domain domain;
5331 	unsigned long domains, new_domains, old_domains;
5332 
5333 	old_domains = intel_crtc->enabled_power_domains;
5334 	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
5335 
5336 	domains = new_domains & ~old_domains;
5337 
5338 	for_each_power_domain(domain, domains)
5339 		intel_display_power_get(dev_priv, domain);
5340 
5341 	return old_domains & ~new_domains;
5342 }
5343 
5344 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5345 				      unsigned long domains)
5346 {
5347 	enum intel_display_power_domain domain;
5348 
5349 	for_each_power_domain(domain, domains)
5350 		intel_display_power_put(dev_priv, domain);
5351 }
5352 
5353 static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
5354 {
5355 	struct drm_device *dev = state->dev;
5356 	struct drm_i915_private *dev_priv = dev->dev_private;
5357 	unsigned long put_domains[I915_MAX_PIPES] = {};
5358 	struct drm_crtc_state *crtc_state;
5359 	struct drm_crtc *crtc;
5360 	int i;
5361 
5362 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
5363 		if (needs_modeset(crtc->state))
5364 			put_domains[to_intel_crtc(crtc)->pipe] =
5365 				modeset_get_crtc_power_domains(crtc);
5366 	}
5367 
5368 	if (dev_priv->display.modeset_commit_cdclk) {
5369 		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
5370 
5371 		if (cdclk != dev_priv->cdclk_freq &&
5372 		    !WARN_ON(!state->allow_modeset))
5373 			dev_priv->display.modeset_commit_cdclk(state);
5374 	}
5375 
5376 	for (i = 0; i < I915_MAX_PIPES; i++)
5377 		if (put_domains[i])
5378 			modeset_put_power_domains(dev_priv, put_domains[i]);
5379 }
5380 
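/*
 * Maximum dot clock a pipe can sustain for a given cdclk: 1:1 on
 * HSW/BDW/gen9+, 95% of cdclk on CHV, 90% elsewhere; pre-gen4 hardware
 * can double that by running the pipe in double-wide mode.
 */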
5381 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5382 {
5383 	int max_cdclk_freq = dev_priv->max_cdclk_freq;
5384 
5385 	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5386 	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5387 		return max_cdclk_freq;
5388 	else if (IS_CHERRYVIEW(dev_priv))
5389 		return max_cdclk_freq*95/100;
5390 	else if (INTEL_INFO(dev_priv)->gen < 4)
5391 		return 2*max_cdclk_freq*90/100;
5392 	else
5393 		return max_cdclk_freq*90/100;
5394 }
5395 
5396 static void intel_update_max_cdclk(struct drm_device *dev)
5397 {
5398 	struct drm_i915_private *dev_priv = dev->dev_private;
5399 
5400 	if (IS_SKYLAKE(dev)) {
5401 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5402 
5403 		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5404 			dev_priv->max_cdclk_freq = 675000;
5405 		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5406 			dev_priv->max_cdclk_freq = 540000;
5407 		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5408 			dev_priv->max_cdclk_freq = 450000;
5409 		else
5410 			dev_priv->max_cdclk_freq = 337500;
5411 	} else if (IS_BROADWELL(dev))  {
5412 		/*
5413 		 * FIXME with extra cooling we can allow
5414 		 * 540 MHz for ULX and 675 MHz for ULT.
5415 		 * How can we know if extra cooling is
5416 		 * available? PCI ID, VTB, something else?
5417 		 */
5418 		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
5419 			dev_priv->max_cdclk_freq = 450000;
5420 		else if (IS_BDW_ULX(dev))
5421 			dev_priv->max_cdclk_freq = 450000;
5422 		else if (IS_BDW_ULT(dev))
5423 			dev_priv->max_cdclk_freq = 540000;
5424 		else
5425 			dev_priv->max_cdclk_freq = 675000;
5426 	} else if (IS_CHERRYVIEW(dev)) {
5427 		dev_priv->max_cdclk_freq = 320000;
5428 	} else if (IS_VALLEYVIEW(dev)) {
5429 		dev_priv->max_cdclk_freq = 400000;
5430 	} else {
5431 		/* otherwise assume cdclk is fixed */
5432 		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5433 	}
5434 
5435 	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5436 
5437 	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5438 			 dev_priv->max_cdclk_freq);
5439 
5440 	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5441 			 dev_priv->max_dotclk_freq);
5442 }
5443 
5444 static void intel_update_cdclk(struct drm_device *dev)
5445 {
5446 	struct drm_i915_private *dev_priv = dev->dev_private;
5447 
5448 	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5449 	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5450 			 dev_priv->cdclk_freq);
5451 
5452 	/*
5453 	 * Program the gmbus_freq based on the cdclk frequency.
5454 	 * BSpec erroneously claims we should aim for 4MHz, but
5455 	 * in fact 1MHz is the correct frequency.
5456 	 */
5457 	if (IS_VALLEYVIEW(dev)) {
5463 		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5464 	}
5465 
5466 	if (dev_priv->max_cdclk_freq == 0)
5467 		intel_update_max_cdclk(dev);
5468 }
5469 
5470 static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5471 {
5472 	struct drm_i915_private *dev_priv = dev->dev_private;
5473 	uint32_t divider;
5474 	uint32_t ratio;
5475 	uint32_t current_freq;
5476 	int ret;
5477 
5478 	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
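	/* e.g. 576000 kHz = 19200 kHz * 60 / 2 / 1, and
	 * 144000 kHz = 19200 kHz * 60 / 2 / 4 */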
5479 	switch (frequency) {
5480 	case 144000:
5481 		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5482 		ratio = BXT_DE_PLL_RATIO(60);
5483 		break;
5484 	case 288000:
5485 		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5486 		ratio = BXT_DE_PLL_RATIO(60);
5487 		break;
5488 	case 384000:
5489 		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5490 		ratio = BXT_DE_PLL_RATIO(60);
5491 		break;
5492 	case 576000:
5493 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5494 		ratio = BXT_DE_PLL_RATIO(60);
5495 		break;
5496 	case 624000:
5497 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5498 		ratio = BXT_DE_PLL_RATIO(65);
5499 		break;
5500 	case 19200:
5501 		/*
5502 		 * Bypass frequency with DE PLL disabled. Init ratio, divider
5503 		 * to suppress GCC warning.
5504 		 */
5505 		ratio = 0;
5506 		divider = 0;
5507 		break;
5508 	default:
5509 		DRM_ERROR("unsupported CDCLK freq %d\n", frequency);
5510 
5511 		return;
5512 	}
5513 
5514 	mutex_lock(&dev_priv->rps.hw_lock);
5515 	/* Inform power controller of upcoming frequency change */
5516 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5517 				      0x80000000);
5518 	mutex_unlock(&dev_priv->rps.hw_lock);
5519 
5520 	if (ret) {
5521 		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5522 			  ret, frequency);
5523 		return;
5524 	}
5525 
5526 	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5527 	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5528 	current_freq = current_freq * 500 + 1000;
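	/* e.g. a raw field value of 673 decodes to 673 * 500 + 1000 = 337500 kHz */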
5529 
5530 	/*
5531 	 * DE PLL has to be disabled when
5532 	 * - setting to 19.2MHz (bypass, PLL isn't used)
5533 	 * - before setting to 624MHz (PLL needs toggling)
5534 	 * - before switching away from 624MHz (PLL needs toggling)
5535 	 */
5536 	if (frequency == 19200 || frequency == 624000 ||
5537 	    current_freq == 624000) {
5538 		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5539 		/* Timeout 200us */
5540 		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5541 			     1))
5542 			DRM_ERROR("timeout waiting for DE PLL unlock\n");
5543 	}
5544 
5545 	if (frequency != 19200) {
5546 		uint32_t val;
5547 
5548 		val = I915_READ(BXT_DE_PLL_CTL);
5549 		val &= ~BXT_DE_PLL_RATIO_MASK;
5550 		val |= ratio;
5551 		I915_WRITE(BXT_DE_PLL_CTL, val);
5552 
5553 		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5554 		/* Timeout 200us */
5555 		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5556 			DRM_ERROR("timeout waiting for DE PLL lock\n");
5557 
5558 		val = I915_READ(CDCLK_CTL);
5559 		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5560 		val |= divider;
5561 		/*
5562 		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5563 		 * enable otherwise.
5564 		 */
5565 		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5566 		if (frequency >= 500000)
5567 			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5568 
5569 		val &= ~CDCLK_FREQ_DECIMAL_MASK;
5570 		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5571 		val |= (frequency - 1000) / 500;
5572 		I915_WRITE(CDCLK_CTL, val);
5573 	}
5574 
5575 	mutex_lock(&dev_priv->rps.hw_lock);
5576 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5577 				      DIV_ROUND_UP(frequency, 25000));
5578 	mutex_unlock(&dev_priv->rps.hw_lock);
5579 
5580 	if (ret) {
5581 		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5582 			  ret, frequency);
5583 		return;
5584 	}
5585 
5586 	intel_update_cdclk(dev);
5587 }
5588 
5589 void broxton_init_cdclk(struct drm_device *dev)
5590 {
5591 	struct drm_i915_private *dev_priv = dev->dev_private;
5592 	uint32_t val;
5593 
5594 	/*
5595 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5596 	 * or else the reset will hang because there is no PCH to respond.
5597 	 * Move the handshake programming to initialization sequence.
5598 	 * Previously was left up to BIOS.
5599 	 */
5600 	val = I915_READ(HSW_NDE_RSTWRN_OPT);
5601 	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
5602 	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
5603 
5604 	/* Enable PG1 for cdclk */
5605 	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
5606 
5607 	/* check if cd clock is enabled */
5608 	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
5609 		DRM_DEBUG_KMS("Display already initialized\n");
5610 		return;
5611 	}
5612 
5613 	/*
5614 	 * FIXME:
5615 	 * - The initial CDCLK needs to be read from VBT.
5616 	 *   Need to make this change after VBT has changes for BXT.
5617 	 * - check if setting the max (or any) cdclk freq is really necessary
5618 	 *   here, it belongs to modeset time
5619 	 */
5620 	broxton_set_cdclk(dev, 624000);
5621 
5622 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5623 	POSTING_READ(DBUF_CTL);
5624 
5625 	udelay(10);
5626 
5627 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5628 		DRM_ERROR("DBuf power enable timeout!\n");
5629 }
5630 
5631 void broxton_uninit_cdclk(struct drm_device *dev)
5632 {
5633 	struct drm_i915_private *dev_priv = dev->dev_private;
5634 
5635 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5636 	POSTING_READ(DBUF_CTL);
5637 
5638 	udelay(10);
5639 
5640 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5641 		DRM_ERROR("DBuf power disable timeout!\n");
5642 
5643 	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
5644 	broxton_set_cdclk(dev, 19200);
5645 
5646 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
5647 }
5648 
5649 static const struct skl_cdclk_entry {
5650 	unsigned int freq;
5651 	unsigned int vco;
5652 } skl_cdclk_frequencies[] = {
5653 	{ .freq = 308570, .vco = 8640 },
5654 	{ .freq = 337500, .vco = 8100 },
5655 	{ .freq = 432000, .vco = 8640 },
5656 	{ .freq = 450000, .vco = 8100 },
5657 	{ .freq = 540000, .vco = 8100 },
5658 	{ .freq = 617140, .vco = 8640 },
5659 	{ .freq = 675000, .vco = 8100 },
5660 };
5661 
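/*
 * CDCLK_CTL holds the frequency as .1 binary fixpoint MHz (0.5 MHz
 * units) with a -1 MHz offset; e.g. 337500 kHz encodes as
 * (337500 - 1000) / 500 = 673, i.e. 336.5 + 1 MHz = 337.5 MHz.
 */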
5662 static unsigned int skl_cdclk_decimal(unsigned int freq)
5663 {
5664 	return (freq - 1000) / 500;
5665 }
5666 
5667 static unsigned int skl_cdclk_get_vco(unsigned int freq)
5668 {
5669 	unsigned int i;
5670 
5671 	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5672 		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5673 
5674 		if (e->freq == freq)
5675 			return e->vco;
5676 	}
5677 
5678 	return 8100;
5679 }
5680 
5681 static void
5682 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5683 {
5684 	unsigned int min_freq;
5685 	u32 val;
5686 
5687 	/* select the minimum CDCLK before enabling DPLL 0 */
5688 	val = I915_READ(CDCLK_CTL);
5689 	val &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
5690 	val |= CDCLK_FREQ_337_308;
5691 
5692 	if (required_vco == 8640)
5693 		min_freq = 308570;
5694 	else
5695 		min_freq = 337500;
5696 
5697 	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5698 
5699 	I915_WRITE(CDCLK_CTL, val);
5700 	POSTING_READ(CDCLK_CTL);
5701 
5702 	/*
5703 	 * We always enable DPLL0 with the lowest link rate possible, but still
5704 	 * taking into account the VCO required to operate the eDP panel at the
5705 	 * desired frequency. The usual DP link rates operate with a VCO of
5706 	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5707 	 * The modeset code is responsible for the selection of the exact link
5708 	 * rate later on, with the constraint of choosing a frequency that
5709 	 * works with required_vco.
5710 	 */
5711 	val = I915_READ(DPLL_CTRL1);
5712 
5713 	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5714 		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5715 	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5716 	if (required_vco == 8640)
5717 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5718 					    SKL_DPLL0);
5719 	else
5720 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5721 					    SKL_DPLL0);
5722 
5723 	I915_WRITE(DPLL_CTRL1, val);
5724 	POSTING_READ(DPLL_CTRL1);
5725 
5726 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5727 
5728 	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5729 		DRM_ERROR("DPLL0 not locked\n");
5730 }
5731 
5732 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5733 {
5734 	int ret;
5735 	u32 val;
5736 
5737 	/* inform PCU we want to change CDCLK */
5738 	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5739 	mutex_lock(&dev_priv->rps.hw_lock);
5740 	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5741 	mutex_unlock(&dev_priv->rps.hw_lock);
5742 
5743 	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5744 }
5745 
5746 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5747 {
5748 	unsigned int i;
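	/* poll for up to ~150 us: 15 attempts, 10 us apart */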
5749 
5750 	for (i = 0; i < 15; i++) {
5751 		if (skl_cdclk_pcu_ready(dev_priv))
5752 			return true;
5753 		udelay(10);
5754 	}
5755 
5756 	return false;
5757 }
5758 
5759 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5760 {
5761 	struct drm_device *dev = dev_priv->dev;
5762 	u32 freq_select, pcu_ack;
5763 
5764 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz\n", freq);
5765 
5766 	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5767 		DRM_ERROR("failed to inform PCU about cdclk change\n");
5768 		return;
5769 	}
5770 
5771 	/* set CDCLK_CTL */
5772 	switch (freq) {
5773 	case 450000:
5774 	case 432000:
5775 		freq_select = CDCLK_FREQ_450_432;
5776 		pcu_ack = 1;
5777 		break;
5778 	case 540000:
5779 		freq_select = CDCLK_FREQ_540;
5780 		pcu_ack = 2;
5781 		break;
5782 	case 308570:
5783 	case 337500:
5784 	default:
5785 		freq_select = CDCLK_FREQ_337_308;
5786 		pcu_ack = 0;
5787 		break;
5788 	case 617140:
5789 	case 675000:
5790 		freq_select = CDCLK_FREQ_675_617;
5791 		pcu_ack = 3;
5792 		break;
5793 	}
5794 
5795 	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5796 	POSTING_READ(CDCLK_CTL);
5797 
5798 	/* inform PCU of the change */
5799 	mutex_lock(&dev_priv->rps.hw_lock);
5800 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5801 	mutex_unlock(&dev_priv->rps.hw_lock);
5802 
5803 	intel_update_cdclk(dev);
5804 }
5805 
5806 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5807 {
5808 	/* disable DBUF power */
5809 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5810 	POSTING_READ(DBUF_CTL);
5811 
5812 	udelay(10);
5813 
5814 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5815 		DRM_ERROR("DBuf power disable timeout\n");
5816 
5817 	/*
5818 	 * DMC assumes ownership of LCPLL and will get confused if we touch it.
5819 	 */
5820 	if (dev_priv->csr.dmc_payload) {
5821 		/* disable DPLL0 */
5822 		I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
5823 					~LCPLL_PLL_ENABLE);
5824 		if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5825 			DRM_ERROR("Couldn't disable DPLL0\n");
5826 	}
5827 
5828 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
5829 }
5830 
5831 void skl_init_cdclk(struct drm_i915_private *dev_priv)
5832 {
5833 	u32 val;
5834 	unsigned int required_vco;
5835 
5836 	/* enable PCH reset handshake */
5837 	val = I915_READ(HSW_NDE_RSTWRN_OPT);
5838 	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
5839 
5840 	/* enable PG1 and Misc I/O */
5841 	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
5842 
5843 	/* DPLL0 not enabled (happens on early BIOS versions) */
5844 	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5845 		/* enable DPLL0 */
5846 		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5847 		skl_dpll0_enable(dev_priv, required_vco);
5848 	}
5849 
5850 	/* set CDCLK to the frequency the BIOS chose */
5851 	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5852 
5853 	/* enable DBUF power */
5854 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5855 	POSTING_READ(DBUF_CTL);
5856 
5857 	udelay(10);
5858 
5859 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5860 		DRM_ERROR("DBuf power enable timeout\n");
5861 }
5862 
5863 /* Adjust CDclk dividers to allow high res or save power if possible */
5864 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5865 {
5866 	struct drm_i915_private *dev_priv = dev->dev_private;
5867 	u32 val, cmd;
5868 
5869 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5870 					!= dev_priv->cdclk_freq);
5871 
5872 	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5873 		cmd = 2;
5874 	else if (cdclk == 266667)
5875 		cmd = 1;
5876 	else
5877 		cmd = 0;
5878 
5879 	mutex_lock(&dev_priv->rps.hw_lock);
5880 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5881 	val &= ~DSPFREQGUAR_MASK;
5882 	val |= (cmd << DSPFREQGUAR_SHIFT);
5883 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5884 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5885 		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5886 		     50)) {
5887 		DRM_ERROR("timed out waiting for CDclk change\n");
5888 	}
5889 	mutex_unlock(&dev_priv->rps.hw_lock);
5890 
5891 	mutex_lock(&dev_priv->sb_lock);
5892 
5893 	if (cdclk == 400000) {
5894 		u32 divider;
5895 
5896 		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
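		/* CCK stores the divisor minus one, derived from the doubled
		 * HPLL reference: divider = round(2 * hpll / cdclk) - 1 */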
5897 
5898 		/* adjust cdclk divider */
5899 		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5900 		val &= ~CCK_FREQUENCY_VALUES;
5901 		val |= divider;
5902 		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5903 
5904 		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5905 			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5906 			     50))
5907 			DRM_ERROR("timed out waiting for CDclk change\n");
5908 	}
5909 
5910 	/* adjust self-refresh exit latency value */
5911 	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5912 	val &= ~0x7f;
5913 
5914 	/*
5915 	 * For high bandwidth configs, we set a higher latency in the bunit
5916 	 * so that the core display fetch happens in time to avoid underruns.
5917 	 */
5918 	if (cdclk == 400000)
5919 		val |= 4500 / 250; /* 4.5 usec */
5920 	else
5921 		val |= 3000 / 250; /* 3.0 usec */
5922 	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5923 
5924 	mutex_unlock(&dev_priv->sb_lock);
5925 
5926 	intel_update_cdclk(dev);
5927 }
5928 
5929 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5930 {
5931 	struct drm_i915_private *dev_priv = dev->dev_private;
5932 	u32 val, cmd;
5933 
5934 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5935 						!= dev_priv->cdclk_freq);
5936 
5937 	switch (cdclk) {
5938 	case 333333:
5939 	case 320000:
5940 	case 266667:
5941 	case 200000:
5942 		break;
5943 	default:
5944 		MISSING_CASE(cdclk);
5945 		return;
5946 	}
5947 
5948 	/*
5949 	 * Specs are full of misinformation, but testing on actual
5950 	 * hardware has shown that we just need to write the desired
5951 	 * CCK divider into the Punit register.
5952 	 */
5953 	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5954 
5955 	mutex_lock(&dev_priv->rps.hw_lock);
5956 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5957 	val &= ~DSPFREQGUAR_MASK_CHV;
5958 	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
5959 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5960 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5961 		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
5962 		     50)) {
5963 		DRM_ERROR("timed out waiting for CDclk change\n");
5964 	}
5965 	mutex_unlock(&dev_priv->rps.hw_lock);
5966 
5967 	intel_update_cdclk(dev);
5968 }
5969 
5970 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5971 				 int max_pixclk)
5972 {
5973 	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5974 	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5975 
5976 	/*
5977 	 * Really only a few cases to deal with, as only 4 CDclks are supported:
5978 	 *   200MHz
5979 	 *   267MHz
5980 	 *   320/333MHz (depends on HPLL freq)
5981 	 *   400MHz (VLV only)
5982 	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5983 	 * of the lower bin and adjust if needed.
5984 	 *
5985 	 * We seem to get an unstable or solid color picture at 200MHz.
5986 	 * Not sure what's wrong. For now use 200MHz only when all pipes
5987 	 * are off.
5988 	 */
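	/* e.g. on VLV with a 320 MHz bin, max_pixclk > 320000 * 90 / 100
	 * = 288000 kHz forces the jump to 400 MHz */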
5989 	if (!IS_CHERRYVIEW(dev_priv) &&
5990 	    max_pixclk > freq_320*limit/100)
5991 		return 400000;
5992 	else if (max_pixclk > 266667*limit/100)
5993 		return freq_320;
5994 	else if (max_pixclk > 0)
5995 		return 266667;
5996 	else
5997 		return 200000;
5998 }
5999 
6000 static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
6001 			      int max_pixclk)
6002 {
6003 	/*
6004 	 * FIXME:
6005 	 * - remove the guardband, it's not needed on BXT
6006 	 * - set 19.2MHz bypass frequency if there are no active pipes
6007 	 */
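	/* 90% guardband, e.g. a 520000 kHz pixel clock exceeds
	 * 576000 * 9 / 10 = 518400 kHz and thus needs the 624 MHz bin */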
6008 	if (max_pixclk > 576000*9/10)
6009 		return 624000;
6010 	else if (max_pixclk > 384000*9/10)
6011 		return 576000;
6012 	else if (max_pixclk > 288000*9/10)
6013 		return 384000;
6014 	else if (max_pixclk > 144000*9/10)
6015 		return 288000;
6016 	else
6017 		return 144000;
6018 }
6019 
6020 /* Compute the max pixel clock for the new configuration. Uses the atomic
6021  * state if that's non-NULL, otherwise looks at the current state. */
6022 static int intel_mode_max_pixclk(struct drm_device *dev,
6023 				 struct drm_atomic_state *state)
6024 {
6025 	struct intel_crtc *intel_crtc;
6026 	struct intel_crtc_state *crtc_state;
6027 	int max_pixclk = 0;
6028 
6029 	for_each_intel_crtc(dev, intel_crtc) {
6030 		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6031 		if (IS_ERR(crtc_state))
6032 			return PTR_ERR(crtc_state);
6033 
6034 		if (!crtc_state->base.enable)
6035 			continue;
6036 
6037 		max_pixclk = max(max_pixclk,
6038 				 crtc_state->base.adjusted_mode.crtc_clock);
6039 	}
6040 
6041 	return max_pixclk;
6042 }
6043 
6044 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6045 {
6046 	struct drm_device *dev = state->dev;
6047 	struct drm_i915_private *dev_priv = dev->dev_private;
6048 	int max_pixclk = intel_mode_max_pixclk(dev, state);
6049 
6050 	if (max_pixclk < 0)
6051 		return max_pixclk;
6052 
6053 	to_intel_atomic_state(state)->cdclk =
6054 		valleyview_calc_cdclk(dev_priv, max_pixclk);
6055 
6056 	return 0;
6057 }
6058 
6059 static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
6060 {
6061 	struct drm_device *dev = state->dev;
6062 	struct drm_i915_private *dev_priv = dev->dev_private;
6063 	int max_pixclk = intel_mode_max_pixclk(dev, state);
6064 
6065 	if (max_pixclk < 0)
6066 		return max_pixclk;
6067 
6068 	to_intel_atomic_state(state)->cdclk =
6069 		broxton_calc_cdclk(dev_priv, max_pixclk);
6070 
6071 	return 0;
6072 }
6073 
6074 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
6075 {
6076 	unsigned int credits, default_credits;
6077 
6078 	if (IS_CHERRYVIEW(dev_priv))
6079 		default_credits = PFI_CREDIT(12);
6080 	else
6081 		default_credits = PFI_CREDIT(8);
6082 
6083 	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
6084 		/* CHV suggested value is 31 or 63 */
6085 		if (IS_CHERRYVIEW(dev_priv))
6086 			credits = PFI_CREDIT_63;
6087 		else
6088 			credits = PFI_CREDIT(15);
6089 	} else {
6090 		credits = default_credits;
6091 	}
6092 
6093 	/*
6094 	 * WA - write default credits before re-programming
6095 	 * FIXME: should we also set the resend bit here?
6096 	 */
6097 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6098 		   default_credits);
6099 
6100 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6101 		   credits | PFI_CREDIT_RESEND);
6102 
6103 	/*
6104 	 * FIXME is this guaranteed to clear
6105 	 * immediately or should we poll for it?
6106 	 */
6107 	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6108 }
6109 
6110 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
6111 {
6112 	struct drm_device *dev = old_state->dev;
6113 	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
6114 	struct drm_i915_private *dev_priv = dev->dev_private;
6115 
6116 	/*
6117 	 * FIXME: We can end up here with all power domains off, yet
6118 	 * with a CDCLK frequency other than the minimum. To account
6119 	 * for this take the PIPE-A power domain, which covers the HW
6120 	 * blocks needed for the following programming. This can be
6121 	 * removed once it's guaranteed that we get here either with
6122 	 * the minimum CDCLK set, or the required power domains
6123 	 * enabled.
6124 	 */
6125 	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
6126 
6127 	if (IS_CHERRYVIEW(dev))
6128 		cherryview_set_cdclk(dev, req_cdclk);
6129 	else
6130 		valleyview_set_cdclk(dev, req_cdclk);
6131 
6132 	vlv_program_pfi_credits(dev_priv);
6133 
6134 	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
6135 }
6136 
6137 static void valleyview_crtc_enable(struct drm_crtc *crtc)
6138 {
6139 	struct drm_device *dev = crtc->dev;
6140 	struct drm_i915_private *dev_priv = to_i915(dev);
6141 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6142 	struct intel_encoder *encoder;
6143 	int pipe = intel_crtc->pipe;
6144 	bool is_dsi;
6145 
6146 	if (WARN_ON(intel_crtc->active))
6147 		return;
6148 
6149 	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
6150 
6151 	if (intel_crtc->config->has_dp_encoder)
6152 		intel_dp_set_m_n(intel_crtc, M1_N1);
6153 
6154 	intel_set_pipe_timings(intel_crtc);
6155 
6156 	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6157 		struct drm_i915_private *dev_priv = dev->dev_private;
6158 
6159 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6160 		I915_WRITE(CHV_CANVAS(pipe), 0);
6161 	}
6162 
6163 	i9xx_set_pipeconf(intel_crtc);
6164 
6165 	intel_crtc->active = true;
6166 
6167 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6168 
6169 	for_each_encoder_on_crtc(dev, crtc, encoder)
6170 		if (encoder->pre_pll_enable)
6171 			encoder->pre_pll_enable(encoder);
6172 
6173 	if (!is_dsi) {
6174 		if (IS_CHERRYVIEW(dev)) {
6175 			chv_prepare_pll(intel_crtc, intel_crtc->config);
6176 			chv_enable_pll(intel_crtc, intel_crtc->config);
6177 		} else {
6178 			vlv_prepare_pll(intel_crtc, intel_crtc->config);
6179 			vlv_enable_pll(intel_crtc, intel_crtc->config);
6180 		}
6181 	}
6182 
6183 	for_each_encoder_on_crtc(dev, crtc, encoder)
6184 		if (encoder->pre_enable)
6185 			encoder->pre_enable(encoder);
6186 
6187 	i9xx_pfit_enable(intel_crtc);
6188 
6189 	intel_crtc_load_lut(crtc);
6190 
6191 	intel_enable_pipe(intel_crtc);
6192 
6193 	assert_vblank_disabled(crtc);
6194 	drm_crtc_vblank_on(crtc);
6195 
6196 	for_each_encoder_on_crtc(dev, crtc, encoder)
6197 		encoder->enable(encoder);
6198 }
6199 
6200 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6201 {
6202 	struct drm_device *dev = crtc->base.dev;
6203 	struct drm_i915_private *dev_priv = dev->dev_private;
6204 
6205 	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6206 	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6207 }
6208 
6209 static void i9xx_crtc_enable(struct drm_crtc *crtc)
6210 {
6211 	struct drm_device *dev = crtc->dev;
6212 	struct drm_i915_private *dev_priv = to_i915(dev);
6213 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6214 	struct intel_encoder *encoder;
6215 	int pipe = intel_crtc->pipe;
6216 
6217 	if (WARN_ON(intel_crtc->active))
6218 		return;
6219 
6220 	i9xx_set_pll_dividers(intel_crtc);
6221 
6222 	if (intel_crtc->config->has_dp_encoder)
6223 		intel_dp_set_m_n(intel_crtc, M1_N1);
6224 
6225 	intel_set_pipe_timings(intel_crtc);
6226 
6227 	i9xx_set_pipeconf(intel_crtc);
6228 
6229 	intel_crtc->active = true;
6230 
6231 	if (!IS_GEN2(dev))
6232 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6233 
6234 	for_each_encoder_on_crtc(dev, crtc, encoder)
6235 		if (encoder->pre_enable)
6236 			encoder->pre_enable(encoder);
6237 
6238 	i9xx_enable_pll(intel_crtc);
6239 
6240 	i9xx_pfit_enable(intel_crtc);
6241 
6242 	intel_crtc_load_lut(crtc);
6243 
6244 	intel_update_watermarks(crtc);
6245 	intel_enable_pipe(intel_crtc);
6246 
6247 	assert_vblank_disabled(crtc);
6248 	drm_crtc_vblank_on(crtc);
6249 
6250 	for_each_encoder_on_crtc(dev, crtc, encoder)
6251 		encoder->enable(encoder);
6252 }
6253 
6254 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6255 {
6256 	struct drm_device *dev = crtc->base.dev;
6257 	struct drm_i915_private *dev_priv = dev->dev_private;
6258 
6259 	if (!crtc->config->gmch_pfit.control)
6260 		return;
6261 
6262 	assert_pipe_disabled(dev_priv, crtc->pipe);
6263 
6264 	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6265 			 I915_READ(PFIT_CONTROL));
6266 	I915_WRITE(PFIT_CONTROL, 0);
6267 }
6268 
6269 static void i9xx_crtc_disable(struct drm_crtc *crtc)
6270 {
6271 	struct drm_device *dev = crtc->dev;
6272 	struct drm_i915_private *dev_priv = dev->dev_private;
6273 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6274 	struct intel_encoder *encoder;
6275 	int pipe = intel_crtc->pipe;
6276 
6277 	/*
6278 	 * On gen2 planes are double buffered but the pipe isn't, so we must
6279 	 * wait for planes to fully turn off before disabling the pipe.
6280 	 * We also need to wait on all gmch platforms because of the
6281 	 * self-refresh mode constraint explained above.
6282 	 */
6283 	intel_wait_for_vblank(dev, pipe);
6284 
6285 	for_each_encoder_on_crtc(dev, crtc, encoder)
6286 		encoder->disable(encoder);
6287 
6288 	drm_crtc_vblank_off(crtc);
6289 	assert_vblank_disabled(crtc);
6290 
6291 	intel_disable_pipe(intel_crtc);
6292 
6293 	i9xx_pfit_disable(intel_crtc);
6294 
6295 	for_each_encoder_on_crtc(dev, crtc, encoder)
6296 		if (encoder->post_disable)
6297 			encoder->post_disable(encoder);
6298 
6299 	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
6300 		if (IS_CHERRYVIEW(dev))
6301 			chv_disable_pll(dev_priv, pipe);
6302 		else if (IS_VALLEYVIEW(dev))
6303 			vlv_disable_pll(dev_priv, pipe);
6304 		else
6305 			i9xx_disable_pll(intel_crtc);
6306 	}
6307 
6308 	for_each_encoder_on_crtc(dev, crtc, encoder)
6309 		if (encoder->post_pll_disable)
6310 			encoder->post_pll_disable(encoder);
6311 
6312 	if (!IS_GEN2(dev))
6313 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6314 }
6315 
6316 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6317 {
6318 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6319 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6320 	enum intel_display_power_domain domain;
6321 	unsigned long domains;
6322 
6323 	if (!intel_crtc->active)
6324 		return;
6325 
6326 	if (to_intel_plane_state(crtc->primary->state)->visible) {
6327 		intel_crtc_wait_for_pending_flips(crtc);
6328 		intel_pre_disable_primary(crtc);
6329 
6330 		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6331 		to_intel_plane_state(crtc->primary->state)->visible = false;
6332 	}
6333 
6334 	dev_priv->display.crtc_disable(crtc);
6335 	intel_crtc->active = false;
6336 	intel_update_watermarks(crtc);
6337 	intel_disable_shared_dpll(intel_crtc);
6338 
6339 	domains = intel_crtc->enabled_power_domains;
6340 	for_each_power_domain(domain, domains)
6341 		intel_display_power_put(dev_priv, domain);
6342 	intel_crtc->enabled_power_domains = 0;
6343 }
6344 
6345 /*
6346  * turn all crtcs off, but do not adjust state
6347  * This has to be paired with a call to intel_modeset_setup_hw_state.
6348  */
6349 int intel_display_suspend(struct drm_device *dev)
6350 {
6351 	struct drm_mode_config *config = &dev->mode_config;
6352 	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
6353 	struct drm_atomic_state *state;
6354 	struct drm_crtc *crtc;
6355 	unsigned crtc_mask = 0;
6356 	int ret = 0;
6357 
6358 	if (WARN_ON(!ctx))
6359 		return 0;
6360 
6361 	lockdep_assert_held(&ctx->ww_ctx);
6362 	state = drm_atomic_state_alloc(dev);
6363 	if (WARN_ON(!state))
6364 		return -ENOMEM;
6365 
6366 	state->acquire_ctx = ctx;
6367 	state->allow_modeset = true;
6368 
6369 	for_each_crtc(dev, crtc) {
6370 		struct drm_crtc_state *crtc_state =
6371 			drm_atomic_get_crtc_state(state, crtc);
6372 
6373 		ret = PTR_ERR_OR_ZERO(crtc_state);
6374 		if (ret)
6375 			goto free;
6376 
6377 		if (!crtc_state->active)
6378 			continue;
6379 
6380 		crtc_state->active = false;
6381 		crtc_mask |= 1 << drm_crtc_index(crtc);
6382 	}
6383 
6384 	if (crtc_mask) {
6385 		ret = drm_atomic_commit(state);
6386 
6387 		if (!ret) {
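			/* Keep the crtcs flagged active in the state so that
			 * the paired intel_modeset_setup_hw_state() call can
			 * re-enable them on resume. */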
6388 			for_each_crtc(dev, crtc)
6389 				if (crtc_mask & (1 << drm_crtc_index(crtc)))
6390 					crtc->state->active = true;
6391 
6392 			return ret;
6393 		}
6394 	}
6395 
6396 free:
6397 	if (ret)
6398 		DRM_ERROR("Suspending crtcs failed with %i\n", ret);
6399 	drm_atomic_state_free(state);
6400 	return ret;
6401 }
6402 
6403 void intel_encoder_destroy(struct drm_encoder *encoder)
6404 {
6405 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6406 
6407 	drm_encoder_cleanup(encoder);
6408 	kfree(intel_encoder);
6409 }
6410 
6411 /* Cross check the actual hw state with our own modeset state tracking (and its
6412  * internal consistency). */
6413 static void intel_connector_check_state(struct intel_connector *connector)
6414 {
6415 	struct drm_crtc *crtc = connector->base.state->crtc;
6416 
6417 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6418 		      connector->base.base.id,
6419 		      connector->base.name);
6420 
6421 	if (connector->get_hw_state(connector)) {
6422 		struct intel_encoder *encoder = connector->encoder;
6423 		struct drm_connector_state *conn_state = connector->base.state;
6424 
6425 		I915_STATE_WARN(!crtc,
6426 			 "connector enabled without attached crtc\n");
6427 
6428 		if (!crtc)
6429 			return;
6430 
6431 		I915_STATE_WARN(!crtc->state->active,
6432 		      "connector is active, but attached crtc isn't\n");
6433 
6434 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6435 			return;
6436 
6437 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6438 			"atomic encoder doesn't match attached encoder\n");
6439 
6440 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6441 			"attached encoder crtc differs from connector crtc\n");
6442 	} else {
6443 		I915_STATE_WARN(crtc && crtc->state->active,
6444 			"attached crtc is active, but connector isn't\n");
6445 		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6446 			"best encoder set without crtc!\n");
6447 	}
6448 }
6449 
6450 int intel_connector_init(struct intel_connector *connector)
6451 {
6452 	struct drm_connector_state *connector_state;
6453 
6454 	connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
6455 	if (!connector_state)
6456 		return -ENOMEM;
6457 
6458 	connector->base.state = connector_state;
6459 	return 0;
6460 }
6461 
6462 struct intel_connector *intel_connector_alloc(void)
6463 {
6464 	struct intel_connector *connector;
6465 
6466 	connector = kzalloc(sizeof *connector, GFP_KERNEL);
6467 	if (!connector)
6468 		return NULL;
6469 
6470 	if (intel_connector_init(connector) < 0) {
6471 		kfree(connector);
6472 		return NULL;
6473 	}
6474 
6475 	return connector;
6476 }
6477 
6478 /* Simple connector->get_hw_state implementation for encoders that support only
6479  * one connector and no cloning and hence the encoder state determines the state
6480  * of the connector. */
6481 bool intel_connector_get_hw_state(struct intel_connector *connector)
6482 {
6483 	enum pipe pipe = 0;
6484 	struct intel_encoder *encoder = connector->encoder;
6485 
6486 	return encoder->get_hw_state(encoder, &pipe);
6487 }
6488 
6489 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6490 {
6491 	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6492 		return crtc_state->fdi_lanes;
6493 
6494 	return 0;
6495 }
6496 
6497 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
6498 				     struct intel_crtc_state *pipe_config)
6499 {
6500 	struct drm_atomic_state *state = pipe_config->base.state;
6501 	struct intel_crtc *other_crtc;
6502 	struct intel_crtc_state *other_crtc_state;
6503 
6504 	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6505 		      pipe_name(pipe), pipe_config->fdi_lanes);
6506 	if (pipe_config->fdi_lanes > 4) {
6507 		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6508 			      pipe_name(pipe), pipe_config->fdi_lanes);
6509 		return -EINVAL;
6510 	}
6511 
6512 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6513 		if (pipe_config->fdi_lanes > 2) {
6514 			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6515 				      pipe_config->fdi_lanes);
6516 			return -EINVAL;
6517 		} else {
6518 			return 0;
6519 		}
6520 	}
6521 
6522 	if (INTEL_INFO(dev)->num_pipes == 2)
6523 		return 0;
6524 
6525 	/* Ivybridge 3 pipe is really complicated */
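	/* Pipes B and C share the FDI lanes: B may use more than two only
	 * while C is idle, and enabling C (max two lanes) requires B to be
	 * using at most two. */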
6526 	switch (pipe) {
6527 	case PIPE_A:
6528 		return 0;
6529 	case PIPE_B:
6530 		if (pipe_config->fdi_lanes <= 2)
6531 			return 0;
6532 
6533 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6534 		other_crtc_state =
6535 			intel_atomic_get_crtc_state(state, other_crtc);
6536 		if (IS_ERR(other_crtc_state))
6537 			return PTR_ERR(other_crtc_state);
6538 
6539 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6540 			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6541 				      pipe_name(pipe), pipe_config->fdi_lanes);
6542 			return -EINVAL;
6543 		}
6544 		return 0;
6545 	case PIPE_C:
6546 		if (pipe_config->fdi_lanes > 2) {
6547 			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6548 				      pipe_name(pipe), pipe_config->fdi_lanes);
6549 			return -EINVAL;
6550 		}
6551 
6552 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6553 		other_crtc_state =
6554 			intel_atomic_get_crtc_state(state, other_crtc);
6555 		if (IS_ERR(other_crtc_state))
6556 			return PTR_ERR(other_crtc_state);
6557 
6558 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6559 			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6560 			return -EINVAL;
6561 		}
6562 		return 0;
6563 	default:
6564 		BUG();
6565 	}
6566 }
6567 
6568 #define RETRY 1
6569 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6570 				       struct intel_crtc_state *pipe_config)
6571 {
6572 	struct drm_device *dev = intel_crtc->base.dev;
6573 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6574 	int lane, link_bw, fdi_dotclock, ret;
6575 	bool needs_recompute = false;
6576 
6577 retry:
6578 	/* FDI is a binary signal running at ~2.7GHz, encoding
6579 	 * each output octet as 10 bits. The actual frequency
6580 	 * is stored as a divider into a 100MHz clock, and the
6581 	 * mode pixel clock is stored in units of 1KHz.
6582 	 * Hence the bw of each lane in terms of the mode signal
6583 	 * is:
6584 	 */
6585 	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
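	/*
	 * Worked example (illustrative, assuming the common non-ILK case
	 * where intel_fdi_link_freq() returns 27 for the 2.7 GHz link):
	 * link_bw = 27 * 100000 / 10 = 270000, i.e. 270000 kHz worth of
	 * symbols per lane.
	 */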
6586 
6587 	fdi_dotclock = adjusted_mode->crtc_clock;
6588 
6589 	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6590 					   pipe_config->pipe_bpp);
6591 
6592 	pipe_config->fdi_lanes = lane;
6593 
6594 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6595 			       link_bw, &pipe_config->fdi_m_n);
6596 
6597 	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
6598 				       intel_crtc->pipe, pipe_config);
6599 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6600 		pipe_config->pipe_bpp -= 2*3;
6601 		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6602 			      pipe_config->pipe_bpp);
6603 		needs_recompute = true;
6604 		pipe_config->bw_constrained = true;
6605 
6606 		goto retry;
6607 	}
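	/*
	 * Illustrative walk-through: a mode that fails the lane check at
	 * 30 bpp is retried at 24 bpp and then 18 bpp; each retry drops
	 * 2 bits per component (2*3 bpp). Once 18 bpp (6*3) has been tried
	 * and still fails, the -EINVAL from the lane check is returned.
	 */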
6608 
6609 	if (needs_recompute)
6610 		return RETRY;
6611 
6612 	return ret;
6613 }
6614 
6615 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6616 				     struct intel_crtc_state *pipe_config)
6617 {
6618 	if (pipe_config->pipe_bpp > 24)
6619 		return false;
6620 
6621 	/* HSW can handle pixel rate up to cdclk? */
6622 	if (IS_HASWELL(dev_priv->dev))
6623 		return true;
6624 
6625 	/*
6626 	 * We compare against max which means we must take
6627 	 * the increased cdclk requirement into account when
6628 	 * calculating the new cdclk.
6629 	 *
6630 	 * Should measure whether using a lower cdclk w/o IPS would be a win.
6631 	 */
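	/*
	 * Illustrative numbers: with max_cdclk_freq == 675000 kHz the pixel
	 * rate may not exceed 675000 * 95 / 100 = 641250 kHz for IPS to
	 * stay enabled.
	 */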
6632 	return ilk_pipe_pixel_rate(pipe_config) <=
6633 		dev_priv->max_cdclk_freq * 95 / 100;
6634 }
6635 
6636 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6637 				   struct intel_crtc_state *pipe_config)
6638 {
6639 	struct drm_device *dev = crtc->base.dev;
6640 	struct drm_i915_private *dev_priv = dev->dev_private;
6641 
6642 	pipe_config->ips_enabled = i915.enable_ips &&
6643 		hsw_crtc_supports_ips(crtc) &&
6644 		pipe_config_supports_ips(dev_priv, pipe_config);
6645 }
6646 
6647 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6648 				     struct intel_crtc_state *pipe_config)
6649 {
6650 	struct drm_device *dev = crtc->base.dev;
6651 	struct drm_i915_private *dev_priv = dev->dev_private;
6652 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6653 
6654 	/* FIXME should check pixel clock limits on all platforms */
6655 	if (INTEL_INFO(dev)->gen < 4) {
6656 		int clock_limit = dev_priv->max_cdclk_freq;
6657 
6658 		/*
6659 		 * Enable pixel doubling when the dot clock
6660 		 * is > 90% of the (display) core speed.
6661 		 *
6662 		 * GDG double wide on either pipe,
6663 		 * otherwise pipe A only.
6664 		 */
6665 		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
6666 		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
6667 			clock_limit *= 2;
6668 			pipe_config->double_wide = true;
6669 		}
6670 
6671 		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
6672 			return -EINVAL;
6673 	}
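	/*
	 * Worked example: with a 200 MHz core display clock the single-wide
	 * limit is 180000 kHz (90%); enabling double wide doubles the
	 * limit, so dot clocks up to 360000 kHz pass the check above.
	 */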
6674 
6675 	/*
6676 	 * Pipe horizontal size must be even in:
6677 	 * - DVO ganged mode
6678 	 * - LVDS dual channel mode
6679 	 * - Double wide pipe
6680 	 */
6681 	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6682 	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6683 		pipe_config->pipe_src_w &= ~1;
6684 
6685 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
6686 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6687 	 */
6688 	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6689 		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6690 		return -EINVAL;
6691 
6692 	if (HAS_IPS(dev))
6693 		hsw_compute_ips_config(crtc, pipe_config);
6694 
6695 	if (pipe_config->has_pch_encoder)
6696 		return ironlake_fdi_compute_config(crtc, pipe_config);
6697 
6698 	return 0;
6699 }
6700 
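/*
 * Illustrative decode of the function below: if DPLL0 runs the 8640 MHz
 * VCO (link rate 2160 or 1080), the CDCLK_FREQ_337_308 selection means
 * 308570 kHz; on the 8100 MHz VCO the same field selects 337500 kHz.
 */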
6701 static int skylake_get_display_clock_speed(struct drm_device *dev)
6702 {
6703 	struct drm_i915_private *dev_priv = to_i915(dev);
6704 	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6705 	uint32_t cdctl = I915_READ(CDCLK_CTL);
6706 	uint32_t linkrate;
6707 
6708 	if (!(lcpll1 & LCPLL_PLL_ENABLE))
6709 		return 24000; /* 24MHz is the cd freq with NSSC ref */
6710 
6711 	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6712 		return 540000;
6713 
6714 	linkrate = (I915_READ(DPLL_CTRL1) &
6715 		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6716 
6717 	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6718 	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6719 		/* vco 8640 */
6720 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6721 		case CDCLK_FREQ_450_432:
6722 			return 432000;
6723 		case CDCLK_FREQ_337_308:
6724 			return 308570;
6725 		case CDCLK_FREQ_675_617:
6726 			return 617140;
6727 		default:
6728 			WARN(1, "Unknown cd freq selection\n");
6729 		}
6730 	} else {
6731 		/* vco 8100 */
6732 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6733 		case CDCLK_FREQ_450_432:
6734 			return 450000;
6735 		case CDCLK_FREQ_337_308:
6736 			return 337500;
6737 		case CDCLK_FREQ_675_617:
6738 			return 675000;
6739 		default:
6740 			WARN(1, "Unknown cd freq selection\n");
6741 		}
6742 	}
6743 
6744 	/* error case, behave as if DPLL0 isn't enabled */
6745 	return 24000;
6746 }
6747 
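/*
 * Worked example for the function below: a DE PLL ratio of 60 gives
 * 19200 * 60 / 2 = 576000 kHz, and the CD2X divider then selects
 * 576000 (div 1), 384000 (div 1.5), 288000 (div 2) or 144000 kHz (div 4).
 */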
6748 static int broxton_get_display_clock_speed(struct drm_device *dev)
6749 {
6750 	struct drm_i915_private *dev_priv = to_i915(dev);
6751 	uint32_t cdctl = I915_READ(CDCLK_CTL);
6752 	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6753 	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6754 	int cdclk;
6755 
6756 	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6757 		return 19200;
6758 
6759 	cdclk = 19200 * pll_ratio / 2;
6760 
6761 	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6762 	case BXT_CDCLK_CD2X_DIV_SEL_1:
6763 		return cdclk;  /* 576MHz or 624MHz */
6764 	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6765 		return cdclk * 2 / 3; /* 384MHz */
6766 	case BXT_CDCLK_CD2X_DIV_SEL_2:
6767 		return cdclk / 2; /* 288MHz */
6768 	case BXT_CDCLK_CD2X_DIV_SEL_4:
6769 		return cdclk / 4; /* 144MHz */
6770 	}
6771 
6772 	/* error case, behave as if DE PLL isn't enabled */
6773 	return 19200;
6774 }
6775 
6776 static int broadwell_get_display_clock_speed(struct drm_device *dev)
6777 {
6778 	struct drm_i915_private *dev_priv = dev->dev_private;
6779 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6780 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6781 
6782 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6783 		return 800000;
6784 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6785 		return 450000;
6786 	else if (freq == LCPLL_CLK_FREQ_450)
6787 		return 450000;
6788 	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6789 		return 540000;
6790 	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6791 		return 337500;
6792 	else
6793 		return 675000;
6794 }
6795 
6796 static int haswell_get_display_clock_speed(struct drm_device *dev)
6797 {
6798 	struct drm_i915_private *dev_priv = dev->dev_private;
6799 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6800 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6801 
6802 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6803 		return 800000;
6804 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6805 		return 450000;
6806 	else if (freq == LCPLL_CLK_FREQ_450)
6807 		return 450000;
6808 	else if (IS_HSW_ULT(dev))
6809 		return 337500;
6810 	else
6811 		return 540000;
6812 }
6813 
6814 static int valleyview_get_display_clock_speed(struct drm_device *dev)
6815 {
6816 	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6817 				      CCK_DISPLAY_CLOCK_CONTROL);
6818 }
6819 
6820 static int ilk_get_display_clock_speed(struct drm_device *dev)
6821 {
6822 	return 450000;
6823 }
6824 
6825 static int i945_get_display_clock_speed(struct drm_device *dev)
6826 {
6827 	return 400000;
6828 }
6829 
6830 static int i915_get_display_clock_speed(struct drm_device *dev)
6831 {
6832 	return 333333;
6833 }
6834 
6835 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6836 {
6837 	return 200000;
6838 }
6839 
6840 static int pnv_get_display_clock_speed(struct drm_device *dev)
6841 {
6842 	u16 gcfgc = 0;
6843 
6844 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6845 
6846 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6847 	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
6848 		return 266667;
6849 	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
6850 		return 333333;
6851 	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
6852 		return 444444;
6853 	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6854 		return 200000;
6855 	default:
6856 		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6857 	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
6858 		return 133333;
6859 	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
6860 		return 166667;
6861 	}
6862 }
6863 
6864 static int i915gm_get_display_clock_speed(struct drm_device *dev)
6865 {
6866 	u16 gcfgc = 0;
6867 
6868 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6869 
6870 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6871 		return 133333;
6872 	else {
6873 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6874 		case GC_DISPLAY_CLOCK_333_MHZ:
6875 			return 333333;
6876 		default:
6877 		case GC_DISPLAY_CLOCK_190_200_MHZ:
6878 			return 190000;
6879 		}
6880 	}
6881 }
6882 
6883 static int i865_get_display_clock_speed(struct drm_device *dev)
6884 {
6885 	return 266667;
6886 }
6887 
6888 static int i85x_get_display_clock_speed(struct drm_device *dev)
6889 {
6890 	u16 hpllcc = 0;
6891 
6892 	/*
6893 	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6894 	 * encoding is different :(
6895 	 * FIXME is this the right way to detect 852GM/852GMV?
6896 	 */
6897 	if (dev->pdev->revision == 0x1)
6898 		return 133333;
6899 
6900 	pci_bus_read_config_word(dev->pdev->bus,
6901 				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6902 
6903 	/* Assume that the hardware is in the high speed state.  This
6904 	 * should be the default.
6905 	 */
6906 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6907 	case GC_CLOCK_133_200:
6908 	case GC_CLOCK_133_200_2:
6909 	case GC_CLOCK_100_200:
6910 		return 200000;
6911 	case GC_CLOCK_166_250:
6912 		return 250000;
6913 	case GC_CLOCK_100_133:
6914 		return 133333;
6915 	case GC_CLOCK_133_266:
6916 	case GC_CLOCK_133_266_2:
6917 	case GC_CLOCK_166_266:
6918 		return 266667;
6919 	}
6920 
6921 	/* Shouldn't happen */
6922 	return 0;
6923 }
6924 
6925 static int i830_get_display_clock_speed(struct drm_device *dev)
6926 {
6927 	return 133333;
6928 }
6929 
6930 static unsigned int intel_hpll_vco(struct drm_device *dev)
6931 {
6932 	struct drm_i915_private *dev_priv = dev->dev_private;
6933 	static const unsigned int blb_vco[8] = {
6934 		[0] = 3200000,
6935 		[1] = 4000000,
6936 		[2] = 5333333,
6937 		[3] = 4800000,
6938 		[4] = 6400000,
6939 	};
6940 	static const unsigned int pnv_vco[8] = {
6941 		[0] = 3200000,
6942 		[1] = 4000000,
6943 		[2] = 5333333,
6944 		[3] = 4800000,
6945 		[4] = 2666667,
6946 	};
6947 	static const unsigned int cl_vco[8] = {
6948 		[0] = 3200000,
6949 		[1] = 4000000,
6950 		[2] = 5333333,
6951 		[3] = 6400000,
6952 		[4] = 3333333,
6953 		[5] = 3566667,
6954 		[6] = 4266667,
6955 	};
6956 	static const unsigned int elk_vco[8] = {
6957 		[0] = 3200000,
6958 		[1] = 4000000,
6959 		[2] = 5333333,
6960 		[3] = 4800000,
6961 	};
6962 	static const unsigned int ctg_vco[8] = {
6963 		[0] = 3200000,
6964 		[1] = 4000000,
6965 		[2] = 5333333,
6966 		[3] = 6400000,
6967 		[4] = 2666667,
6968 		[5] = 4266667,
6969 	};
6970 	const unsigned int *vco_table;
6971 	unsigned int vco;
6972 	uint8_t tmp = 0;
6973 
6974 	/* FIXME other chipsets? */
6975 	if (IS_GM45(dev))
6976 		vco_table = ctg_vco;
6977 	else if (IS_G4X(dev))
6978 		vco_table = elk_vco;
6979 	else if (IS_CRESTLINE(dev))
6980 		vco_table = cl_vco;
6981 	else if (IS_PINEVIEW(dev))
6982 		vco_table = pnv_vco;
6983 	else if (IS_G33(dev))
6984 		vco_table = blb_vco;
6985 	else
6986 		return 0;
6987 
6988 	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
6989 
6990 	vco = vco_table[tmp & 0x7];
6991 	if (vco == 0)
6992 		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
6993 	else
6994 		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
6995 
6996 	return vco;
6997 }
6998 
6999 static int gm45_get_display_clock_speed(struct drm_device *dev)
7000 {
7001 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7002 	uint16_t tmp = 0;
7003 
7004 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7005 
7006 	cdclk_sel = (tmp >> 12) & 0x1;
7007 
7008 	switch (vco) {
7009 	case 2666667:
7010 	case 4000000:
7011 	case 5333333:
7012 		return cdclk_sel ? 333333 : 222222;
7013 	case 3200000:
7014 		return cdclk_sel ? 320000 : 228571;
7015 	default:
7016 		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7017 		return 222222;
7018 	}
7019 }
7020 
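/*
 * Illustrative decode of the function below: with a 4000000 kHz HPLL VCO
 * and cdclk_sel == 1, div_4000[1] == 12 applies, giving
 * DIV_ROUND_CLOSEST(4000000, 12) = 333333 kHz.
 */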
7021 static int i965gm_get_display_clock_speed(struct drm_device *dev)
7022 {
7023 	static const uint8_t div_3200[] = { 16, 10,  8 };
7024 	static const uint8_t div_4000[] = { 20, 12, 10 };
7025 	static const uint8_t div_5333[] = { 24, 16, 14 };
7026 	const uint8_t *div_table;
7027 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7028 	uint16_t tmp = 0;
7029 
7030 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7031 
7032 	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
7033 
7034 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7035 		goto fail;
7036 
7037 	switch (vco) {
7038 	case 3200000:
7039 		div_table = div_3200;
7040 		break;
7041 	case 4000000:
7042 		div_table = div_4000;
7043 		break;
7044 	case 5333333:
7045 		div_table = div_5333;
7046 		break;
7047 	default:
7048 		goto fail;
7049 	}
7050 
7051 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7052 
7053 fail:
7054 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7055 	return 200000;
7056 }
7057 
7058 static int g33_get_display_clock_speed(struct drm_device *dev)
7059 {
7060 	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7061 	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7062 	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7063 	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7064 	const uint8_t *div_table;
7065 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7066 	uint16_t tmp = 0;
7067 
7068 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7069 
7070 	cdclk_sel = (tmp >> 4) & 0x7;
7071 
7072 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7073 		goto fail;
7074 
7075 	switch (vco) {
7076 	case 3200000:
7077 		div_table = div_3200;
7078 		break;
7079 	case 4000000:
7080 		div_table = div_4000;
7081 		break;
7082 	case 4800000:
7083 		div_table = div_4800;
7084 		break;
7085 	case 5333333:
7086 		div_table = div_5333;
7087 		break;
7088 	default:
7089 		goto fail;
7090 	}
7091 
7092 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7093 
7094 fail:
7095 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7096 	return 190476;
7097 }
7098 
7099 static void
7100 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7101 {
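	/*
	 * Halving both values preserves the ratio while shrinking them into
	 * the 24-bit register fields; e.g. 0x3000000/0x2000000 reduces to
	 * 0xc00000/0x800000, still exactly 3:2.
	 */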
7102 	while (*num > DATA_LINK_M_N_MASK ||
7103 	       *den > DATA_LINK_M_N_MASK) {
7104 		*num >>= 1;
7105 		*den >>= 1;
7106 	}
7107 }
7108 
7109 static void compute_m_n(unsigned int m, unsigned int n,
7110 			uint32_t *ret_m, uint32_t *ret_n)
7111 {
7112 	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7113 	*ret_m = div_u64((uint64_t) m * *ret_n, n);
7114 	intel_reduce_m_n_ratio(ret_m, ret_n);
7115 }
7116 
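/*
 * Worked example for the function below (values are illustrative): a
 * 1080p60 mode over a 4-lane 2.7 GHz DP link at 24 bpp gives a data M/N
 * ratio of 24 * 148500 : 270000 * 4 * 8 = 0.4125 and a link M/N ratio of
 * 148500 : 270000 = 0.55; compute_m_n() scales each ratio so that N is a
 * power of two no larger than DATA_LINK_N_MAX.
 */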
7117 void
7118 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7119 		       int pixel_clock, int link_clock,
7120 		       struct intel_link_m_n *m_n)
7121 {
7122 	m_n->tu = 64;
7123 
7124 	compute_m_n(bits_per_pixel * pixel_clock,
7125 		    link_clock * nlanes * 8,
7126 		    &m_n->gmch_m, &m_n->gmch_n);
7127 
7128 	compute_m_n(pixel_clock, link_clock,
7129 		    &m_n->link_m, &m_n->link_n);
7130 }
7131 
7132 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7133 {
7134 	if (i915.panel_use_ssc >= 0)
7135 		return i915.panel_use_ssc != 0;
7136 	return dev_priv->vbt.lvds_use_ssc
7137 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7138 }
7139 
7140 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
7141 			   int num_connectors)
7142 {
7143 	struct drm_device *dev = crtc_state->base.crtc->dev;
7144 	struct drm_i915_private *dev_priv = dev->dev_private;
7145 	int refclk;
7146 
7147 	WARN_ON(!crtc_state->base.state);
7148 
7149 	if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
7150 		refclk = 100000;
7151 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7152 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7153 		refclk = dev_priv->vbt.lvds_ssc_freq;
7154 		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7155 	} else if (!IS_GEN2(dev)) {
7156 		refclk = 96000;
7157 	} else {
7158 		refclk = 48000;
7159 	}
7160 
7161 	return refclk;
7162 }
7163 
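/*
 * Illustrative FP register encodings for the two helpers below: Pineview
 * packs (1 << n) << 16 | m2, e.g. n=2, m2=8 -> 0x00040008; the generic
 * i9xx layout packs n << 16 | m1 << 8 | m2, e.g. n=2, m1=10, m2=8
 * -> 0x00020a08.
 */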
7164 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7165 {
7166 	return (1 << dpll->n) << 16 | dpll->m2;
7167 }
7168 
7169 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7170 {
7171 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7172 }
7173 
7174 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7175 				     struct intel_crtc_state *crtc_state,
7176 				     intel_clock_t *reduced_clock)
7177 {
7178 	struct drm_device *dev = crtc->base.dev;
7179 	u32 fp, fp2 = 0;
7180 
7181 	if (IS_PINEVIEW(dev)) {
7182 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7183 		if (reduced_clock)
7184 			fp2 = pnv_dpll_compute_fp(reduced_clock);
7185 	} else {
7186 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7187 		if (reduced_clock)
7188 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7189 	}
7190 
7191 	crtc_state->dpll_hw_state.fp0 = fp;
7192 
7193 	crtc->lowfreq_avail = false;
7194 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7195 	    reduced_clock) {
7196 		crtc_state->dpll_hw_state.fp1 = fp2;
7197 		crtc->lowfreq_avail = true;
7198 	} else {
7199 		crtc_state->dpll_hw_state.fp1 = fp;
7200 	}
7201 }
7202 
7203 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7204 		pipe)
7205 {
7206 	u32 reg_val;
7207 
7208 	/*
7209 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7210 	 * and set it to a reasonable value instead.
7211 	 */
7212 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7213 	reg_val &= 0xffffff00;
7214 	reg_val |= 0x00000030;
7215 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7216 
7217 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7218 	reg_val &= 0x8cffffff;
7219 	reg_val = 0x8c000000;
7220 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7221 
7222 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7223 	reg_val &= 0xffffff00;
7224 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7225 
7226 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7227 	reg_val &= 0x00ffffff;
7228 	reg_val |= 0xb0000000;
7229 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7230 }
7231 
7232 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7233 					 struct intel_link_m_n *m_n)
7234 {
7235 	struct drm_device *dev = crtc->base.dev;
7236 	struct drm_i915_private *dev_priv = dev->dev_private;
7237 	int pipe = crtc->pipe;
7238 
7239 	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7240 	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7241 	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7242 	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7243 }
7244 
7245 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7246 					 struct intel_link_m_n *m_n,
7247 					 struct intel_link_m_n *m2_n2)
7248 {
7249 	struct drm_device *dev = crtc->base.dev;
7250 	struct drm_i915_private *dev_priv = dev->dev_private;
7251 	int pipe = crtc->pipe;
7252 	enum transcoder transcoder = crtc->config->cpu_transcoder;
7253 
7254 	if (INTEL_INFO(dev)->gen >= 5) {
7255 		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7256 		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7257 		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7258 		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7259 		/* The M2_N2 registers exist only on gen < 8 (and CHV);
7260 		 * write them only there, and only when DRRS is supported,
7261 		 * so that the registers are not unnecessarily accessed.
7262 		 */
7263 		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
7264 			crtc->config->has_drrs) {
7265 			I915_WRITE(PIPE_DATA_M2(transcoder),
7266 					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7267 			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7268 			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7269 			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7270 		}
7271 	} else {
7272 		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7273 		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7274 		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7275 		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7276 	}
7277 }
7278 
7279 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7280 {
7281 	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7282 
7283 	if (m_n == M1_N1) {
7284 		dp_m_n = &crtc->config->dp_m_n;
7285 		dp_m2_n2 = &crtc->config->dp_m2_n2;
7286 	} else if (m_n == M2_N2) {
7287 
7288 		/*
7289 		 * M2_N2 registers are not supported. Hence m2_n2 divider value
7290 		 * needs to be programmed into M1_N1.
7291 		 */
7292 		dp_m_n = &crtc->config->dp_m2_n2;
7293 	} else {
7294 		DRM_ERROR("Unsupported divider value\n");
7295 		return;
7296 	}
7297 
7298 	if (crtc->config->has_pch_encoder)
7299 		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7300 	else
7301 		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7302 }
7303 
7304 static void vlv_compute_dpll(struct intel_crtc *crtc,
7305 			     struct intel_crtc_state *pipe_config)
7306 {
7307 	u32 dpll, dpll_md;
7308 
7309 	/*
7310 	 * Enable DPIO clock input. We should never disable the reference
7311 	 * clock for pipe B, since VGA hotplug / manual detection depends
7312 	 * on it.
7313 	 */
7314 	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
7315 		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
7316 	/* We should never disable this, set it here for state tracking */
7317 	if (crtc->pipe == PIPE_B)
7318 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7319 	dpll |= DPLL_VCO_ENABLE;
7320 	pipe_config->dpll_hw_state.dpll = dpll;
7321 
7322 	dpll_md = (pipe_config->pixel_multiplier - 1)
7323 		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7324 	pipe_config->dpll_hw_state.dpll_md = dpll_md;
7325 }
7326 
7327 static void vlv_prepare_pll(struct intel_crtc *crtc,
7328 			    const struct intel_crtc_state *pipe_config)
7329 {
7330 	struct drm_device *dev = crtc->base.dev;
7331 	struct drm_i915_private *dev_priv = dev->dev_private;
7332 	int pipe = crtc->pipe;
7333 	u32 mdiv;
7334 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
7335 	u32 coreclk, reg_val;
7336 
7337 	mutex_lock(&dev_priv->sb_lock);
7338 
7339 	bestn = pipe_config->dpll.n;
7340 	bestm1 = pipe_config->dpll.m1;
7341 	bestm2 = pipe_config->dpll.m2;
7342 	bestp1 = pipe_config->dpll.p1;
7343 	bestp2 = pipe_config->dpll.p2;
7344 
7345 	/* See eDP HDMI DPIO driver vbios notes doc */
7346 
7347 	/* PLL B needs special handling */
7348 	if (pipe == PIPE_B)
7349 		vlv_pllb_recal_opamp(dev_priv, pipe);
7350 
7351 	/* Set up Tx target for periodic Rcomp update */
7352 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7353 
7354 	/* Disable target IRef on PLL */
7355 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7356 	reg_val &= 0x00ffffff;
7357 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7358 
7359 	/* Disable fast lock */
7360 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7361 
7362 	/* Set idtafcrecal before PLL is enabled */
7363 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7364 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7365 	mdiv |= ((bestn << DPIO_N_SHIFT));
7366 	mdiv |= (1 << DPIO_K_SHIFT);
7367 
7368 	/*
7369 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7370 	 * but we don't support that).
7371 	 * Note: don't use the DAC post divider as it seems unstable.
7372 	 */
7373 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7374 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7375 
7376 	mdiv |= DPIO_ENABLE_CALIBRATION;
7377 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7378 
7379 	/* Set HBR and RBR LPF coefficients */
7380 	if (pipe_config->port_clock == 162000 ||
7381 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7382 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
7383 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7384 				 0x009f0003);
7385 	else
7386 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7387 				 0x00d0000f);
7388 
7389 	if (pipe_config->has_dp_encoder) {
7390 		/* Use SSC source */
7391 		if (pipe == PIPE_A)
7392 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7393 					 0x0df40000);
7394 		else
7395 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7396 					 0x0df70000);
7397 	} else { /* HDMI or VGA */
7398 		/* Use bend source */
7399 		if (pipe == PIPE_A)
7400 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7401 					 0x0df70000);
7402 		else
7403 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7404 					 0x0df40000);
7405 	}
7406 
7407 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7408 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7409 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7410 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7411 		coreclk |= 0x01000000;
7412 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7413 
7414 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7415 	mutex_unlock(&dev_priv->sb_lock);
7416 }
7417 
7418 static void chv_compute_dpll(struct intel_crtc *crtc,
7419 			     struct intel_crtc_state *pipe_config)
7420 {
7421 	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7422 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
7423 		DPLL_VCO_ENABLE;
7424 	if (crtc->pipe != PIPE_A)
7425 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7426 
7427 	pipe_config->dpll_hw_state.dpll_md =
7428 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7429 }
7430 
7431 static void chv_prepare_pll(struct intel_crtc *crtc,
7432 			    const struct intel_crtc_state *pipe_config)
7433 {
7434 	struct drm_device *dev = crtc->base.dev;
7435 	struct drm_i915_private *dev_priv = dev->dev_private;
7436 	int pipe = crtc->pipe;
7437 	int dpll_reg = DPLL(crtc->pipe);
7438 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
7439 	u32 loopfilter, tribuf_calcntr;
7440 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7441 	u32 dpio_val;
7442 	int vco;
7443 
7444 	bestn = pipe_config->dpll.n;
7445 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7446 	bestm1 = pipe_config->dpll.m1;
7447 	bestm2 = pipe_config->dpll.m2 >> 22;
7448 	bestp1 = pipe_config->dpll.p1;
7449 	bestp2 = pipe_config->dpll.p2;
7450 	vco = pipe_config->dpll.vco;
7451 	dpio_val = 0;
7452 	loopfilter = 0;
7453 
7454 	/*
7455 	 * Enable Refclk and SSC
7456 	 */
7457 	I915_WRITE(dpll_reg,
7458 		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7459 
7460 	mutex_lock(&dev_priv->sb_lock);
7461 
7462 	/* p1 and p2 divider */
7463 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7464 			5 << DPIO_CHV_S1_DIV_SHIFT |
7465 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7466 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7467 			1 << DPIO_CHV_K_DIV_SHIFT);
7468 
7469 	/* Feedback post-divider - m2 */
7470 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7471 
7472 	/* Feedback refclk divider - n and m1 */
7473 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7474 			DPIO_CHV_M1_DIV_BY_2 |
7475 			1 << DPIO_CHV_N_DIV_SHIFT);
7476 
7477 	/* M2 fraction division */
7478 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7479 
7480 	/* M2 fraction division enable */
7481 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7482 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7483 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7484 	if (bestm2_frac)
7485 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7486 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7487 
7488 	/* Program digital lock detect threshold */
7489 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7490 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7491 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7492 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7493 	if (!bestm2_frac)
7494 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7495 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7496 
7497 	/* Loop filter */
7498 	if (vco == 5400000) {
7499 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7500 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7501 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7502 		tribuf_calcntr = 0x9;
7503 	} else if (vco <= 6200000) {
7504 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7505 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7506 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7507 		tribuf_calcntr = 0x9;
7508 	} else if (vco <= 6480000) {
7509 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7510 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7511 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7512 		tribuf_calcntr = 0x8;
7513 	} else {
7514 		/* Not supported. Apply the same limits as in the max case */
7515 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7516 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7517 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7518 		tribuf_calcntr = 0;
7519 	}
7520 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7521 
7522 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7523 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7524 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7525 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7526 
7527 	/* AFC Recal */
7528 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7529 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7530 			DPIO_AFC_RECAL);
7531 
7532 	mutex_unlock(&dev_priv->sb_lock);
7533 }
7534 
7535 /**
7536  * vlv_force_pll_on - forcibly enable just the PLL
7537  * @dev: drm device
7538  * @pipe: pipe PLL to enable
7539  * @dpll: PLL configuration
7540  *
7541  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7542  * in cases where we need the PLL enabled even when @pipe is not going to
7543  * be enabled.
7544  */
7545 void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7546 		      const struct dpll *dpll)
7547 {
7548 	struct intel_crtc *crtc =
7549 		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7550 	struct intel_crtc_state pipe_config = {
7551 		.base.crtc = &crtc->base,
7552 		.pixel_multiplier = 1,
7553 		.dpll = *dpll,
7554 	};
7555 
7556 	if (IS_CHERRYVIEW(dev)) {
7557 		chv_compute_dpll(crtc, &pipe_config);
7558 		chv_prepare_pll(crtc, &pipe_config);
7559 		chv_enable_pll(crtc, &pipe_config);
7560 	} else {
7561 		vlv_compute_dpll(crtc, &pipe_config);
7562 		vlv_prepare_pll(crtc, &pipe_config);
7563 		vlv_enable_pll(crtc, &pipe_config);
7564 	}
7565 }
7566 
7567 /**
7568  * vlv_force_pll_off - forcibly disable just the PLL
7569  * @dev: drm device
7570  * @pipe: pipe PLL to disable
7571  *
7572  * Disable the PLL for @pipe. To be used to undo vlv_force_pll_on(),
7573  * i.e. when the PLL was enabled while @pipe itself was not.
7574  */
7575 void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7576 {
7577 	if (IS_CHERRYVIEW(dev))
7578 		chv_disable_pll(to_i915(dev), pipe);
7579 	else
7580 		vlv_disable_pll(to_i915(dev), pipe);
7581 }
7582 
7583 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7584 			      struct intel_crtc_state *crtc_state,
7585 			      intel_clock_t *reduced_clock,
7586 			      int num_connectors)
7587 {
7588 	struct drm_device *dev = crtc->base.dev;
7589 	struct drm_i915_private *dev_priv = dev->dev_private;
7590 	u32 dpll;
7591 	bool is_sdvo;
7592 	struct dpll *clock = &crtc_state->dpll;
7593 
7594 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7595 
7596 	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7597 		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7598 
7599 	dpll = DPLL_VGA_MODE_DIS;
7600 
7601 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7602 		dpll |= DPLLB_MODE_LVDS;
7603 	else
7604 		dpll |= DPLLB_MODE_DAC_SERIAL;
7605 
7606 	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7607 		dpll |= (crtc_state->pixel_multiplier - 1)
7608 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
7609 	}
7610 
7611 	if (is_sdvo)
7612 		dpll |= DPLL_SDVO_HIGH_SPEED;
7613 
7614 	if (crtc_state->has_dp_encoder)
7615 		dpll |= DPLL_SDVO_HIGH_SPEED;
7616 
7617 	/* compute bitmask from p1 value */
7618 	if (IS_PINEVIEW(dev))
7619 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7620 	else {
7621 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7622 		if (IS_G4X(dev) && reduced_clock)
7623 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7624 	}
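	/* e.g. p1 == 3 sets bit 2 of the P1 field: 1 << (3 - 1) == 0x4 */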
7625 	switch (clock->p2) {
7626 	case 5:
7627 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7628 		break;
7629 	case 7:
7630 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7631 		break;
7632 	case 10:
7633 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7634 		break;
7635 	case 14:
7636 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7637 		break;
7638 	}
7639 	if (INTEL_INFO(dev)->gen >= 4)
7640 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7641 
7642 	if (crtc_state->sdvo_tv_clock)
7643 		dpll |= PLL_REF_INPUT_TVCLKINBC;
7644 	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7645 		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7646 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7647 	else
7648 		dpll |= PLL_REF_INPUT_DREFCLK;
7649 
7650 	dpll |= DPLL_VCO_ENABLE;
7651 	crtc_state->dpll_hw_state.dpll = dpll;
7652 
7653 	if (INTEL_INFO(dev)->gen >= 4) {
7654 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7655 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7656 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
7657 	}
7658 }
7659 
7660 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7661 			      struct intel_crtc_state *crtc_state,
7662 			      intel_clock_t *reduced_clock,
7663 			      int num_connectors)
7664 {
7665 	struct drm_device *dev = crtc->base.dev;
7666 	struct drm_i915_private *dev_priv = dev->dev_private;
7667 	u32 dpll;
7668 	struct dpll *clock = &crtc_state->dpll;
7669 
7670 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7671 
7672 	dpll = DPLL_VGA_MODE_DIS;
7673 
7674 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7675 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7676 	} else {
7677 		if (clock->p1 == 2)
7678 			dpll |= PLL_P1_DIVIDE_BY_TWO;
7679 		else
7680 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7681 		if (clock->p2 == 4)
7682 			dpll |= PLL_P2_DIVIDE_BY_4;
7683 	}
7684 
7685 	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7686 		dpll |= DPLL_DVO_2X_MODE;
7687 
7688 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7689 		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7690 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7691 	else
7692 		dpll |= PLL_REF_INPUT_DREFCLK;
7693 
7694 	dpll |= DPLL_VCO_ENABLE;
7695 	crtc_state->dpll_hw_state.dpll = dpll;
7696 }
7697 
7698 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7699 {
7700 	struct drm_device *dev = intel_crtc->base.dev;
7701 	struct drm_i915_private *dev_priv = dev->dev_private;
7702 	enum pipe pipe = intel_crtc->pipe;
7703 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7704 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7705 	uint32_t crtc_vtotal, crtc_vblank_end;
7706 	int vsyncshift = 0;
7707 
7708 	/* We need to be careful not to change the adjusted mode, for otherwise
7709 	 * the hw state checker will get angry at the mismatch. */
7710 	crtc_vtotal = adjusted_mode->crtc_vtotal;
7711 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7712 
7713 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7714 		/* the chip adds 2 halflines automatically */
7715 		crtc_vtotal -= 1;
7716 		crtc_vblank_end -= 1;
7717 
7718 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7719 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7720 		else
7721 			vsyncshift = adjusted_mode->crtc_hsync_start -
7722 				adjusted_mode->crtc_htotal / 2;
7723 		if (vsyncshift < 0)
7724 			vsyncshift += adjusted_mode->crtc_htotal;
7725 	}
7726 
7727 	if (INTEL_INFO(dev)->gen > 3)
7728 		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7729 
7730 	I915_WRITE(HTOTAL(cpu_transcoder),
7731 		   (adjusted_mode->crtc_hdisplay - 1) |
7732 		   ((adjusted_mode->crtc_htotal - 1) << 16));
7733 	I915_WRITE(HBLANK(cpu_transcoder),
7734 		   (adjusted_mode->crtc_hblank_start - 1) |
7735 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
7736 	I915_WRITE(HSYNC(cpu_transcoder),
7737 		   (adjusted_mode->crtc_hsync_start - 1) |
7738 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
7739 
7740 	I915_WRITE(VTOTAL(cpu_transcoder),
7741 		   (adjusted_mode->crtc_vdisplay - 1) |
7742 		   ((crtc_vtotal - 1) << 16));
7743 	I915_WRITE(VBLANK(cpu_transcoder),
7744 		   (adjusted_mode->crtc_vblank_start - 1) |
7745 		   ((crtc_vblank_end - 1) << 16));
7746 	I915_WRITE(VSYNC(cpu_transcoder),
7747 		   (adjusted_mode->crtc_vsync_start - 1) |
7748 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
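	/*
	 * The registers above pack the start value minus one in the low
	 * 16 bits and the end/total value minus one in the high 16 bits;
	 * e.g. HTOTAL for a 1920x1080 CEA mode with htotal 2200 is
	 * (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f.
	 */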
7749 
7750 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7751 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7752 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7753 	 * bits. */
7754 	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7755 	    (pipe == PIPE_B || pipe == PIPE_C))
7756 		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7757 
7758 	/* pipesrc controls the size that is scaled from, which should
7759 	 * always be the user's requested size.
7760 	 */
7761 	I915_WRITE(PIPESRC(pipe),
7762 		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
7763 		   (intel_crtc->config->pipe_src_h - 1));
7764 }
7765 
7766 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7767 				   struct intel_crtc_state *pipe_config)
7768 {
7769 	struct drm_device *dev = crtc->base.dev;
7770 	struct drm_i915_private *dev_priv = dev->dev_private;
7771 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7772 	uint32_t tmp;
7773 
7774 	tmp = I915_READ(HTOTAL(cpu_transcoder));
7775 	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7776 	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7777 	tmp = I915_READ(HBLANK(cpu_transcoder));
7778 	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7779 	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7780 	tmp = I915_READ(HSYNC(cpu_transcoder));
7781 	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7782 	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7783 
7784 	tmp = I915_READ(VTOTAL(cpu_transcoder));
7785 	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7786 	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7787 	tmp = I915_READ(VBLANK(cpu_transcoder));
7788 	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7789 	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7790 	tmp = I915_READ(VSYNC(cpu_transcoder));
7791 	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7792 	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7793 
7794 	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7795 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7796 		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7797 		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7798 	}
7799 
7800 	tmp = I915_READ(PIPESRC(crtc->pipe));
7801 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7802 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7803 
7804 	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7805 	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7806 }
7807 
7808 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7809 				 struct intel_crtc_state *pipe_config)
7810 {
7811 	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7812 	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7813 	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7814 	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7815 
7816 	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7817 	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7818 	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7819 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7820 
7821 	mode->flags = pipe_config->base.adjusted_mode.flags;
7822 	mode->type = DRM_MODE_TYPE_DRIVER;
7823 
7824 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7825 	mode->flags |= pipe_config->base.adjusted_mode.flags;
7826 
7827 	mode->hsync = drm_mode_hsync(mode);
7828 	mode->vrefresh = drm_mode_vrefresh(mode);
7829 	drm_mode_set_name(mode);
7830 }
7831 
7832 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7833 {
7834 	struct drm_device *dev = intel_crtc->base.dev;
7835 	struct drm_i915_private *dev_priv = dev->dev_private;
7836 	uint32_t pipeconf;
7837 
7838 	pipeconf = 0;
7839 
7840 	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7841 	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7842 		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7843 
7844 	if (intel_crtc->config->double_wide)
7845 		pipeconf |= PIPECONF_DOUBLE_WIDE;
7846 
7847 	/* only g4x and later have fancy bpc/dither controls */
7848 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
7849 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
7850 		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7851 			pipeconf |= PIPECONF_DITHER_EN |
7852 				    PIPECONF_DITHER_TYPE_SP;
7853 
7854 		switch (intel_crtc->config->pipe_bpp) {
7855 		case 18:
7856 			pipeconf |= PIPECONF_6BPC;
7857 			break;
7858 		case 24:
7859 			pipeconf |= PIPECONF_8BPC;
7860 			break;
7861 		case 30:
7862 			pipeconf |= PIPECONF_10BPC;
7863 			break;
7864 		default:
7865 			/* Case prevented by intel_choose_pipe_bpp_dither. */
7866 			BUG();
7867 		}
7868 	}
7869 
7870 	if (HAS_PIPE_CXSR(dev)) {
7871 		if (intel_crtc->lowfreq_avail) {
7872 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7873 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7874 		} else {
7875 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
7876 		}
7877 	}
7878 
7879 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7880 		if (INTEL_INFO(dev)->gen < 4 ||
7881 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7882 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7883 		else
7884 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7885 	} else
7886 		pipeconf |= PIPECONF_PROGRESSIVE;
7887 
7888 	if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
7889 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7890 
7891 	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7892 	POSTING_READ(PIPECONF(intel_crtc->pipe));
7893 }
7894 
7895 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7896 				   struct intel_crtc_state *crtc_state)
7897 {
7898 	struct drm_device *dev = crtc->base.dev;
7899 	struct drm_i915_private *dev_priv = dev->dev_private;
7900 	int refclk, num_connectors = 0;
7901 	intel_clock_t clock;
7902 	bool ok;
7903 	bool is_dsi = false;
7904 	struct intel_encoder *encoder;
7905 	const intel_limit_t *limit;
7906 	struct drm_atomic_state *state = crtc_state->base.state;
7907 	struct drm_connector *connector;
7908 	struct drm_connector_state *connector_state;
7909 	int i;
7910 
7911 	memset(&crtc_state->dpll_hw_state, 0,
7912 	       sizeof(crtc_state->dpll_hw_state));
7913 
7914 	for_each_connector_in_state(state, connector, connector_state, i) {
7915 		if (connector_state->crtc != &crtc->base)
7916 			continue;
7917 
7918 		encoder = to_intel_encoder(connector_state->best_encoder);
7919 
7920 		switch (encoder->type) {
7921 		case INTEL_OUTPUT_DSI:
7922 			is_dsi = true;
7923 			break;
7924 		default:
7925 			break;
7926 		}
7927 
7928 		num_connectors++;
7929 	}
7930 
7931 	if (is_dsi)
7932 		return 0;
7933 
7934 	if (!crtc_state->clock_set) {
7935 		refclk = i9xx_get_refclk(crtc_state, num_connectors);
7936 
7937 		/*
7938 		 * Returns a set of divisors for the desired target clock with
7939 		 * the given refclk, or FALSE.  The returned values represent
7940 		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
7941 		 * 2) / p1 / p2.
7942 		 */
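		/*
		 * Worked example: refclk=96000, m1=10, m2=8, n=2, p1=2,
		 * p2=5 gives 96000 * (5*12 + 10) / 4 / 2 / 5 = 168000 kHz.
		 */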
7943 		limit = intel_limit(crtc_state, refclk);
7944 		ok = dev_priv->display.find_dpll(limit, crtc_state,
7945 						 crtc_state->port_clock,
7946 						 refclk, NULL, &clock);
7947 		if (!ok) {
7948 			DRM_ERROR("Couldn't find PLL settings for mode!\n");
7949 			return -EINVAL;
7950 		}
7951 
7952 		/* Compat-code for transition, will disappear. */
7953 		crtc_state->dpll.n = clock.n;
7954 		crtc_state->dpll.m1 = clock.m1;
7955 		crtc_state->dpll.m2 = clock.m2;
7956 		crtc_state->dpll.p1 = clock.p1;
7957 		crtc_state->dpll.p2 = clock.p2;
7958 	}
7959 
7960 	if (IS_GEN2(dev)) {
7961 		i8xx_compute_dpll(crtc, crtc_state, NULL,
7962 				  num_connectors);
7963 	} else if (IS_CHERRYVIEW(dev)) {
7964 		chv_compute_dpll(crtc, crtc_state);
7965 	} else if (IS_VALLEYVIEW(dev)) {
7966 		vlv_compute_dpll(crtc, crtc_state);
7967 	} else {
7968 		i9xx_compute_dpll(crtc, crtc_state, NULL,
7969 				  num_connectors);
7970 	}
7971 
7972 	return 0;
7973 }
7974 
7975 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
7976 				 struct intel_crtc_state *pipe_config)
7977 {
7978 	struct drm_device *dev = crtc->base.dev;
7979 	struct drm_i915_private *dev_priv = dev->dev_private;
7980 	uint32_t tmp;
7981 
7982 	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
7983 		return;
7984 
7985 	tmp = I915_READ(PFIT_CONTROL);
7986 	if (!(tmp & PFIT_ENABLE))
7987 		return;
7988 
7989 	/* Check whether the pfit is attached to our pipe. */
7990 	if (INTEL_INFO(dev)->gen < 4) {
7991 		if (crtc->pipe != PIPE_B)
7992 			return;
7993 	} else {
7994 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7995 			return;
7996 	}
7997 
7998 	pipe_config->gmch_pfit.control = tmp;
7999 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8000 	if (INTEL_INFO(dev)->gen < 5)
8001 		pipe_config->gmch_pfit.lvds_border_bits =
8002 			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
8003 }
8004 
8005 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8006 			       struct intel_crtc_state *pipe_config)
8007 {
8008 	struct drm_device *dev = crtc->base.dev;
8009 	struct drm_i915_private *dev_priv = dev->dev_private;
8010 	int pipe = pipe_config->cpu_transcoder;
8011 	intel_clock_t clock;
8012 	u32 mdiv;
8013 	int refclk = 100000;
8014 
8015 	/* In the MIPI (DSI) case the DPLL is not even used */
8016 	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
8017 		return;
8018 
8019 	mutex_lock(&dev_priv->sb_lock);
8020 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8021 	mutex_unlock(&dev_priv->sb_lock);
8022 
8023 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8024 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8025 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8026 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8027 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8028 
8029 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8030 }
8031 
8032 static void
8033 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8034 			      struct intel_initial_plane_config *plane_config)
8035 {
8036 	struct drm_device *dev = crtc->base.dev;
8037 	struct drm_i915_private *dev_priv = dev->dev_private;
8038 	u32 val, base, offset;
8039 	int pipe = crtc->pipe, plane = crtc->plane;
8040 	int fourcc, pixel_format;
8041 	unsigned int aligned_height;
8042 	struct drm_framebuffer *fb;
8043 	struct intel_framebuffer *intel_fb;
8044 
8045 	val = I915_READ(DSPCNTR(plane));
8046 	if (!(val & DISPLAY_PLANE_ENABLE))
8047 		return;
8048 
8049 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8050 	if (!intel_fb) {
8051 		DRM_DEBUG_KMS("failed to alloc fb\n");
8052 		return;
8053 	}
8054 
8055 	fb = &intel_fb->base;
8056 
8057 	if (INTEL_INFO(dev)->gen >= 4) {
8058 		if (val & DISPPLANE_TILED) {
8059 			plane_config->tiling = I915_TILING_X;
8060 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8061 		}
8062 	}
8063 
8064 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8065 	fourcc = i9xx_format_to_fourcc(pixel_format);
8066 	fb->pixel_format = fourcc;
8067 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8068 
8069 	if (INTEL_INFO(dev)->gen >= 4) {
8070 		if (plane_config->tiling)
8071 			offset = I915_READ(DSPTILEOFF(plane));
8072 		else
8073 			offset = I915_READ(DSPLINOFF(plane));
8074 		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8075 	} else {
8076 		base = I915_READ(DSPADDR(plane));
8077 	}
8078 	plane_config->base = base;
8079 
8080 	val = I915_READ(PIPESRC(pipe));
8081 	fb->width = ((val >> 16) & 0xfff) + 1;
8082 	fb->height = ((val >> 0) & 0xfff) + 1;
8083 
8084 	val = I915_READ(DSPSTRIDE(pipe));
8085 	fb->pitches[0] = val & 0xffffffc0;
8086 
8087 	aligned_height = intel_fb_align_height(dev, fb->height,
8088 					       fb->pixel_format,
8089 					       fb->modifier[0]);
8090 
8091 	plane_config->size = fb->pitches[0] * aligned_height;
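	/*
	 * Illustrative numbers (assuming X tiling with 8-row tiles): a
	 * 1920x1080 XRGB8888 plane with a 7680-byte stride yields
	 * size = 7680 * 1080 = 8294400 bytes.
	 */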
8092 
8093 	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8094 		      pipe_name(pipe), plane, fb->width, fb->height,
8095 		      fb->bits_per_pixel, base, fb->pitches[0],
8096 		      plane_config->size);
8097 
8098 	plane_config->fb = intel_fb;
8099 }
8100 
8101 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8102 			       struct intel_crtc_state *pipe_config)
8103 {
8104 	struct drm_device *dev = crtc->base.dev;
8105 	struct drm_i915_private *dev_priv = dev->dev_private;
8106 	int pipe = pipe_config->cpu_transcoder;
8107 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8108 	intel_clock_t clock;
8109 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8110 	int refclk = 100000;
8111 
8112 	mutex_lock(&dev_priv->sb_lock);
8113 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8114 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8115 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8116 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8117 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8118 	mutex_unlock(&dev_priv->sb_lock);
8119 
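	/*
	 * The CHV m2 divider is a fixed-point value: the integer part comes
	 * from PLL_DW0 shifted left by 22, and when fractional division is
	 * enabled the low 22 bits from PLL_DW2 hold the fraction.
	 */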
8120 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8121 	clock.m2 = (pll_dw0 & 0xff) << 22;
8122 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8123 		clock.m2 |= pll_dw2 & 0x3fffff;
8124 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8125 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8126 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8127 
8128 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8129 }
8130 
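/* Read out the current hardware state of the pipe on gmch platforms into
 * pipe_config; returns false if the pipe or its power domain is disabled.
 */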
8131 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8132 				 struct intel_crtc_state *pipe_config)
8133 {
8134 	struct drm_device *dev = crtc->base.dev;
8135 	struct drm_i915_private *dev_priv = dev->dev_private;
8136 	uint32_t tmp;
8137 
8138 	if (!intel_display_power_is_enabled(dev_priv,
8139 					    POWER_DOMAIN_PIPE(crtc->pipe)))
8140 		return false;
8141 
8142 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8143 	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8144 
8145 	tmp = I915_READ(PIPECONF(crtc->pipe));
8146 	if (!(tmp & PIPECONF_ENABLE))
8147 		return false;
8148 
8149 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
8150 		switch (tmp & PIPECONF_BPC_MASK) {
8151 		case PIPECONF_6BPC:
8152 			pipe_config->pipe_bpp = 18;
8153 			break;
8154 		case PIPECONF_8BPC:
8155 			pipe_config->pipe_bpp = 24;
8156 			break;
8157 		case PIPECONF_10BPC:
8158 			pipe_config->pipe_bpp = 30;
8159 			break;
8160 		default:
8161 			break;
8162 		}
8163 	}
8164 
8165 	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
8166 		pipe_config->limited_color_range = true;
8167 
8168 	if (INTEL_INFO(dev)->gen < 4)
8169 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8170 
8171 	intel_get_pipe_timings(crtc, pipe_config);
8172 
8173 	i9xx_get_pfit_config(crtc, pipe_config);
8174 
8175 	if (INTEL_INFO(dev)->gen >= 4) {
8176 		tmp = I915_READ(DPLL_MD(crtc->pipe));
8177 		pipe_config->pixel_multiplier =
8178 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8179 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8180 		pipe_config->dpll_hw_state.dpll_md = tmp;
8181 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8182 		tmp = I915_READ(DPLL(crtc->pipe));
8183 		pipe_config->pixel_multiplier =
8184 			((tmp & SDVO_MULTIPLIER_MASK)
8185 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8186 	} else {
8187 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
8188 		 * port and will be fixed up in the encoder->get_config
8189 		 * function. */
8190 		pipe_config->pixel_multiplier = 1;
8191 	}
8192 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8193 	if (!IS_VALLEYVIEW(dev)) {
8194 		/*
8195 		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8196 		 * on 830. Filter it out here so that we don't
8197 		 * report errors due to that.
8198 		 */
8199 		if (IS_I830(dev))
8200 			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8201 
8202 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8203 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8204 	} else {
8205 		/* Mask out read-only status bits. */
8206 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8207 						     DPLL_PORTC_READY_MASK |
8208 						     DPLL_PORTB_READY_MASK);
8209 	}
8210 
8211 	if (IS_CHERRYVIEW(dev))
8212 		chv_crtc_clock_get(crtc, pipe_config);
8213 	else if (IS_VALLEYVIEW(dev))
8214 		vlv_crtc_clock_get(crtc, pipe_config);
8215 	else
8216 		i9xx_crtc_clock_get(crtc, pipe_config);
8217 
8218 	/*
8219 	 * Normally the dotclock is filled in by the encoder .get_config()
8220 	 * but in case the pipe is enabled w/o any ports we need a sane
8221 	 * default.
8222 	 */
8223 	pipe_config->base.adjusted_mode.crtc_clock =
8224 		pipe_config->port_clock / pipe_config->pixel_multiplier;
8225 
8226 	return true;
8227 }
8228 
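/* Compute the desired PCH_DREF_CONTROL value from the attached outputs
 * (LVDS/eDP, CK505, SSC usage) and then walk the hardware to it, enabling
 * and disabling each clock source in turn.
 */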
8229 static void ironlake_init_pch_refclk(struct drm_device *dev)
8230 {
8231 	struct drm_i915_private *dev_priv = dev->dev_private;
8232 	struct intel_encoder *encoder;
8233 	int i;
8234 	u32 val, final;
8235 	bool has_lvds = false;
8236 	bool has_cpu_edp = false;
8237 	bool has_panel = false;
8238 	bool has_ck505 = false;
8239 	bool can_ssc = false;
8240 	bool using_ssc_source = false;
8241 
8242 	/* We need to take the global config into account */
8243 	for_each_intel_encoder(dev, encoder) {
8244 		switch (encoder->type) {
8245 		case INTEL_OUTPUT_LVDS:
8246 			has_panel = true;
8247 			has_lvds = true;
8248 			break;
8249 		case INTEL_OUTPUT_EDP:
8250 			has_panel = true;
8251 			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
8252 				has_cpu_edp = true;
8253 			break;
8254 		default:
8255 			break;
8256 		}
8257 	}
8258 
8259 	if (HAS_PCH_IBX(dev)) {
8260 		has_ck505 = dev_priv->vbt.display_clock_mode;
8261 		can_ssc = has_ck505;
8262 	} else {
8263 		has_ck505 = false;
8264 		can_ssc = true;
8265 	}
8266 
8267 	/* Check if any DPLLs are using the SSC source */
8268 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8269 		u32 temp = I915_READ(PCH_DPLL(i));
8270 
8271 		if (!(temp & DPLL_VCO_ENABLE))
8272 			continue;
8273 
8274 		if ((temp & PLL_REF_INPUT_MASK) ==
8275 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8276 			using_ssc_source = true;
8277 			break;
8278 		}
8279 	}
8280 
8281 	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8282 		      has_panel, has_lvds, has_ck505, using_ssc_source);
8283 
8284 	/* Ironlake: try to set up the display reference clock before DPLL
8285 	 * enabling. This is only under the driver's control after the
8286 	 * PCH B stepping; earlier chipset steppings should ignore
8287 	 * this setting.
8288 	 */
8289 	val = I915_READ(PCH_DREF_CONTROL);
8290 
8291 	/* As we must carefully and slowly disable/enable each source in turn,
8292 	 * compute the final state we want first and check if we need to
8293 	 * make any changes at all.
8294 	 */
8295 	final = val;
8296 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
8297 	if (has_ck505)
8298 		final |= DREF_NONSPREAD_CK505_ENABLE;
8299 	else
8300 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
8301 
8302 	final &= ~DREF_SSC_SOURCE_MASK;
8303 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8304 	final &= ~DREF_SSC1_ENABLE;
8305 
8306 	if (has_panel) {
8307 		final |= DREF_SSC_SOURCE_ENABLE;
8308 
8309 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
8310 			final |= DREF_SSC1_ENABLE;
8311 
8312 		if (has_cpu_edp) {
8313 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
8314 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8315 			else
8316 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8317 		} else
8318 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8319 	} else if (using_ssc_source) {
8320 		final |= DREF_SSC_SOURCE_ENABLE;
8321 		final |= DREF_SSC1_ENABLE;
8322 	}
8323 
8324 	if (final == val)
8325 		return;
8326 
8327 	/* Always enable nonspread source */
8328 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
8329 
8330 	if (has_ck505)
8331 		val |= DREF_NONSPREAD_CK505_ENABLE;
8332 	else
8333 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
8334 
8335 	if (has_panel) {
8336 		val &= ~DREF_SSC_SOURCE_MASK;
8337 		val |= DREF_SSC_SOURCE_ENABLE;
8338 
8339 		/* SSC must be turned on before enabling the CPU output */
8340 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8341 			DRM_DEBUG_KMS("Using SSC on panel\n");
8342 			val |= DREF_SSC1_ENABLE;
8343 		} else
8344 			val &= ~DREF_SSC1_ENABLE;
8345 
8346 		/* Get SSC going before enabling the outputs */
8347 		I915_WRITE(PCH_DREF_CONTROL, val);
8348 		POSTING_READ(PCH_DREF_CONTROL);
8349 		udelay(200);
8350 
8351 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8352 
8353 		/* Enable CPU source on CPU attached eDP */
8354 		if (has_cpu_edp) {
8355 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8356 				DRM_DEBUG_KMS("Using SSC on eDP\n");
8357 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8358 			} else
8359 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8360 		} else
8361 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8362 
8363 		I915_WRITE(PCH_DREF_CONTROL, val);
8364 		POSTING_READ(PCH_DREF_CONTROL);
8365 		udelay(200);
8366 	} else {
8367 		DRM_DEBUG_KMS("Disabling CPU source output\n");
8368 
8369 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8370 
8371 		/* Turn off CPU output */
8372 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8373 
8374 		I915_WRITE(PCH_DREF_CONTROL, val);
8375 		POSTING_READ(PCH_DREF_CONTROL);
8376 		udelay(200);
8377 
8378 		if (!using_ssc_source) {
8379 			DRM_DEBUG_KMS("Disabling SSC source\n");
8380 
8381 			/* Turn off the SSC source */
8382 			val &= ~DREF_SSC_SOURCE_MASK;
8383 			val |= DREF_SSC_SOURCE_DISABLE;
8384 
8385 			/* Turn off SSC1 */
8386 			val &= ~DREF_SSC1_ENABLE;
8387 
8388 			I915_WRITE(PCH_DREF_CONTROL, val);
8389 			POSTING_READ(PCH_DREF_CONTROL);
8390 			udelay(200);
8391 		}
8392 	}
8393 
8394 	BUG_ON(val != final);
8395 }
8396 
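/* Pulse the FDI mPHY reset via SOUTH_CHICKEN2, waiting for the status
 * bit to assert and then de-assert.
 */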
8397 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8398 {
8399 	uint32_t tmp;
8400 
8401 	tmp = I915_READ(SOUTH_CHICKEN2);
8402 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8403 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8404 
8405 	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
8406 			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8407 		DRM_ERROR("FDI mPHY reset assert timeout\n");
8408 
8409 	tmp = I915_READ(SOUTH_CHICKEN2);
8410 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8411 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8412 
8413 	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
8414 				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8415 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8416 }
8417 
8418 /* WaMPhyProgramming:hsw */
8419 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8420 {
8421 	uint32_t tmp;
8422 
8423 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8424 	tmp &= ~(0xFF << 24);
8425 	tmp |= (0x12 << 24);
8426 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8427 
8428 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8429 	tmp |= (1 << 11);
8430 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8431 
8432 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8433 	tmp |= (1 << 11);
8434 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8435 
8436 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8437 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8438 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8439 
8440 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8441 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8442 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8443 
8444 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8445 	tmp &= ~(7 << 13);
8446 	tmp |= (5 << 13);
8447 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8448 
8449 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8450 	tmp &= ~(7 << 13);
8451 	tmp |= (5 << 13);
8452 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8453 
8454 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8455 	tmp &= ~0xFF;
8456 	tmp |= 0x1C;
8457 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8458 
8459 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8460 	tmp &= ~0xFF;
8461 	tmp |= 0x1C;
8462 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8463 
8464 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8465 	tmp &= ~(0xFF << 16);
8466 	tmp |= (0x1C << 16);
8467 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8468 
8469 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8470 	tmp &= ~(0xFF << 16);
8471 	tmp |= (0x1C << 16);
8472 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8473 
8474 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8475 	tmp |= (1 << 27);
8476 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8477 
8478 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8479 	tmp |= (1 << 27);
8480 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8481 
8482 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8483 	tmp &= ~(0xF << 28);
8484 	tmp |= (4 << 28);
8485 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8486 
8487 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8488 	tmp &= ~(0xF << 28);
8489 	tmp |= (4 << 28);
8490 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8491 }
8492 
8493 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8494  * Programming" based on the parameters passed:
8495  * - Sequence to enable CLKOUT_DP
8496  * - Sequence to enable CLKOUT_DP without spread
8497  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8498  */
8499 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8500 				 bool with_fdi)
8501 {
8502 	struct drm_i915_private *dev_priv = dev->dev_private;
8503 	uint32_t reg, tmp;
8504 
8505 	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8506 		with_spread = true;
8507 	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8508 		with_fdi = false;
8509 
8510 	mutex_lock(&dev_priv->sb_lock);
8511 
8512 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8513 	tmp &= ~SBI_SSCCTL_DISABLE;
8514 	tmp |= SBI_SSCCTL_PATHALT;
8515 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8516 
8517 	udelay(24);
8518 
8519 	if (with_spread) {
8520 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8521 		tmp &= ~SBI_SSCCTL_PATHALT;
8522 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8523 
8524 		if (with_fdi) {
8525 			lpt_reset_fdi_mphy(dev_priv);
8526 			lpt_program_fdi_mphy(dev_priv);
8527 		}
8528 	}
8529 
8530 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8531 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8532 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8533 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8534 
8535 	mutex_unlock(&dev_priv->sb_lock);
8536 }
8537 
8538 /* Sequence to disable CLKOUT_DP */
8539 static void lpt_disable_clkout_dp(struct drm_device *dev)
8540 {
8541 	struct drm_i915_private *dev_priv = dev->dev_private;
8542 	uint32_t reg, tmp;
8543 
8544 	mutex_lock(&dev_priv->sb_lock);
8545 
8546 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8547 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8548 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8549 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8550 
8551 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8552 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
8553 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
8554 			tmp |= SBI_SSCCTL_PATHALT;
8555 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8556 			udelay(32);
8557 		}
8558 		tmp |= SBI_SSCCTL_DISABLE;
8559 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8560 	}
8561 
8562 	mutex_unlock(&dev_priv->sb_lock);
8563 }
8564 
8565 static void lpt_init_pch_refclk(struct drm_device *dev)
8566 {
8567 	struct intel_encoder *encoder;
8568 	bool has_vga = false;
8569 
8570 	for_each_intel_encoder(dev, encoder) {
8571 		switch (encoder->type) {
8572 		case INTEL_OUTPUT_ANALOG:
8573 			has_vga = true;
8574 			break;
8575 		default:
8576 			break;
8577 		}
8578 	}
8579 
8580 	if (has_vga)
8581 		lpt_enable_clkout_dp(dev, true, true);
8582 	else
8583 		lpt_disable_clkout_dp(dev);
8584 }
8585 
8586 /*
8587  * Initialize reference clocks when the driver loads
8588  */
8589 void intel_init_pch_refclk(struct drm_device *dev)
8590 {
8591 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8592 		ironlake_init_pch_refclk(dev);
8593 	else if (HAS_PCH_LPT(dev))
8594 		lpt_init_pch_refclk(dev);
8595 }
8596 
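/* Pick the DPLL reference clock: the VBT-provided SSC frequency for a
 * single SSC-capable LVDS panel, otherwise the fixed 120 MHz reference.
 */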
8597 static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
8598 {
8599 	struct drm_device *dev = crtc_state->base.crtc->dev;
8600 	struct drm_i915_private *dev_priv = dev->dev_private;
8601 	struct drm_atomic_state *state = crtc_state->base.state;
8602 	struct drm_connector *connector;
8603 	struct drm_connector_state *connector_state;
8604 	struct intel_encoder *encoder;
8605 	int num_connectors = 0, i;
8606 	bool is_lvds = false;
8607 
8608 	for_each_connector_in_state(state, connector, connector_state, i) {
8609 		if (connector_state->crtc != crtc_state->base.crtc)
8610 			continue;
8611 
8612 		encoder = to_intel_encoder(connector_state->best_encoder);
8613 
8614 		switch (encoder->type) {
8615 		case INTEL_OUTPUT_LVDS:
8616 			is_lvds = true;
8617 			break;
8618 		default:
8619 			break;
8620 		}
8621 		num_connectors++;
8622 	}
8623 
8624 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
8625 		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8626 			      dev_priv->vbt.lvds_ssc_freq);
8627 		return dev_priv->vbt.lvds_ssc_freq;
8628 	}
8629 
8630 	return 120000;
8631 }
8632 
8633 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8634 {
8635 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8636 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8637 	int pipe = intel_crtc->pipe;
8638 	uint32_t val;
8639 
8640 	val = 0;
8641 
8642 	switch (intel_crtc->config->pipe_bpp) {
8643 	case 18:
8644 		val |= PIPECONF_6BPC;
8645 		break;
8646 	case 24:
8647 		val |= PIPECONF_8BPC;
8648 		break;
8649 	case 30:
8650 		val |= PIPECONF_10BPC;
8651 		break;
8652 	case 36:
8653 		val |= PIPECONF_12BPC;
8654 		break;
8655 	default:
8656 		/* Case prevented by intel_choose_pipe_bpp_dither. */
8657 		BUG();
8658 	}
8659 
8660 	if (intel_crtc->config->dither)
8661 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8662 
8663 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8664 		val |= PIPECONF_INTERLACED_ILK;
8665 	else
8666 		val |= PIPECONF_PROGRESSIVE;
8667 
8668 	if (intel_crtc->config->limited_color_range)
8669 		val |= PIPECONF_COLOR_RANGE_SELECT;
8670 
8671 	I915_WRITE(PIPECONF(pipe), val);
8672 	POSTING_READ(PIPECONF(pipe));
8673 }
8674 
8675 /*
8676  * Set up the pipe CSC unit.
8677  *
8678  * Currently only full range RGB to limited range RGB conversion
8679  * is supported, but eventually this should handle various
8680  * RGB<->YCbCr scenarios as well.
8681  */
8682 static void intel_set_pipe_csc(struct drm_crtc *crtc)
8683 {
8684 	struct drm_device *dev = crtc->dev;
8685 	struct drm_i915_private *dev_priv = dev->dev_private;
8686 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8687 	int pipe = intel_crtc->pipe;
8688 	uint16_t coeff = 0x7800; /* 1.0 */
8689 
8690 	/*
8691 	 * TODO: Check what kind of values actually come out of the pipe
8692 	 * with these coeff/postoff values and adjust to get the best
8693 	 * accuracy. Perhaps we even need to take the bpc value into
8694 	 * consideration.
8695 	 */
8696 
8697 	if (intel_crtc->config->limited_color_range)
8698 		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
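		/* i.e. 219 * 4096 / 255 = 3517, masked down to 0xdb8: a
		 * factor of roughly 0.86 to scale full range pixel values
		 * into the limited 16..235 range. */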
8699 
8700 	/*
8701 	 * GY/GU and RY/RU should be the other way around according
8702 	 * to BSpec, but reality doesn't agree. Just set them up in
8703 	 * a way that results in the correct picture.
8704 	 */
8705 	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
8706 	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
8707 
8708 	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
8709 	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
8710 
8711 	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
8712 	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
8713 
8714 	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
8715 	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
8716 	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
8717 
8718 	if (INTEL_INFO(dev)->gen > 6) {
8719 		uint16_t postoff = 0;
8720 
8721 		if (intel_crtc->config->limited_color_range)
8722 			postoff = (16 * (1 << 12) / 255) & 0x1fff;
8723 
8724 		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
8725 		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
8726 		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
8727 
8728 		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
8729 	} else {
8730 		uint32_t mode = CSC_MODE_YUV_TO_RGB;
8731 
8732 		if (intel_crtc->config->limited_color_range)
8733 			mode |= CSC_BLACK_SCREEN_OFFSET;
8734 
8735 		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
8736 	}
8737 }
8738 
8739 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8740 {
8741 	struct drm_device *dev = crtc->dev;
8742 	struct drm_i915_private *dev_priv = dev->dev_private;
8743 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8744 	enum pipe pipe = intel_crtc->pipe;
8745 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8746 	uint32_t val;
8747 
8748 	val = 0;
8749 
8750 	if (IS_HASWELL(dev) && intel_crtc->config->dither)
8751 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8752 
8753 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8754 		val |= PIPECONF_INTERLACED_ILK;
8755 	else
8756 		val |= PIPECONF_PROGRESSIVE;
8757 
8758 	I915_WRITE(PIPECONF(cpu_transcoder), val);
8759 	POSTING_READ(PIPECONF(cpu_transcoder));
8760 
8761 	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
8762 	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
8763 
8764 	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
8765 		val = 0;
8766 
8767 		switch (intel_crtc->config->pipe_bpp) {
8768 		case 18:
8769 			val |= PIPEMISC_DITHER_6_BPC;
8770 			break;
8771 		case 24:
8772 			val |= PIPEMISC_DITHER_8_BPC;
8773 			break;
8774 		case 30:
8775 			val |= PIPEMISC_DITHER_10_BPC;
8776 			break;
8777 		case 36:
8778 			val |= PIPEMISC_DITHER_12_BPC;
8779 			break;
8780 		default:
8781 			/* Case prevented by pipe_config_set_bpp. */
8782 			BUG();
8783 		}
8784 
8785 		if (intel_crtc->config->dither)
8786 			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8787 
8788 		I915_WRITE(PIPEMISC(pipe), val);
8789 	}
8790 }
8791 
8792 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
8793 				    struct intel_crtc_state *crtc_state,
8794 				    intel_clock_t *clock,
8795 				    bool *has_reduced_clock,
8796 				    intel_clock_t *reduced_clock)
8797 {
8798 	struct drm_device *dev = crtc->dev;
8799 	struct drm_i915_private *dev_priv = dev->dev_private;
8800 	int refclk;
8801 	const intel_limit_t *limit;
8802 	bool ret;
8803 
8804 	refclk = ironlake_get_refclk(crtc_state);
8805 
8806 	/*
8807 	 * Returns a set of divisors for the desired target clock with the given
8808 	 * refclk, or FALSE.  The returned values represent the clock equation:
8809 	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8810 	 */
8811 	limit = intel_limit(crtc_state, refclk);
8812 	ret = dev_priv->display.find_dpll(limit, crtc_state,
8813 					  crtc_state->port_clock,
8814 					  refclk, NULL, clock);
8815 	if (!ret)
8816 		return false;
8817 
8818 	return true;
8819 }
8820 
8821 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8822 {
8823 	/*
8824 	 * Account for spread spectrum to avoid
8825 	 * oversubscribing the link. Max center spread
8826 	 * is 2.5%; use 5% for safety's sake.
8827 	 */
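	/* Example: a 148500 kHz mode at 24 bpp needs bps = 148500 * 24 *
	 * 21/20 = 3742200; on a 270000 kHz link carrying 8 bits per clock
	 * per lane that is DIV_ROUND_UP(3742200, 2160000) = 2 lanes.
	 */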
8828 	u32 bps = target_clock * bpp * 21 / 20;
8829 	return DIV_ROUND_UP(bps, link_bw * 8);
8830 }
8831 
8832 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8833 {
8834 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8835 }
8836 
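/* Assemble the DPLL control register value (mode, pixel multiplier,
 * p1/p2 dividers, reference selection) for the PCH DPLLs, and set
 * FP_CB_TUNE in the fp/fp2 dividers when the m/n ratio calls for it.
 */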
8837 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8838 				      struct intel_crtc_state *crtc_state,
8839 				      u32 *fp,
8840 				      intel_clock_t *reduced_clock, u32 *fp2)
8841 {
8842 	struct drm_crtc *crtc = &intel_crtc->base;
8843 	struct drm_device *dev = crtc->dev;
8844 	struct drm_i915_private *dev_priv = dev->dev_private;
8845 	struct drm_atomic_state *state = crtc_state->base.state;
8846 	struct drm_connector *connector;
8847 	struct drm_connector_state *connector_state;
8848 	struct intel_encoder *encoder;
8849 	uint32_t dpll;
8850 	int factor, num_connectors = 0, i;
8851 	bool is_lvds = false, is_sdvo = false;
8852 
8853 	for_each_connector_in_state(state, connector, connector_state, i) {
8854 		if (connector_state->crtc != crtc_state->base.crtc)
8855 			continue;
8856 
8857 		encoder = to_intel_encoder(connector_state->best_encoder);
8858 
8859 		switch (encoder->type) {
8860 		case INTEL_OUTPUT_LVDS:
8861 			is_lvds = true;
8862 			break;
8863 		case INTEL_OUTPUT_SDVO:
8864 		case INTEL_OUTPUT_HDMI:
8865 			is_sdvo = true;
8866 			break;
8867 		default:
8868 			break;
8869 		}
8870 
8871 		num_connectors++;
8872 	}
8873 
8874 	/* Enable autotuning of the PLL clock (if permissible) */
8875 	factor = 21;
8876 	if (is_lvds) {
8877 		if ((intel_panel_use_ssc(dev_priv) &&
8878 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
8879 		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8880 			factor = 25;
8881 	} else if (crtc_state->sdvo_tv_clock)
8882 		factor = 20;
8883 
8884 	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8885 		*fp |= FP_CB_TUNE;
8886 
8887 	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
8888 		*fp2 |= FP_CB_TUNE;
8889 
8890 	dpll = 0;
8891 
8892 	if (is_lvds)
8893 		dpll |= DPLLB_MODE_LVDS;
8894 	else
8895 		dpll |= DPLLB_MODE_DAC_SERIAL;
8896 
8897 	dpll |= (crtc_state->pixel_multiplier - 1)
8898 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8899 
8900 	if (is_sdvo)
8901 		dpll |= DPLL_SDVO_HIGH_SPEED;
8902 	if (crtc_state->has_dp_encoder)
8903 		dpll |= DPLL_SDVO_HIGH_SPEED;
8904 
8905 	/* compute bitmask from p1 value */
8906 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8907 	/* also FPA1 */
8908 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8909 
8910 	switch (crtc_state->dpll.p2) {
8911 	case 5:
8912 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8913 		break;
8914 	case 7:
8915 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8916 		break;
8917 	case 10:
8918 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8919 		break;
8920 	case 14:
8921 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8922 		break;
8923 	}
8924 
8925 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
8926 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8927 	else
8928 		dpll |= PLL_REF_INPUT_DREFCLK;
8929 
8930 	return dpll | DPLL_VCO_ENABLE;
8931 }
8932 
8933 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8934 				       struct intel_crtc_state *crtc_state)
8935 {
8936 	struct drm_device *dev = crtc->base.dev;
8937 	intel_clock_t clock, reduced_clock;
8938 	u32 dpll = 0, fp = 0, fp2 = 0;
8939 	bool ok, has_reduced_clock = false;
8940 	bool is_lvds = false;
8941 	struct intel_shared_dpll *pll;
8942 
8943 	memset(&crtc_state->dpll_hw_state, 0,
8944 	       sizeof(crtc_state->dpll_hw_state));
8945 
8946 	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
8947 
8948 	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
8949 	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
8950 
8951 	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
8952 				     &has_reduced_clock, &reduced_clock);
8953 	if (!ok && !crtc_state->clock_set) {
8954 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8955 		return -EINVAL;
8956 	}
8957 	/* Compat-code for transition, will disappear. */
8958 	if (!crtc_state->clock_set) {
8959 		crtc_state->dpll.n = clock.n;
8960 		crtc_state->dpll.m1 = clock.m1;
8961 		crtc_state->dpll.m2 = clock.m2;
8962 		crtc_state->dpll.p1 = clock.p1;
8963 		crtc_state->dpll.p2 = clock.p2;
8964 	}
8965 
8966 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8967 	if (crtc_state->has_pch_encoder) {
8968 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8969 		if (has_reduced_clock)
8970 			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
8971 
8972 		dpll = ironlake_compute_dpll(crtc, crtc_state,
8973 					     &fp, &reduced_clock,
8974 					     has_reduced_clock ? &fp2 : NULL);
8975 
8976 		crtc_state->dpll_hw_state.dpll = dpll;
8977 		crtc_state->dpll_hw_state.fp0 = fp;
8978 		if (has_reduced_clock)
8979 			crtc_state->dpll_hw_state.fp1 = fp2;
8980 		else
8981 			crtc_state->dpll_hw_state.fp1 = fp;
8982 
8983 		pll = intel_get_shared_dpll(crtc, crtc_state);
8984 		if (pll == NULL) {
8985 			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8986 					 pipe_name(crtc->pipe));
8987 			return -EINVAL;
8988 		}
8989 	}
8990 
8991 	if (is_lvds && has_reduced_clock)
8992 		crtc->lowfreq_avail = true;
8993 	else
8994 		crtc->lowfreq_avail = false;
8995 
8996 	return 0;
8997 }
8998 
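/* Read the link M/N and data M/N values (plus TU size) back from the PCH
 * transcoder registers.
 */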
8999 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9000 					 struct intel_link_m_n *m_n)
9001 {
9002 	struct drm_device *dev = crtc->base.dev;
9003 	struct drm_i915_private *dev_priv = dev->dev_private;
9004 	enum pipe pipe = crtc->pipe;
9005 
9006 	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9007 	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9008 	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9009 		& ~TU_SIZE_MASK;
9010 	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9011 	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9012 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9013 }
9014 
9015 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9016 					 enum transcoder transcoder,
9017 					 struct intel_link_m_n *m_n,
9018 					 struct intel_link_m_n *m2_n2)
9019 {
9020 	struct drm_device *dev = crtc->base.dev;
9021 	struct drm_i915_private *dev_priv = dev->dev_private;
9022 	enum pipe pipe = crtc->pipe;
9023 
9024 	if (INTEL_INFO(dev)->gen >= 5) {
9025 		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9026 		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9027 		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9028 			& ~TU_SIZE_MASK;
9029 		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9030 		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9031 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9032 		/* Read the M2_N2 registers only on gen < 8, where they
9033 		 * exist, and only if DRRS is supported, so that the
9034 		 * registers are not read unnecessarily.
9035 		 */
9036 		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
9037 			crtc->config->has_drrs) {
9038 			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9039 			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
9040 			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
9041 					& ~TU_SIZE_MASK;
9042 			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
9043 			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9044 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9045 		}
9046 	} else {
9047 		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9048 		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9049 		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9050 			& ~TU_SIZE_MASK;
9051 		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9052 		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9053 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9054 	}
9055 }
9056 
9057 void intel_dp_get_m_n(struct intel_crtc *crtc,
9058 		      struct intel_crtc_state *pipe_config)
9059 {
9060 	if (pipe_config->has_pch_encoder)
9061 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9062 	else
9063 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9064 					     &pipe_config->dp_m_n,
9065 					     &pipe_config->dp_m2_n2);
9066 }
9067 
9068 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9069 					struct intel_crtc_state *pipe_config)
9070 {
9071 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9072 				     &pipe_config->fdi_m_n, NULL);
9073 }
9074 
9075 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9076 				    struct intel_crtc_state *pipe_config)
9077 {
9078 	struct drm_device *dev = crtc->base.dev;
9079 	struct drm_i915_private *dev_priv = dev->dev_private;
9080 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9081 	uint32_t ps_ctrl = 0;
9082 	int id = -1;
9083 	int i;
9084 
9085 	/* find scaler attached to this pipe */
9086 	for (i = 0; i < crtc->num_scalers; i++) {
9087 		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9088 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9089 			id = i;
9090 			pipe_config->pch_pfit.enabled = true;
9091 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9092 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9093 			break;
9094 		}
9095 	}
9096 
9097 	scaler_state->scaler_id = id;
9098 	if (id >= 0) {
9099 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9100 	} else {
9101 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9102 	}
9103 }
9104 
9105 static void
9106 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9107 				 struct intel_initial_plane_config *plane_config)
9108 {
9109 	struct drm_device *dev = crtc->base.dev;
9110 	struct drm_i915_private *dev_priv = dev->dev_private;
9111 	u32 val, base, offset, stride_mult, tiling;
9112 	int pipe = crtc->pipe;
9113 	int fourcc, pixel_format;
9114 	unsigned int aligned_height;
9115 	struct drm_framebuffer *fb;
9116 	struct intel_framebuffer *intel_fb;
9117 
9118 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9119 	if (!intel_fb) {
9120 		DRM_DEBUG_KMS("failed to alloc fb\n");
9121 		return;
9122 	}
9123 
9124 	fb = &intel_fb->base;
9125 
9126 	val = I915_READ(PLANE_CTL(pipe, 0));
9127 	if (!(val & PLANE_CTL_ENABLE))
9128 		goto error;
9129 
9130 	pixel_format = val & PLANE_CTL_FORMAT_MASK;
9131 	fourcc = skl_format_to_fourcc(pixel_format,
9132 				      val & PLANE_CTL_ORDER_RGBX,
9133 				      val & PLANE_CTL_ALPHA_MASK);
9134 	fb->pixel_format = fourcc;
9135 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9136 
9137 	tiling = val & PLANE_CTL_TILED_MASK;
9138 	switch (tiling) {
9139 	case PLANE_CTL_TILED_LINEAR:
9140 		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9141 		break;
9142 	case PLANE_CTL_TILED_X:
9143 		plane_config->tiling = I915_TILING_X;
9144 		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9145 		break;
9146 	case PLANE_CTL_TILED_Y:
9147 		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9148 		break;
9149 	case PLANE_CTL_TILED_YF:
9150 		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9151 		break;
9152 	default:
9153 		MISSING_CASE(tiling);
9154 		goto error;
9155 	}
9156 
9157 	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9158 	plane_config->base = base;
9159 
9160 	offset = I915_READ(PLANE_OFFSET(pipe, 0));
9161 
9162 	val = I915_READ(PLANE_SIZE(pipe, 0));
9163 	fb->height = ((val >> 16) & 0xfff) + 1;
9164 	fb->width = ((val >> 0) & 0x1fff) + 1;
9165 
9166 	val = I915_READ(PLANE_STRIDE(pipe, 0));
9167 	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
9168 						fb->pixel_format);
9169 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
9170 
9171 	aligned_height = intel_fb_align_height(dev, fb->height,
9172 					       fb->pixel_format,
9173 					       fb->modifier[0]);
9174 
9175 	plane_config->size = fb->pitches[0] * aligned_height;
9176 
9177 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9178 		      pipe_name(pipe), fb->width, fb->height,
9179 		      fb->bits_per_pixel, base, fb->pitches[0],
9180 		      plane_config->size);
9181 
9182 	plane_config->fb = intel_fb;
9183 	return;
9184 
9185 error:
9186 	kfree(fb);
9187 }
9188 
9189 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9190 				     struct intel_crtc_state *pipe_config)
9191 {
9192 	struct drm_device *dev = crtc->base.dev;
9193 	struct drm_i915_private *dev_priv = dev->dev_private;
9194 	uint32_t tmp;
9195 
9196 	tmp = I915_READ(PF_CTL(crtc->pipe));
9197 
9198 	if (tmp & PF_ENABLE) {
9199 		pipe_config->pch_pfit.enabled = true;
9200 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9201 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9202 
9203 		/* We currently do not free assignments of panel fitters on
9204 		 * ivb/hsw (since we don't use the higher upscaling modes which
9205 		 * differentiate them) so just WARN about this case for now. */
9206 		if (IS_GEN7(dev)) {
9207 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9208 				PF_PIPE_SEL_IVB(crtc->pipe));
9209 		}
9210 	}
9211 }
9212 
9213 static void
9214 ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9215 				  struct intel_initial_plane_config *plane_config)
9216 {
9217 	struct drm_device *dev = crtc->base.dev;
9218 	struct drm_i915_private *dev_priv = dev->dev_private;
9219 	u32 val, base, offset;
9220 	int pipe = crtc->pipe;
9221 	int fourcc, pixel_format;
9222 	unsigned int aligned_height;
9223 	struct drm_framebuffer *fb;
9224 	struct intel_framebuffer *intel_fb;
9225 
9226 	val = I915_READ(DSPCNTR(pipe));
9227 	if (!(val & DISPLAY_PLANE_ENABLE))
9228 		return;
9229 
9230 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9231 	if (!intel_fb) {
9232 		DRM_DEBUG_KMS("failed to alloc fb\n");
9233 		return;
9234 	}
9235 
9236 	fb = &intel_fb->base;
9237 
9238 	if (INTEL_INFO(dev)->gen >= 4) {
9239 		if (val & DISPPLANE_TILED) {
9240 			plane_config->tiling = I915_TILING_X;
9241 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9242 		}
9243 	}
9244 
9245 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9246 	fourcc = i9xx_format_to_fourcc(pixel_format);
9247 	fb->pixel_format = fourcc;
9248 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9249 
9250 	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
9251 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
9252 		offset = I915_READ(DSPOFFSET(pipe));
9253 	} else {
9254 		if (plane_config->tiling)
9255 			offset = I915_READ(DSPTILEOFF(pipe));
9256 		else
9257 			offset = I915_READ(DSPLINOFF(pipe));
9258 	}
9259 	plane_config->base = base;
9260 
9261 	val = I915_READ(PIPESRC(pipe));
9262 	fb->width = ((val >> 16) & 0xfff) + 1;
9263 	fb->height = ((val >> 0) & 0xfff) + 1;
9264 
9265 	val = I915_READ(DSPSTRIDE(pipe));
9266 	fb->pitches[0] = val & 0xffffffc0;
9267 
9268 	aligned_height = intel_fb_align_height(dev, fb->height,
9269 					       fb->pixel_format,
9270 					       fb->modifier[0]);
9271 
9272 	plane_config->size = fb->pitches[0] * aligned_height;
9273 
9274 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9275 		      pipe_name(pipe), fb->width, fb->height,
9276 		      fb->bits_per_pixel, base, fb->pitches[0],
9277 		      plane_config->size);
9278 
9279 	plane_config->fb = intel_fb;
9280 }
9281 
9282 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9283 				     struct intel_crtc_state *pipe_config)
9284 {
9285 	struct drm_device *dev = crtc->base.dev;
9286 	struct drm_i915_private *dev_priv = dev->dev_private;
9287 	uint32_t tmp;
9288 
9289 	if (!intel_display_power_is_enabled(dev_priv,
9290 					    POWER_DOMAIN_PIPE(crtc->pipe)))
9291 		return false;
9292 
9293 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9294 	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9295 
9296 	tmp = I915_READ(PIPECONF(crtc->pipe));
9297 	if (!(tmp & PIPECONF_ENABLE))
9298 		return false;
9299 
9300 	switch (tmp & PIPECONF_BPC_MASK) {
9301 	case PIPECONF_6BPC:
9302 		pipe_config->pipe_bpp = 18;
9303 		break;
9304 	case PIPECONF_8BPC:
9305 		pipe_config->pipe_bpp = 24;
9306 		break;
9307 	case PIPECONF_10BPC:
9308 		pipe_config->pipe_bpp = 30;
9309 		break;
9310 	case PIPECONF_12BPC:
9311 		pipe_config->pipe_bpp = 36;
9312 		break;
9313 	default:
9314 		break;
9315 	}
9316 
9317 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9318 		pipe_config->limited_color_range = true;
9319 
9320 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9321 		struct intel_shared_dpll *pll;
9322 
9323 		pipe_config->has_pch_encoder = true;
9324 
9325 		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9326 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9327 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9328 
9329 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9330 
9331 		if (HAS_PCH_IBX(dev_priv->dev)) {
9332 			pipe_config->shared_dpll =
9333 				(enum intel_dpll_id) crtc->pipe;
9334 		} else {
9335 			tmp = I915_READ(PCH_DPLL_SEL);
9336 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9337 				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
9338 			else
9339 				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
9340 		}
9341 
9342 		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9343 
9344 		WARN_ON(!pll->get_hw_state(dev_priv, pll,
9345 					   &pipe_config->dpll_hw_state));
9346 
9347 		tmp = pipe_config->dpll_hw_state.dpll;
9348 		pipe_config->pixel_multiplier =
9349 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9350 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9351 
9352 		ironlake_pch_clock_get(crtc, pipe_config);
9353 	} else {
9354 		pipe_config->pixel_multiplier = 1;
9355 	}
9356 
9357 	intel_get_pipe_timings(crtc, pipe_config);
9358 
9359 	ironlake_get_pfit_config(crtc, pipe_config);
9360 
9361 	return true;
9362 }
9363 
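/* Sanity-check that everything that must be off before LCPLL can be
 * disabled really is off: CRTCs, the power well, the other PLLs, panel
 * power, backlight PWMs and interrupts.
 */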
9364 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9365 {
9366 	struct drm_device *dev = dev_priv->dev;
9367 	struct intel_crtc *crtc;
9368 
9369 	for_each_intel_crtc(dev, crtc)
9370 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9371 		     pipe_name(crtc->pipe));
9372 
9373 	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9374 	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9375 	I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9376 	I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9377 	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9378 	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9379 	     "CPU PWM1 enabled\n");
9380 	if (IS_HASWELL(dev))
9381 		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9382 		     "CPU PWM2 enabled\n");
9383 	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9384 	     "PCH PWM1 enabled\n");
9385 	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9386 	     "Utility pin enabled\n");
9387 	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9388 
9389 	/*
9390 	 * In theory we can still leave IRQs enabled, as long as only the HPD
9391 	 * interrupts remain enabled. We used to check for that, but since it's
9392 	 * gen-specific and since we only disable LCPLL after we fully disable
9393 	 * the interrupts, the check below should be enough.
9394 	 */
9395 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9396 }
9397 
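/* D_COMP lives at different addresses on HSW and BDW, and on HSW writes
 * must go through the pcode mailbox; hide that behind these helpers.
 */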
9398 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9399 {
9400 	struct drm_device *dev = dev_priv->dev;
9401 
9402 	if (IS_HASWELL(dev))
9403 		return I915_READ(D_COMP_HSW);
9404 	else
9405 		return I915_READ(D_COMP_BDW);
9406 }
9407 
9408 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9409 {
9410 	struct drm_device *dev = dev_priv->dev;
9411 
9412 	if (IS_HASWELL(dev)) {
9413 		mutex_lock(&dev_priv->rps.hw_lock);
9414 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9415 					    val))
9416 			DRM_ERROR("Failed to write to D_COMP\n");
9417 		mutex_unlock(&dev_priv->rps.hw_lock);
9418 	} else {
9419 		I915_WRITE(D_COMP_BDW, val);
9420 		POSTING_READ(D_COMP_BDW);
9421 	}
9422 }
9423 
9424 /*
9425  * This function implements pieces of two sequences from BSpec:
9426  * - Sequence for display software to disable LCPLL
9427  * - Sequence for display software to allow package C8+
9428  * The steps implemented here are just the steps that actually touch the LCPLL
9429  * register. Callers should take care of disabling all the display engine
9430  * functions, doing the mode unset, fixing interrupts, etc.
9431  */
9432 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9433 			      bool switch_to_fclk, bool allow_power_down)
9434 {
9435 	uint32_t val;
9436 
9437 	assert_can_disable_lcpll(dev_priv);
9438 
9439 	val = I915_READ(LCPLL_CTL);
9440 
9441 	if (switch_to_fclk) {
9442 		val |= LCPLL_CD_SOURCE_FCLK;
9443 		I915_WRITE(LCPLL_CTL, val);
9444 
9445 		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9446 				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
9447 			DRM_ERROR("Switching to FCLK failed\n");
9448 
9449 		val = I915_READ(LCPLL_CTL);
9450 	}
9451 
9452 	val |= LCPLL_PLL_DISABLE;
9453 	I915_WRITE(LCPLL_CTL, val);
9454 	POSTING_READ(LCPLL_CTL);
9455 
9456 	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9457 		DRM_ERROR("LCPLL still locked\n");
9458 
9459 	val = hsw_read_dcomp(dev_priv);
9460 	val |= D_COMP_COMP_DISABLE;
9461 	hsw_write_dcomp(dev_priv, val);
9462 	ndelay(100);
9463 
9464 	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9465 		     1))
9466 		DRM_ERROR("D_COMP RCOMP still in progress\n");
9467 
9468 	if (allow_power_down) {
9469 		val = I915_READ(LCPLL_CTL);
9470 		val |= LCPLL_POWER_DOWN_ALLOW;
9471 		I915_WRITE(LCPLL_CTL, val);
9472 		POSTING_READ(LCPLL_CTL);
9473 	}
9474 }
9475 
9476 /*
9477  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9478  * source.
9479  */
9480 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9481 {
9482 	uint32_t val;
9483 
9484 	val = I915_READ(LCPLL_CTL);
9485 
9486 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9487 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9488 		return;
9489 
9490 	/*
9491 	 * Make sure we're not in the PC8 state before disabling PC8, otherwise
9492 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9493 	 */
9494 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9495 
9496 	if (val & LCPLL_POWER_DOWN_ALLOW) {
9497 		val &= ~LCPLL_POWER_DOWN_ALLOW;
9498 		I915_WRITE(LCPLL_CTL, val);
9499 		POSTING_READ(LCPLL_CTL);
9500 	}
9501 
9502 	val = hsw_read_dcomp(dev_priv);
9503 	val |= D_COMP_COMP_FORCE;
9504 	val &= ~D_COMP_COMP_DISABLE;
9505 	hsw_write_dcomp(dev_priv, val);
9506 
9507 	val = I915_READ(LCPLL_CTL);
9508 	val &= ~LCPLL_PLL_DISABLE;
9509 	I915_WRITE(LCPLL_CTL, val);
9510 
9511 	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9512 		DRM_ERROR("LCPLL not locked yet\n");
9513 
9514 	if (val & LCPLL_CD_SOURCE_FCLK) {
9515 		val = I915_READ(LCPLL_CTL);
9516 		val &= ~LCPLL_CD_SOURCE_FCLK;
9517 		I915_WRITE(LCPLL_CTL, val);
9518 
9519 		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9520 					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9521 			DRM_ERROR("Switching back to LCPLL failed\n");
9522 	}
9523 
9524 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9525 	intel_update_cdclk(dev_priv->dev);
9526 }
9527 
9528 /*
9529  * Package states C8 and deeper are really deep PC states that can only be
9530  * reached when all the devices on the system allow it, so even if the graphics
9531  * device allows PC8+, it doesn't mean the system will actually get to these
9532  * states. Our driver only allows PC8+ when going into runtime PM.
9533  *
9534  * The requirements for PC8+ are that all the outputs are disabled, the power
9535  * well is disabled and most interrupts are disabled, and these are also
9536  * requirements for runtime PM. When these conditions are met, we manually do
9537  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9538  * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9539  * hang the machine.
9540  *
9541  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9542  * the state of some registers, so when we come back from PC8+ we need to
9543  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9544  * need to take care of the registers kept by RC6. Notice that this happens even
9545  * if we don't put the device in PCI D3 state (which is what currently happens
9546  * because of the runtime PM support).
9547  *
9548  * For more, read "Display Sequences for Package C8" on the hardware
9549  * documentation.
9550  */
9551 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9552 {
9553 	struct drm_device *dev = dev_priv->dev;
9554 	uint32_t val;
9555 
9556 	DRM_DEBUG_KMS("Enabling package C8+\n");
9557 
9558 	if (HAS_PCH_LPT_LP(dev)) {
9559 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9560 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9561 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9562 	}
9563 
9564 	lpt_disable_clkout_dp(dev);
9565 	hsw_disable_lcpll(dev_priv, true, true);
9566 }
9567 
9568 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9569 {
9570 	struct drm_device *dev = dev_priv->dev;
9571 	uint32_t val;
9572 
9573 	DRM_DEBUG_KMS("Disabling package C8+\n");
9574 
9575 	hsw_restore_lcpll(dev_priv);
9576 	lpt_init_pch_refclk(dev);
9577 
9578 	if (HAS_PCH_LPT_LP(dev)) {
9579 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9580 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9581 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9582 	}
9583 
9584 	intel_prepare_ddi(dev);
9585 }
9586 
9587 static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9588 {
9589 	struct drm_device *dev = old_state->dev;
9590 	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9591 
9592 	broxton_set_cdclk(dev, req_cdclk);
9593 }
9594 
9595 /* compute the max pixel rate for the new configuration */
9596 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9597 {
9598 	struct intel_crtc *intel_crtc;
9599 	struct intel_crtc_state *crtc_state;
9600 	int max_pixel_rate = 0;
9601 
9602 	for_each_intel_crtc(state->dev, intel_crtc) {
9603 		int pixel_rate;
9604 
9605 		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
9606 		if (IS_ERR(crtc_state))
9607 			return PTR_ERR(crtc_state);
9608 
9609 		if (!crtc_state->base.enable)
9610 			continue;
9611 
9612 		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9613 
9614 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
9615 		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
9616 			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9617 
9618 		max_pixel_rate = max(max_pixel_rate, pixel_rate);
9619 	}
9620 
9621 	return max_pixel_rate;
9622 }
9623 
9624 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9625 {
9626 	struct drm_i915_private *dev_priv = dev->dev_private;
9627 	uint32_t val, data;
9628 	int ret;
9629 
9630 	if (WARN((I915_READ(LCPLL_CTL) &
9631 		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9632 		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9633 		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9634 		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9635 		 "trying to change cdclk frequency with cdclk not enabled\n"))
9636 		return;
9637 
9638 	mutex_lock(&dev_priv->rps.hw_lock);
9639 	ret = sandybridge_pcode_write(dev_priv,
9640 				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9641 	mutex_unlock(&dev_priv->rps.hw_lock);
9642 	if (ret) {
9643 		DRM_ERROR("failed to inform pcode about cdclk change\n");
9644 		return;
9645 	}
9646 
9647 	val = I915_READ(LCPLL_CTL);
9648 	val |= LCPLL_CD_SOURCE_FCLK;
9649 	I915_WRITE(LCPLL_CTL, val);
9650 
9651 	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9652 			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
9653 		DRM_ERROR("Switching to FCLK failed\n");
9654 
9655 	val = I915_READ(LCPLL_CTL);
9656 	val &= ~LCPLL_CLK_FREQ_MASK;
9657 
9658 	switch (cdclk) {
9659 	case 450000:
9660 		val |= LCPLL_CLK_FREQ_450;
9661 		data = 0;
9662 		break;
9663 	case 540000:
9664 		val |= LCPLL_CLK_FREQ_54O_BDW;
9665 		data = 1;
9666 		break;
9667 	case 337500:
9668 		val |= LCPLL_CLK_FREQ_337_5_BDW;
9669 		data = 2;
9670 		break;
9671 	case 675000:
9672 		val |= LCPLL_CLK_FREQ_675_BDW;
9673 		data = 3;
9674 		break;
9675 	default:
9676 		WARN(1, "invalid cdclk frequency\n");
9677 		return;
9678 	}
9679 
9680 	I915_WRITE(LCPLL_CTL, val);
9681 
9682 	val = I915_READ(LCPLL_CTL);
9683 	val &= ~LCPLL_CD_SOURCE_FCLK;
9684 	I915_WRITE(LCPLL_CTL, val);
9685 
9686 	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9687 				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9688 		DRM_ERROR("Switching back to LCPLL failed\n");
9689 
9690 	mutex_lock(&dev_priv->rps.hw_lock);
9691 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9692 	mutex_unlock(&dev_priv->rps.hw_lock);
9693 
9694 	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
9695 
9696 	intel_update_cdclk(dev);
9697 
9698 	WARN(cdclk != dev_priv->cdclk_freq,
9699 	     "cdclk requested %d kHz but got %d kHz\n",
9700 	     cdclk, dev_priv->cdclk_freq);
9701 }
9702 
9703 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9704 {
9705 	struct drm_i915_private *dev_priv = to_i915(state->dev);
9706 	int max_pixclk = ilk_max_pixel_rate(state);
9707 	int cdclk;
9708 
9709 	/*
9710 	 * FIXME should also account for plane ratio
9711 	 * once 64bpp pixel formats are supported.
9712 	 */
9713 	if (max_pixclk > 540000)
9714 		cdclk = 675000;
9715 	else if (max_pixclk > 450000)
9716 		cdclk = 540000;
9717 	else if (max_pixclk > 337500)
9718 		cdclk = 450000;
9719 	else
9720 		cdclk = 337500;
9721 
9722 	/*
9723 	 * FIXME move the cdclk calculation to
9724 	 * compute_config() so we can fail gracefully.
9725 	 */
9726 	if (cdclk > dev_priv->max_cdclk_freq) {
9727 		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9728 			  cdclk, dev_priv->max_cdclk_freq);
9729 		cdclk = dev_priv->max_cdclk_freq;
9730 	}
9731 
9732 	to_intel_atomic_state(state)->cdclk = cdclk;
9733 
9734 	return 0;
9735 }
9736 
9737 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9738 {
9739 	struct drm_device *dev = old_state->dev;
9740 	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9741 
9742 	broadwell_set_cdclk(dev, req_cdclk);
9743 }
9744 
9745 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9746 				      struct intel_crtc_state *crtc_state)
9747 {
9748 	if (!intel_ddi_pll_select(crtc, crtc_state))
9749 		return -EINVAL;
9750 
9751 	crtc->lowfreq_avail = false;
9752 
9753 	return 0;
9754 }
9755 
9756 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9757 				enum port port,
9758 				struct intel_crtc_state *pipe_config)
9759 {
9760 	switch (port) {
9761 	case PORT_A:
9762 		pipe_config->ddi_pll_sel = SKL_DPLL0;
9763 		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9764 		break;
9765 	case PORT_B:
9766 		pipe_config->ddi_pll_sel = SKL_DPLL1;
9767 		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9768 		break;
9769 	case PORT_C:
9770 		pipe_config->ddi_pll_sel = SKL_DPLL2;
9771 		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9772 		break;
9773 	default:
9774 		DRM_ERROR("Incorrect port type\n");
9775 	}
9776 }
9777 
9778 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9779 				enum port port,
9780 				struct intel_crtc_state *pipe_config)
9781 {
9782 	u32 temp, dpll_ctl1;
9783 
9784 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9785 	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
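
	/*
	 * Each port is assumed to own a 3-bit field in DPLL_CTRL2: an
	 * override bit at bit (port * 3) and a two-bit DPLL select
	 * starting at bit (port * 3 + 1), hence the shift above. For
	 * PORT_B (port 1), for example, the select sits in bits 5:4.
	 */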
9786 
9787 	switch (pipe_config->ddi_pll_sel) {
9788 	case SKL_DPLL0:
9789 		/*
9790 		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
9791 		 * of the shared DPLL framework and thus needs to be read out
9792 		 * separately
9793 		 */
9794 		dpll_ctl1 = I915_READ(DPLL_CTRL1);
9795 		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
9796 		break;
9797 	case SKL_DPLL1:
9798 		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9799 		break;
9800 	case SKL_DPLL2:
9801 		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9802 		break;
9803 	case SKL_DPLL3:
9804 		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9805 		break;
9806 	}
9807 }
9808 
9809 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9810 				enum port port,
9811 				struct intel_crtc_state *pipe_config)
9812 {
9813 	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9814 
9815 	switch (pipe_config->ddi_pll_sel) {
9816 	case PORT_CLK_SEL_WRPLL1:
9817 		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
9818 		break;
9819 	case PORT_CLK_SEL_WRPLL2:
9820 		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
9821 		break;
9822 	case PORT_CLK_SEL_SPLL:
9823 		pipe_config->shared_dpll = DPLL_ID_SPLL;
9824 	}
9825 }
9826 
9827 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9828 				       struct intel_crtc_state *pipe_config)
9829 {
9830 	struct drm_device *dev = crtc->base.dev;
9831 	struct drm_i915_private *dev_priv = dev->dev_private;
9832 	struct intel_shared_dpll *pll;
9833 	enum port port;
9834 	uint32_t tmp;
9835 
9836 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9837 
9838 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9839 
9840 	if (IS_SKYLAKE(dev))
9841 		skylake_get_ddi_pll(dev_priv, port, pipe_config);
9842 	else if (IS_BROXTON(dev))
9843 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
9844 	else
9845 		haswell_get_ddi_pll(dev_priv, port, pipe_config);
9846 
9847 	if (pipe_config->shared_dpll >= 0) {
9848 		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9849 
9850 		WARN_ON(!pll->get_hw_state(dev_priv, pll,
9851 					   &pipe_config->dpll_hw_state));
9852 	}
9853 
9854 	/*
9855 	 * Haswell has only FDI/PCH transcoder A, which is connected to
9856 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
9857 	 * the PCH transcoder is on.
9858 	 */
9859 	if (INTEL_INFO(dev)->gen < 9 &&
9860 	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9861 		pipe_config->has_pch_encoder = true;
9862 
9863 		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9864 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9865 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9866 
9867 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9868 	}
9869 }
9870 
9871 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9872 				    struct intel_crtc_state *pipe_config)
9873 {
9874 	struct drm_device *dev = crtc->base.dev;
9875 	struct drm_i915_private *dev_priv = dev->dev_private;
9876 	enum intel_display_power_domain pfit_domain;
9877 	uint32_t tmp;
9878 
9879 	if (!intel_display_power_is_enabled(dev_priv,
9880 					 POWER_DOMAIN_PIPE(crtc->pipe)))
9881 		return false;
9882 
9883 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9884 	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9885 
9886 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9887 	if (tmp & TRANS_DDI_FUNC_ENABLE) {
9888 		enum pipe trans_edp_pipe;
9889 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9890 		default:
9891 			WARN(1, "unknown pipe linked to edp transcoder\n");
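			/* fallthrough */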
9892 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
9893 		case TRANS_DDI_EDP_INPUT_A_ON:
9894 			trans_edp_pipe = PIPE_A;
9895 			break;
9896 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
9897 			trans_edp_pipe = PIPE_B;
9898 			break;
9899 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
9900 			trans_edp_pipe = PIPE_C;
9901 			break;
9902 		}
9903 
9904 		if (trans_edp_pipe == crtc->pipe)
9905 			pipe_config->cpu_transcoder = TRANSCODER_EDP;
9906 	}
9907 
9908 	if (!intel_display_power_is_enabled(dev_priv,
9909 			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
9910 		return false;
9911 
9912 	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9913 	if (!(tmp & PIPECONF_ENABLE))
9914 		return false;
9915 
9916 	haswell_get_ddi_port_state(crtc, pipe_config);
9917 
9918 	intel_get_pipe_timings(crtc, pipe_config);
9919 
9920 	if (INTEL_INFO(dev)->gen >= 9) {
9921 		skl_init_scalers(dev, crtc, pipe_config);
9922 	}
9923 
9924 	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
9925 
9926 	if (INTEL_INFO(dev)->gen >= 9) {
9927 		pipe_config->scaler_state.scaler_id = -1;
9928 		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
9929 	}
9930 
9931 	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
9932 		if (INTEL_INFO(dev)->gen >= 9)
9933 			skylake_get_pfit_config(crtc, pipe_config);
9934 		else
9935 			ironlake_get_pfit_config(crtc, pipe_config);
9936 	}
9937 
9938 	if (IS_HASWELL(dev))
9939 		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
9940 			(I915_READ(IPS_CTL) & IPS_ENABLE);
9941 
9942 	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
9943 		pipe_config->pixel_multiplier =
9944 			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
9945 	} else {
9946 		pipe_config->pixel_multiplier = 1;
9947 	}
9948 
9949 	return true;
9950 }
9951 
9952 static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
9953 {
9954 	struct drm_device *dev = crtc->dev;
9955 	struct drm_i915_private *dev_priv = dev->dev_private;
9956 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9957 	uint32_t cntl = 0, size = 0;
9958 
9959 	if (on) {
9960 		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
9961 		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
9962 		unsigned int stride = roundup_pow_of_two(width) * 4;
9963 
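		/*
		 * For example, a 100 pixel wide ARGB cursor rounds up to a
		 * 128 pixel (512 byte) stride, one of the four legal values
		 * checked below; anything rounding outside 256..2048 bytes
		 * is warned about and forced back to a 256 byte stride.
		 */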
9964 		switch (stride) {
9965 		default:
9966 			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
9967 				  width, stride);
9968 			stride = 256;
9969 			/* fallthrough */
9970 		case 256:
9971 		case 512:
9972 		case 1024:
9973 		case 2048:
9974 			break;
9975 		}
9976 
9977 		cntl |= CURSOR_ENABLE |
9978 			CURSOR_GAMMA_ENABLE |
9979 			CURSOR_FORMAT_ARGB |
9980 			CURSOR_STRIDE(stride);
9981 
9982 		size = (height << 12) | width;
9983 	}
9984 
9985 	if (intel_crtc->cursor_cntl != 0 &&
9986 	    (intel_crtc->cursor_base != base ||
9987 	     intel_crtc->cursor_size != size ||
9988 	     intel_crtc->cursor_cntl != cntl)) {
9989 		/* On these chipsets we can only modify the base/size/stride
9990 		 * whilst the cursor is disabled.
9991 		 */
9992 		I915_WRITE(CURCNTR(PIPE_A), 0);
9993 		POSTING_READ(CURCNTR(PIPE_A));
9994 		intel_crtc->cursor_cntl = 0;
9995 	}
9996 
9997 	if (intel_crtc->cursor_base != base) {
9998 		I915_WRITE(CURBASE(PIPE_A), base);
9999 		intel_crtc->cursor_base = base;
10000 	}
10001 
10002 	if (intel_crtc->cursor_size != size) {
10003 		I915_WRITE(CURSIZE, size);
10004 		intel_crtc->cursor_size = size;
10005 	}
10006 
10007 	if (intel_crtc->cursor_cntl != cntl) {
10008 		I915_WRITE(CURCNTR(PIPE_A), cntl);
10009 		POSTING_READ(CURCNTR(PIPE_A));
10010 		intel_crtc->cursor_cntl = cntl;
10011 	}
10012 }
10013 
10014 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
10015 {
10016 	struct drm_device *dev = crtc->dev;
10017 	struct drm_i915_private *dev_priv = dev->dev_private;
10018 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10019 	int pipe = intel_crtc->pipe;
10020 	uint32_t cntl = 0;
10021 
10022 	if (on) {
10023 		cntl = MCURSOR_GAMMA_ENABLE;
10024 		switch (intel_crtc->base.cursor->state->crtc_w) {
10025 			case 64:
10026 				cntl |= CURSOR_MODE_64_ARGB_AX;
10027 				break;
10028 			case 128:
10029 				cntl |= CURSOR_MODE_128_ARGB_AX;
10030 				break;
10031 			case 256:
10032 				cntl |= CURSOR_MODE_256_ARGB_AX;
10033 				break;
10034 			default:
10035 				MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
10036 				return;
10037 		}
10038 		cntl |= pipe << 28; /* Connect to correct pipe */
10039 
10040 		if (HAS_DDI(dev))
10041 			cntl |= CURSOR_PIPE_CSC_ENABLE;
10042 	}
10043 
10044 	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
10045 		cntl |= CURSOR_ROTATE_180;
10046 
10047 	if (intel_crtc->cursor_cntl != cntl) {
10048 		I915_WRITE(CURCNTR(pipe), cntl);
10049 		POSTING_READ(CURCNTR(pipe));
10050 		intel_crtc->cursor_cntl = cntl;
10051 	}
10052 
10053 	/* and commit changes on next vblank */
10054 	I915_WRITE(CURBASE(pipe), base);
10055 	POSTING_READ(CURBASE(pipe));
10056 
10057 	intel_crtc->cursor_base = base;
10058 }
10059 
10060 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
10061 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10062 				     bool on)
10063 {
10064 	struct drm_device *dev = crtc->dev;
10065 	struct drm_i915_private *dev_priv = dev->dev_private;
10066 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10067 	int pipe = intel_crtc->pipe;
10068 	struct drm_plane_state *cursor_state = crtc->cursor->state;
10069 	int x = cursor_state->crtc_x;
10070 	int y = cursor_state->crtc_y;
10071 	u32 base = 0, pos = 0;
10072 
10073 	base = intel_crtc->cursor_addr;
10074 
10075 	if (x >= intel_crtc->config->pipe_src_w)
10076 		on = false;
10077 
10078 	if (y >= intel_crtc->config->pipe_src_h)
10079 		on = false;
10080 
10081 	if (x < 0) {
10082 		if (x + cursor_state->crtc_w <= 0)
10083 			on = false;
10084 
10085 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10086 		x = -x;
10087 	}
10088 	pos |= x << CURSOR_X_SHIFT;
10089 
10090 	if (y < 0) {
10091 		if (y + cursor_state->crtc_h <= 0)
10092 			on = false;
10093 
10094 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10095 		y = -y;
10096 	}
10097 	pos |= y << CURSOR_Y_SHIFT;
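
	/*
	 * CURPOS is sign/magnitude: a cursor at (-16, 200), for example,
	 * is programmed as magnitude 16 with the X sign bit set and 200
	 * in the Y field, and the hardware clips the off-screen part.
	 */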
10098 
10099 	I915_WRITE(CURPOS(pipe), pos);
10100 
10101 	/* ILK+ do this automagically */
10102 	if (HAS_GMCH_DISPLAY(dev) &&
10103 	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
10104 		base += (cursor_state->crtc_h *
10105 			 cursor_state->crtc_w - 1) * 4;
10106 	}
10107 
10108 	if (IS_845G(dev) || IS_I865G(dev))
10109 		i845_update_cursor(crtc, base, on);
10110 	else
10111 		i9xx_update_cursor(crtc, base, on);
10112 }
10113 
10114 static bool cursor_size_ok(struct drm_device *dev,
10115 			   uint32_t width, uint32_t height)
10116 {
10117 	if (width == 0 || height == 0)
10118 		return false;
10119 
10120 	/*
10121 	 * 845g/865g are special in that they are only limited by
10122 	 * the width of their cursors; the height is arbitrary up to
10123 	 * the precision of the register. Everything else requires
10124 	 * square cursors, limited to a few power-of-two sizes.
10125 	 */
10126 	if (IS_845G(dev) || IS_I865G(dev)) {
10127 		if ((width & 63) != 0)
10128 			return false;
10129 
10130 		if (width > (IS_845G(dev) ? 64 : 512))
10131 			return false;
10132 
10133 		if (height > 1023)
10134 			return false;
10135 	} else {
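		/*
		 * width | height only matches a case label when the cursor
		 * is square: e.g. 128x64 yields 192, which hits the default
		 * case and is rejected, while 128x128 yields 128 and is
		 * accepted everywhere except gen2 (via the fallthrough
		 * below).
		 */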
10136 		switch (width | height) {
10137 		case 256:
10138 		case 128:
10139 			if (IS_GEN2(dev))
10140 				return false;
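			/* fallthrough */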
10141 		case 64:
10142 			break;
10143 		default:
10144 			return false;
10145 		}
10146 	}
10147 
10148 	return true;
10149 }
10150 
10151 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
10152 				 u16 *blue, uint32_t start, uint32_t size)
10153 {
10154 	int end = (start + size > 256) ? 256 : start + size, i;
10155 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10156 
10157 	for (i = start; i < end; i++) {
10158 		intel_crtc->lut_r[i] = red[i] >> 8;
10159 		intel_crtc->lut_g[i] = green[i] >> 8;
10160 		intel_crtc->lut_b[i] = blue[i] >> 8;
10161 	}
10162 
10163 	intel_crtc_load_lut(crtc);
10164 }
10165 
10166 /* VESA 640x480x72Hz mode to set on the pipe */
10167 static struct drm_display_mode load_detect_mode = {
10168 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10169 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10170 };
10171 
10172 struct drm_framebuffer *
10173 __intel_framebuffer_create(struct drm_device *dev,
10174 			   struct drm_mode_fb_cmd2 *mode_cmd,
10175 			   struct drm_i915_gem_object *obj)
10176 {
10177 	struct intel_framebuffer *intel_fb;
10178 	int ret;
10179 
10180 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10181 	if (!intel_fb) {
10182 		drm_gem_object_unreference(&obj->base);
10183 		return ERR_PTR(-ENOMEM);
10184 	}
10185 
10186 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10187 	if (ret)
10188 		goto err;
10189 
10190 	return &intel_fb->base;
10191 err:
10192 	drm_gem_object_unreference(&obj->base);
10193 	kfree(intel_fb);
10194 
10195 	return ERR_PTR(ret);
10196 }
10197 
10198 static struct drm_framebuffer *
10199 intel_framebuffer_create(struct drm_device *dev,
10200 			 struct drm_mode_fb_cmd2 *mode_cmd,
10201 			 struct drm_i915_gem_object *obj)
10202 {
10203 	struct drm_framebuffer *fb;
10204 	int ret;
10205 
10206 	ret = i915_mutex_lock_interruptible(dev);
10207 	if (ret)
10208 		return ERR_PTR(ret);
10209 	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10210 	mutex_unlock(&dev->struct_mutex);
10211 
10212 	return fb;
10213 }
10214 
10215 static u32
10216 intel_framebuffer_pitch_for_width(int width, int bpp)
10217 {
10218 	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10219 	return ALIGN(pitch, 64);
10220 }
10221 
10222 static u32
10223 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10224 {
10225 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10226 	return PAGE_ALIGN(pitch * mode->vdisplay);
10227 }
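
/*
 * For example, a 1366 pixel wide, 32bpp framebuffer needs
 * DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes per row, which ALIGN()s up to a
 * 5504 byte pitch; a 640x480 mode at the same depth gets a 2560 byte pitch
 * and a PAGE_ALIGN()ed allocation of 2560 * 480 = 1228800 bytes.
 */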
10228 
10229 static struct drm_framebuffer *
10230 intel_framebuffer_create_for_mode(struct drm_device *dev,
10231 				  struct drm_display_mode *mode,
10232 				  int depth, int bpp)
10233 {
10234 	struct drm_i915_gem_object *obj;
10235 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10236 
10237 	obj = i915_gem_alloc_object(dev,
10238 				    intel_framebuffer_size_for_mode(mode, bpp));
10239 	if (obj == NULL)
10240 		return ERR_PTR(-ENOMEM);
10241 
10242 	mode_cmd.width = mode->hdisplay;
10243 	mode_cmd.height = mode->vdisplay;
10244 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10245 								bpp);
10246 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10247 
10248 	return intel_framebuffer_create(dev, &mode_cmd, obj);
10249 }
10250 
10251 static struct drm_framebuffer *
10252 mode_fits_in_fbdev(struct drm_device *dev,
10253 		   struct drm_display_mode *mode)
10254 {
10255 #ifdef CONFIG_DRM_FBDEV_EMULATION
10256 	struct drm_i915_private *dev_priv = dev->dev_private;
10257 	struct drm_i915_gem_object *obj;
10258 	struct drm_framebuffer *fb;
10259 
10260 	if (!dev_priv->fbdev)
10261 		return NULL;
10262 
10263 	if (!dev_priv->fbdev->fb)
10264 		return NULL;
10265 
10266 	obj = dev_priv->fbdev->fb->obj;
10267 	BUG_ON(!obj);
10268 
10269 	fb = &dev_priv->fbdev->fb->base;
10270 	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10271 							       fb->bits_per_pixel))
10272 		return NULL;
10273 
10274 	if (obj->base.size < mode->vdisplay * fb->pitches[0])
10275 		return NULL;
10276 
10277 	return fb;
10278 #else
10279 	return NULL;
10280 #endif
10281 }
10282 
10283 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10284 					   struct drm_crtc *crtc,
10285 					   struct drm_display_mode *mode,
10286 					   struct drm_framebuffer *fb,
10287 					   int x, int y)
10288 {
10289 	struct drm_plane_state *plane_state;
10290 	int hdisplay, vdisplay;
10291 	int ret;
10292 
10293 	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10294 	if (IS_ERR(plane_state))
10295 		return PTR_ERR(plane_state);
10296 
10297 	if (mode)
10298 		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10299 	else
10300 		hdisplay = vdisplay = 0;
10301 
10302 	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10303 	if (ret)
10304 		return ret;
10305 	drm_atomic_set_fb_for_plane(plane_state, fb);
10306 	plane_state->crtc_x = 0;
10307 	plane_state->crtc_y = 0;
10308 	plane_state->crtc_w = hdisplay;
10309 	plane_state->crtc_h = vdisplay;
10310 	plane_state->src_x = x << 16;
10311 	plane_state->src_y = y << 16;
10312 	plane_state->src_w = hdisplay << 16;
10313 	plane_state->src_h = vdisplay << 16;
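
	/*
	 * The src_* coordinates are in 16.16 fixed point, so shifting the
	 * mode's hdisplay/vdisplay left by 16 requests an unscaled 1:1
	 * mapping of the framebuffer onto the crtc_w x crtc_h region.
	 */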
10314 
10315 	return 0;
10316 }
10317 
10318 bool intel_get_load_detect_pipe(struct drm_connector *connector,
10319 				struct drm_display_mode *mode,
10320 				struct intel_load_detect_pipe *old,
10321 				struct drm_modeset_acquire_ctx *ctx)
10322 {
10323 	struct intel_crtc *intel_crtc;
10324 	struct intel_encoder *intel_encoder =
10325 		intel_attached_encoder(connector);
10326 	struct drm_crtc *possible_crtc;
10327 	struct drm_encoder *encoder = &intel_encoder->base;
10328 	struct drm_crtc *crtc = NULL;
10329 	struct drm_device *dev = encoder->dev;
10330 	struct drm_framebuffer *fb;
10331 	struct drm_mode_config *config = &dev->mode_config;
10332 	struct drm_atomic_state *state = NULL;
10333 	struct drm_connector_state *connector_state;
10334 	struct intel_crtc_state *crtc_state;
10335 	int ret, i = -1;
10336 
10337 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10338 		      connector->base.id, connector->name,
10339 		      encoder->base.id, encoder->name);
10340 
10341 retry:
10342 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
10343 	if (ret)
10344 		goto fail;
10345 
10346 	/*
10347 	 * Algorithm gets a little messy:
10348 	 *
10349 	 *   - if the connector already has an assigned crtc, use it (but make
10350 	 *     sure it's on first)
10351 	 *
10352 	 *   - try to find the first unused crtc that can drive this connector,
10353 	 *     and use that if we find one
10354 	 */
10355 
10356 	/* See if we already have a CRTC for this connector */
10357 	if (encoder->crtc) {
10358 		crtc = encoder->crtc;
10359 
10360 		ret = drm_modeset_lock(&crtc->mutex, ctx);
10361 		if (ret)
10362 			goto fail;
10363 		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10364 		if (ret)
10365 			goto fail;
10366 
10367 		old->dpms_mode = connector->dpms;
10368 		old->load_detect_temp = false;
10369 
10370 		/* Make sure the crtc and connector are running */
10371 		if (connector->dpms != DRM_MODE_DPMS_ON)
10372 			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
10373 
10374 		return true;
10375 	}
10376 
10377 	/* Find an unused one (if possible) */
10378 	for_each_crtc(dev, possible_crtc) {
10379 		i++;
10380 		if (!(encoder->possible_crtcs & (1 << i)))
10381 			continue;
10382 		if (possible_crtc->state->enable)
10383 			continue;
10384 
10385 		crtc = possible_crtc;
10386 		break;
10387 	}
10388 
10389 	/*
10390 	 * If we didn't find an unused CRTC, don't use any.
10391 	 */
10392 	if (!crtc) {
10393 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
10394 		goto fail;
10395 	}
10396 
10397 	ret = drm_modeset_lock(&crtc->mutex, ctx);
10398 	if (ret)
10399 		goto fail;
10400 	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10401 	if (ret)
10402 		goto fail;
10403 
10404 	intel_crtc = to_intel_crtc(crtc);
10405 	old->dpms_mode = connector->dpms;
10406 	old->load_detect_temp = true;
10407 	old->release_fb = NULL;
10408 
10409 	state = drm_atomic_state_alloc(dev);
10410 	if (!state)
10411 		return false;
10412 
10413 	state->acquire_ctx = ctx;
10414 
10415 	connector_state = drm_atomic_get_connector_state(state, connector);
10416 	if (IS_ERR(connector_state)) {
10417 		ret = PTR_ERR(connector_state);
10418 		goto fail;
10419 	}
10420 
10421 	connector_state->crtc = crtc;
10422 	connector_state->best_encoder = &intel_encoder->base;
10423 
10424 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10425 	if (IS_ERR(crtc_state)) {
10426 		ret = PTR_ERR(crtc_state);
10427 		goto fail;
10428 	}
10429 
10430 	crtc_state->base.active = crtc_state->base.enable = true;
10431 
10432 	if (!mode)
10433 		mode = &load_detect_mode;
10434 
10435 	/* We need a framebuffer large enough to accommodate all accesses
10436 	 * that the plane may generate whilst we perform load detection.
10437 	 * We cannot rely on the fbcon being present (we get called
10438 	 * during its initialisation to detect all boot displays, or it may
10439 	 * not even exist), nor on it being large enough to satisfy the
10440 	 * requested mode.
10441 	 */
10442 	fb = mode_fits_in_fbdev(dev, mode);
10443 	if (fb == NULL) {
10444 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10445 		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10446 		old->release_fb = fb;
10447 	} else
10448 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10449 	if (IS_ERR(fb)) {
10450 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10451 		goto fail;
10452 	}
10453 
10454 	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10455 	if (ret)
10456 		goto fail;
10457 
10458 	drm_mode_copy(&crtc_state->base.mode, mode);
10459 
10460 	if (drm_atomic_commit(state)) {
10461 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10462 		if (old->release_fb)
10463 			old->release_fb->funcs->destroy(old->release_fb);
10464 		goto fail;
10465 	}
10466 	crtc->primary->crtc = crtc;
10467 
10468 	/* let the connector get through one full cycle before testing */
10469 	intel_wait_for_vblank(dev, intel_crtc->pipe);
10470 	return true;
10471 
10472 fail:
10473 	drm_atomic_state_free(state);
10474 	state = NULL;
10475 
10476 	if (ret == -EDEADLK) {
10477 		drm_modeset_backoff(ctx);
10478 		goto retry;
10479 	}
10480 
10481 	return false;
10482 }
10483 
10484 void intel_release_load_detect_pipe(struct drm_connector *connector,
10485 				    struct intel_load_detect_pipe *old,
10486 				    struct drm_modeset_acquire_ctx *ctx)
10487 {
10488 	struct drm_device *dev = connector->dev;
10489 	struct intel_encoder *intel_encoder =
10490 		intel_attached_encoder(connector);
10491 	struct drm_encoder *encoder = &intel_encoder->base;
10492 	struct drm_crtc *crtc = encoder->crtc;
10493 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10494 	struct drm_atomic_state *state;
10495 	struct drm_connector_state *connector_state;
10496 	struct intel_crtc_state *crtc_state;
10497 	int ret;
10498 
10499 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10500 		      connector->base.id, connector->name,
10501 		      encoder->base.id, encoder->name);
10502 
10503 	if (old->load_detect_temp) {
10504 		state = drm_atomic_state_alloc(dev);
10505 		if (!state)
10506 			goto fail;
10507 
10508 		state->acquire_ctx = ctx;
10509 
10510 		connector_state = drm_atomic_get_connector_state(state, connector);
10511 		if (IS_ERR(connector_state))
10512 			goto fail;
10513 
10514 		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10515 		if (IS_ERR(crtc_state))
10516 			goto fail;
10517 
10518 		connector_state->best_encoder = NULL;
10519 		connector_state->crtc = NULL;
10520 
10521 		crtc_state->base.enable = crtc_state->base.active = false;
10522 
10523 		ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
10524 						      0, 0);
10525 		if (ret)
10526 			goto fail;
10527 
10528 		ret = drm_atomic_commit(state);
10529 		if (ret)
10530 			goto fail;
10531 
10532 		if (old->release_fb) {
10533 			drm_framebuffer_unregister_private(old->release_fb);
10534 			drm_framebuffer_unreference(old->release_fb);
10535 		}
10536 
10537 		return;
10538 	}
10539 
10540 	/* Switch crtc and encoder back off if necessary */
10541 	if (old->dpms_mode != DRM_MODE_DPMS_ON)
10542 		connector->funcs->dpms(connector, old->dpms_mode);
10543 
10544 	return;
10545 fail:
10546 	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
10547 	drm_atomic_state_free(state);
10548 }
10549 
10550 static int i9xx_pll_refclk(struct drm_device *dev,
10551 			   const struct intel_crtc_state *pipe_config)
10552 {
10553 	struct drm_i915_private *dev_priv = dev->dev_private;
10554 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10555 
10556 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10557 		return dev_priv->vbt.lvds_ssc_freq;
10558 	else if (HAS_PCH_SPLIT(dev))
10559 		return 120000;
10560 	else if (!IS_GEN2(dev))
10561 		return 96000;
10562 	else
10563 		return 48000;
10564 }
10565 
10566 /* Returns the clock of the currently programmed mode of the given pipe. */
10567 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10568 				struct intel_crtc_state *pipe_config)
10569 {
10570 	struct drm_device *dev = crtc->base.dev;
10571 	struct drm_i915_private *dev_priv = dev->dev_private;
10572 	int pipe = pipe_config->cpu_transcoder;
10573 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10574 	u32 fp;
10575 	intel_clock_t clock;
10576 	int port_clock;
10577 	int refclk = i9xx_pll_refclk(dev, pipe_config);
10578 
10579 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10580 		fp = pipe_config->dpll_hw_state.fp0;
10581 	else
10582 		fp = pipe_config->dpll_hw_state.fp1;
10583 
10584 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10585 	if (IS_PINEVIEW(dev)) {
10586 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10587 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10588 	} else {
10589 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10590 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10591 	}
10592 
10593 	if (!IS_GEN2(dev)) {
10594 		if (IS_PINEVIEW(dev))
10595 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10596 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10597 		else
10598 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10599 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
10600 
10601 		switch (dpll & DPLL_MODE_MASK) {
10602 		case DPLLB_MODE_DAC_SERIAL:
10603 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10604 				5 : 10;
10605 			break;
10606 		case DPLLB_MODE_LVDS:
10607 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10608 				7 : 14;
10609 			break;
10610 		default:
10611 			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10612 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
10613 			return;
10614 		}
10615 
10616 		if (IS_PINEVIEW(dev))
10617 			port_clock = pnv_calc_dpll_params(refclk, &clock);
10618 		else
10619 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
10620 	} else {
10621 		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10622 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10623 
10624 		if (is_lvds) {
10625 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10626 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
10627 
10628 			if (lvds & LVDS_CLKB_POWER_UP)
10629 				clock.p2 = 7;
10630 			else
10631 				clock.p2 = 14;
10632 		} else {
10633 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
10634 				clock.p1 = 2;
10635 			else {
10636 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10637 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10638 			}
10639 			if (dpll & PLL_P2_DIVIDE_BY_4)
10640 				clock.p2 = 4;
10641 			else
10642 				clock.p2 = 2;
10643 		}
10644 
10645 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
10646 	}
10647 
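	/*
	 * As a sketch of the maths, i9xx_calc_dpll_params() is assumed to
	 * implement the classic dot = refclk * (5 * m1 + m2) / (n + 2) /
	 * (p1 * p2) relation; e.g. a 96000 kHz refclk with m1=12, m2=9,
	 * n=3, p1=2 and p2=10 works out to 96000 * 69 / 5 / 20 = 66240 kHz.
	 */
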
10648 	/*
10649 	 * This value includes pixel_multiplier. We will use
10650 	 * port_clock to compute adjusted_mode.crtc_clock in the
10651 	 * encoder's get_config() function.
10652 	 */
10653 	pipe_config->port_clock = port_clock;
10654 }
10655 
10656 int intel_dotclock_calculate(int link_freq,
10657 			     const struct intel_link_m_n *m_n)
10658 {
10659 	/*
10660 	 * The calculation for the data clock is:
10661 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10662 	 * But we want to avoid losing precision if possible, so:
10663 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10664 	 *
10665 	 * and for the link M/N values the dot clock is simply:
10666 	 * dot_clock = (link_m * link_clock) / link_n
10667 	 */
10668 
10669 	if (!m_n->link_n)
10670 		return 0;
10671 
10672 	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10673 }
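
/*
 * For example, intel_dotclock_calculate() with a link M/N ratio of 11:20
 * on a 270000 kHz DP link returns 270000 * 11 / 20 = 148500 kHz, the
 * standard 1080p dotclock.
 */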
10674 
10675 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10676 				   struct intel_crtc_state *pipe_config)
10677 {
10678 	struct drm_device *dev = crtc->base.dev;
10679 
10680 	/* read out port_clock from the DPLL */
10681 	i9xx_crtc_clock_get(crtc, pipe_config);
10682 
10683 	/*
10684 	 * This value does not include pixel_multiplier.
10685 	 * We will check that port_clock and adjusted_mode.crtc_clock
10686 	 * agree once we know their relationship in the encoder's
10687 	 * get_config() function.
10688 	 */
10689 	pipe_config->base.adjusted_mode.crtc_clock =
10690 		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
10691 					 &pipe_config->fdi_m_n);
10692 }
10693 
10694 /** Returns the currently programmed mode of the given pipe. */
10695 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10696 					     struct drm_crtc *crtc)
10697 {
10698 	struct drm_i915_private *dev_priv = dev->dev_private;
10699 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10700 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10701 	struct drm_display_mode *mode;
10702 	struct intel_crtc_state pipe_config;
10703 	int htot = I915_READ(HTOTAL(cpu_transcoder));
10704 	int hsync = I915_READ(HSYNC(cpu_transcoder));
10705 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
10706 	int vsync = I915_READ(VSYNC(cpu_transcoder));
10707 	enum pipe pipe = intel_crtc->pipe;
10708 
10709 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10710 	if (!mode)
10711 		return NULL;
10712 
10713 	/*
10714 	 * Construct a pipe_config sufficient for getting the clock info
10715 	 * back out of crtc_clock_get.
10716 	 *
10717 	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10718 	 * to use a real value here instead.
10719 	 */
10720 	pipe_config.cpu_transcoder = (enum transcoder) pipe;
10721 	pipe_config.pixel_multiplier = 1;
10722 	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10723 	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10724 	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10725 	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
10726 
10727 	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
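
	/*
	 * The timing registers store value-1 in each 16-bit half: an
	 * HTOTAL of 0x031f027f, for example, decodes to hdisplay
	 * 0x27f + 1 = 640 and htotal 0x31f + 1 = 800.
	 */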
10728 	mode->hdisplay = (htot & 0xffff) + 1;
10729 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10730 	mode->hsync_start = (hsync & 0xffff) + 1;
10731 	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10732 	mode->vdisplay = (vtot & 0xffff) + 1;
10733 	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10734 	mode->vsync_start = (vsync & 0xffff) + 1;
10735 	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10736 
10737 	drm_mode_set_name(mode);
10738 
10739 	return mode;
10740 }
10741 
10742 void intel_mark_busy(struct drm_device *dev)
10743 {
10744 	struct drm_i915_private *dev_priv = dev->dev_private;
10745 
10746 	if (dev_priv->mm.busy)
10747 		return;
10748 
10749 	intel_runtime_pm_get(dev_priv);
10750 
10751 	if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv))
10752 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
10753 
10754 	i915_update_gfx_val(dev_priv);
10755 	if (INTEL_INFO(dev)->gen >= 6)
10756 		gen6_rps_busy(dev_priv);
10757 	dev_priv->mm.busy = true;
10758 }
10759 
10760 void intel_mark_idle(struct drm_device *dev)
10761 {
10762 	struct drm_i915_private *dev_priv = dev->dev_private;
10763 
10764 	if (!dev_priv->mm.busy)
10765 		return;
10766 
10767 	dev_priv->mm.busy = false;
10768 
10769 	if (INTEL_INFO(dev)->gen >= 6)
10770 		gen6_rps_idle(dev->dev_private);
10771 
10772 	if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)) {
10773 		i915_rc6_ctx_wa_check(dev_priv);
10774 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
10775 	}
10776 
10777 	intel_runtime_pm_put(dev_priv);
10778 }
10779 
10780 static void intel_crtc_destroy(struct drm_crtc *crtc)
10781 {
10782 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10783 	struct drm_device *dev = crtc->dev;
10784 	struct intel_unpin_work *work;
10785 
10786 	spin_lock_irq(&dev->event_lock);
10787 	work = intel_crtc->unpin_work;
10788 	intel_crtc->unpin_work = NULL;
10789 	spin_unlock_irq(&dev->event_lock);
10790 
10791 	if (work) {
10792 		cancel_work_sync(&work->work);
10793 		kfree(work);
10794 	}
10795 
10796 	drm_crtc_cleanup(crtc);
10797 
10798 	kfree(intel_crtc);
10799 }
10800 
10801 static void intel_unpin_work_fn(struct work_struct *__work)
10802 {
10803 	struct intel_unpin_work *work =
10804 		container_of(__work, struct intel_unpin_work, work);
10805 	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10806 	struct drm_device *dev = crtc->base.dev;
10807 	struct drm_plane *primary = crtc->base.primary;
10808 
10809 	mutex_lock(&dev->struct_mutex);
10810 	intel_unpin_fb_obj(work->old_fb, primary->state);
10811 	drm_gem_object_unreference(&work->pending_flip_obj->base);
10812 
10813 	if (work->flip_queued_req)
10814 		i915_gem_request_assign(&work->flip_queued_req, NULL);
10815 	mutex_unlock(&dev->struct_mutex);
10816 
10817 	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
10818 	drm_framebuffer_unreference(work->old_fb);
10819 
10820 	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
10821 	atomic_dec(&crtc->unpin_work_count);
10822 
10823 	kfree(work);
10824 }
10825 
10826 static void do_intel_finish_page_flip(struct drm_device *dev,
10827 				      struct drm_crtc *crtc)
10828 {
10829 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10830 	struct intel_unpin_work *work;
10831 	unsigned long flags;
10832 
10833 	/* Ignore early vblank irqs */
10834 	if (intel_crtc == NULL)
10835 		return;
10836 
10837 	/*
10838 	 * This is called both by irq handlers and the reset code (to complete
10839 	 * lost pageflips) so needs the full irqsave spinlocks.
10840 	 */
10841 	spin_lock_irqsave(&dev->event_lock, flags);
10842 	work = intel_crtc->unpin_work;
10843 
10844 	/* Ensure we don't miss a work->pending update ... */
10845 	smp_rmb();
10846 
10847 	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10848 		spin_unlock_irqrestore(&dev->event_lock, flags);
10849 		return;
10850 	}
10851 
10852 	page_flip_completed(intel_crtc);
10853 
10854 	spin_unlock_irqrestore(&dev->event_lock, flags);
10855 }
10856 
10857 void intel_finish_page_flip(struct drm_device *dev, int pipe)
10858 {
10859 	struct drm_i915_private *dev_priv = dev->dev_private;
10860 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10861 
10862 	do_intel_finish_page_flip(dev, crtc);
10863 }
10864 
10865 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10866 {
10867 	struct drm_i915_private *dev_priv = dev->dev_private;
10868 	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10869 
10870 	do_intel_finish_page_flip(dev, crtc);
10871 }
10872 
10873 /* Is 'a' after or equal to 'b'? */
10874 static bool g4x_flip_count_after_eq(u32 a, u32 b)
10875 {
10876 	return !((a - b) & 0x80000000);
10877 }
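
/*
 * The subtraction above makes the comparison wrap-safe: with a = 2 and
 * b = 0xfffffffe, a - b = 4, so the top bit is clear and 'a' is still
 * considered after 'b' even though the counter wrapped.
 */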
10878 
10879 static bool page_flip_finished(struct intel_crtc *crtc)
10880 {
10881 	struct drm_device *dev = crtc->base.dev;
10882 	struct drm_i915_private *dev_priv = dev->dev_private;
10883 
10884 	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
10885 	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
10886 		return true;
10887 
10888 	/*
10889 	 * The relevant registers don't exist on pre-ctg.
10890 	 * As the flip done interrupt doesn't trigger for mmio
10891 	 * flips on gmch platforms, a flip count check isn't
10892 	 * really needed there. But since ctg has the registers,
10893 	 * include it in the check anyway.
10894 	 */
10895 	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
10896 		return true;
10897 
10898 	/*
10899 	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
10900 	 * used the same base address. In that case the mmio flip might
10901 	 * have completed, but the CS hasn't even executed the flip yet.
10902 	 *
10903 	 * A flip count check isn't enough as the CS might have updated
10904 	 * the base address just after start of vblank, but before we
10905 	 * managed to process the interrupt. This means we'd complete the
10906 	 * CS flip too soon.
10907 	 *
10908 	 * Combining both checks should get us a good enough result. It may
10909 	 * still happen that the CS flip has been executed, but has not
10910 	 * yet actually completed. But in case the base address is the same
10911 	 * anyway, we don't really care.
10912 	 */
10913 	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
10914 		crtc->unpin_work->gtt_offset &&
10915 		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
10916 				    crtc->unpin_work->flip_count);
10917 }
10918 
10919 void intel_prepare_page_flip(struct drm_device *dev, int plane)
10920 {
10921 	struct drm_i915_private *dev_priv = dev->dev_private;
10922 	struct intel_crtc *intel_crtc =
10923 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
10924 	unsigned long flags;
10925 
10926 
10927 	/*
10928 	 * This is called both by irq handlers and the reset code (to complete
10929 	 * lost pageflips) so needs the full irqsave spinlocks.
10930 	 *
10931 	 * NB: An MMIO update of the plane base pointer will also
10932 	 * generate a page-flip completion irq, i.e. every modeset
10933 	 * is also accompanied by a spurious intel_prepare_page_flip().
10934 	 */
10935 	spin_lock_irqsave(&dev->event_lock, flags);
10936 	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
10937 		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
10938 	spin_unlock_irqrestore(&dev->event_lock, flags);
10939 }
10940 
10941 static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
10942 {
10943 	/* Ensure that the work item is consistent when activating it ... */
10944 	smp_wmb();
10945 	atomic_set(&work->pending, INTEL_FLIP_PENDING);
10946 	/* and that it is marked active as soon as the irq could fire. */
10947 	smp_wmb();
10948 }
10949 
10950 static int intel_gen2_queue_flip(struct drm_device *dev,
10951 				 struct drm_crtc *crtc,
10952 				 struct drm_framebuffer *fb,
10953 				 struct drm_i915_gem_object *obj,
10954 				 struct drm_i915_gem_request *req,
10955 				 uint32_t flags)
10956 {
10957 	struct intel_engine_cs *ring = req->ring;
10958 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10959 	u32 flip_mask;
10960 	int ret;
10961 
10962 	ret = intel_ring_begin(req, 6);
10963 	if (ret)
10964 		return ret;
10965 
10966 	/* Can't queue multiple flips, so wait for the previous
10967 	 * one to finish before executing the next.
10968 	 */
10969 	if (intel_crtc->plane)
10970 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
10971 	else
10972 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
10973 	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
10974 	intel_ring_emit(ring, MI_NOOP);
10975 	intel_ring_emit(ring, MI_DISPLAY_FLIP |
10976 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
10977 	intel_ring_emit(ring, fb->pitches[0]);
10978 	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
10979 	intel_ring_emit(ring, 0); /* aux display base address, unused */
10980 
10981 	intel_mark_page_flip_active(intel_crtc->unpin_work);
10982 	return 0;
10983 }
10984 
10985 static int intel_gen3_queue_flip(struct drm_device *dev,
10986 				 struct drm_crtc *crtc,
10987 				 struct drm_framebuffer *fb,
10988 				 struct drm_i915_gem_object *obj,
10989 				 struct drm_i915_gem_request *req,
10990 				 uint32_t flags)
10991 {
10992 	struct intel_engine_cs *ring = req->ring;
10993 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10994 	u32 flip_mask;
10995 	int ret;
10996 
10997 	ret = intel_ring_begin(req, 6);
10998 	if (ret)
10999 		return ret;
11000 
11001 	if (intel_crtc->plane)
11002 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11003 	else
11004 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11005 	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
11006 	intel_ring_emit(ring, MI_NOOP);
11007 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
11008 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11009 	intel_ring_emit(ring, fb->pitches[0]);
11010 	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11011 	intel_ring_emit(ring, MI_NOOP);
11012 
11013 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11014 	return 0;
11015 }
11016 
11017 static int intel_gen4_queue_flip(struct drm_device *dev,
11018 				 struct drm_crtc *crtc,
11019 				 struct drm_framebuffer *fb,
11020 				 struct drm_i915_gem_object *obj,
11021 				 struct drm_i915_gem_request *req,
11022 				 uint32_t flags)
11023 {
11024 	struct intel_engine_cs *ring = req->ring;
11025 	struct drm_i915_private *dev_priv = dev->dev_private;
11026 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11027 	uint32_t pf, pipesrc;
11028 	int ret;
11029 
11030 	ret = intel_ring_begin(req, 4);
11031 	if (ret)
11032 		return ret;
11033 
11034 	/* i965+ uses the linear or tiled offsets from the
11035 	 * Display Registers (which do not change across a page-flip)
11036 	 * so we need only reprogram the base address.
11037 	 */
11038 	intel_ring_emit(ring, MI_DISPLAY_FLIP |
11039 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11040 	intel_ring_emit(ring, fb->pitches[0]);
11041 	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
11042 			obj->tiling_mode);
11043 
11044 	/* XXX Enabling the panel-fitter across page-flip is so far
11045 	 * untested on non-native modes, so ignore it for now.
11046 	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11047 	 */
11048 	pf = 0;
11049 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11050 	intel_ring_emit(ring, pf | pipesrc);
11051 
11052 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11053 	return 0;
11054 }
11055 
11056 static int intel_gen6_queue_flip(struct drm_device *dev,
11057 				 struct drm_crtc *crtc,
11058 				 struct drm_framebuffer *fb,
11059 				 struct drm_i915_gem_object *obj,
11060 				 struct drm_i915_gem_request *req,
11061 				 uint32_t flags)
11062 {
11063 	struct intel_engine_cs *ring = req->ring;
11064 	struct drm_i915_private *dev_priv = dev->dev_private;
11065 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11066 	uint32_t pf, pipesrc;
11067 	int ret;
11068 
11069 	ret = intel_ring_begin(req, 4);
11070 	if (ret)
11071 		return ret;
11072 
11073 	intel_ring_emit(ring, MI_DISPLAY_FLIP |
11074 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11075 	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
11076 	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11077 
11078 	/* Contrary to the suggestions in the documentation,
11079 	 * "Enable Panel Fitter" does not seem to be required when page
11080 	 * flipping with a non-native mode and, worse, causes a normal
11081 	 * modeset to fail.
11082 	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11083 	 */
11084 	pf = 0;
11085 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11086 	intel_ring_emit(ring, pf | pipesrc);
11087 
11088 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11089 	return 0;
11090 }
11091 
11092 static int intel_gen7_queue_flip(struct drm_device *dev,
11093 				 struct drm_crtc *crtc,
11094 				 struct drm_framebuffer *fb,
11095 				 struct drm_i915_gem_object *obj,
11096 				 struct drm_i915_gem_request *req,
11097 				 uint32_t flags)
11098 {
11099 	struct intel_engine_cs *ring = req->ring;
11100 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11101 	uint32_t plane_bit = 0;
11102 	int len, ret;
11103 
11104 	switch (intel_crtc->plane) {
11105 	case PLANE_A:
11106 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11107 		break;
11108 	case PLANE_B:
11109 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11110 		break;
11111 	case PLANE_C:
11112 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11113 		break;
11114 	default:
11115 		WARN_ONCE(1, "unknown plane in flip command\n");
11116 		return -ENODEV;
11117 	}
11118 
11119 	len = 4;
11120 	if (ring->id == RCS) {
11121 		len += 6;
11122 		/*
11123 		 * On Gen 8, the SRM command takes an extra dword to
11124 		 * accommodate 48-bit addresses, and we need a NOOP to keep
11125 		 * the batch size even.
11126 		 */
11127 		if (IS_GEN8(dev))
11128 			len += 2;
11129 	}
11130 
11131 	/*
11132 	 * BSpec MI_DISPLAY_FLIP for IVB:
11133 	 * "The full packet must be contained within the same cache line."
11134 	 *
11135 	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11136 	 * cacheline, if we ever start emitting more commands before
11137 	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11138 	 * then do the cacheline alignment, and finally emit the
11139 	 * MI_DISPLAY_FLIP.
11140 	 */
11141 	ret = intel_ring_cacheline_align(req);
11142 	if (ret)
11143 		return ret;
11144 
11145 	ret = intel_ring_begin(req, len);
11146 	if (ret)
11147 		return ret;
11148 
11149 	/* Unmask the flip-done completion message. Note that the bspec says that
11150 	 * we should do this for both the BCS and RCS, and that we must not unmask
11151 	 * more than one flip event at any time (or ensure that one flip message
11152 	 * can be sent by waiting for flip-done prior to queueing new flips).
11153 	 * Experimentation says that BCS works despite DERRMR masking all
11154 	 * flip-done completion events and that unmasking all planes at once
11155 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11156 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11157 	 */
11158 	if (ring->id == RCS) {
11159 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
11160 		intel_ring_emit(ring, DERRMR);
11161 		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11162 					DERRMR_PIPEB_PRI_FLIP_DONE |
11163 					DERRMR_PIPEC_PRI_FLIP_DONE));
11164 		if (IS_GEN8(dev))
11165 			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
11166 					      MI_SRM_LRM_GLOBAL_GTT);
11167 		else
11168 			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
11169 					      MI_SRM_LRM_GLOBAL_GTT);
11170 		intel_ring_emit(ring, DERRMR);
11171 		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
11172 		if (IS_GEN8(dev)) {
11173 			intel_ring_emit(ring, 0);
11174 			intel_ring_emit(ring, MI_NOOP);
11175 		}
11176 	}
11177 
11178 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
11179 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
11180 	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11181 	intel_ring_emit(ring, (MI_NOOP));
11182 
11183 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11184 	return 0;
11185 }
11186 
11187 static bool use_mmio_flip(struct intel_engine_cs *ring,
11188 			  struct drm_i915_gem_object *obj)
11189 {
11190 	/*
11191 	 * MMIO flips are not used on older platforms, because the lack
11192 	 * of a flip done interrupt forces us to use CS flips. Those
11193 	 * platforms derive flip completion using clever tricks involving
11194 	 * the flip_pending status bits and vblank irqs, so using MMIO
11195 	 * flips there would disrupt this mechanism.
11196 	 */
11197 
11198 	if (ring == NULL)
11199 		return true;
11200 
11201 	if (INTEL_INFO(ring->dev)->gen < 5)
11202 		return false;
11203 
11204 	if (i915.use_mmio_flip < 0)
11205 		return false;
11206 	else if (i915.use_mmio_flip > 0)
11207 		return true;
11208 	else if (i915.enable_execlists)
11209 		return true;
11210 	else
11211 		return ring != i915_gem_request_get_ring(obj->last_write_req);
11212 }
11213 
11214 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11215 			     struct intel_unpin_work *work)
11216 {
11217 	struct drm_device *dev = intel_crtc->base.dev;
11218 	struct drm_i915_private *dev_priv = dev->dev_private;
11219 	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11220 	const enum pipe pipe = intel_crtc->pipe;
11221 	u32 ctl, stride;
11222 
11223 	ctl = I915_READ(PLANE_CTL(pipe, 0));
11224 	ctl &= ~PLANE_CTL_TILED_MASK;
11225 	switch (fb->modifier[0]) {
11226 	case DRM_FORMAT_MOD_NONE:
11227 		break;
11228 	case I915_FORMAT_MOD_X_TILED:
11229 		ctl |= PLANE_CTL_TILED_X;
11230 		break;
11231 	case I915_FORMAT_MOD_Y_TILED:
11232 		ctl |= PLANE_CTL_TILED_Y;
11233 		break;
11234 	case I915_FORMAT_MOD_Yf_TILED:
11235 		ctl |= PLANE_CTL_TILED_YF;
11236 		break;
11237 	default:
11238 		MISSING_CASE(fb->modifier[0]);
11239 	}
11240 
11241 	/*
11242 	 * The stride is expressed either in 64 byte chunks for
11243 	 * linear buffers or in number of tiles for tiled buffers.
11244 	 */
11245 	stride = fb->pitches[0] /
11246 		 intel_fb_stride_alignment(dev, fb->modifier[0],
11247 					   fb->pixel_format);
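
	/*
	 * E.g. a linear 1920-wide 32bpp fb (pitches[0] = 7680) is assumed
	 * to be programmed as 7680 / 64 = 120 chunks, while the same fb
	 * X-tiled (512 byte wide tiles) would be 7680 / 512 = 15 tiles.
	 */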
11248 
11249 	/*
11250 	 * PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11251 	 * PLANE_SURF updates; the update is then guaranteed to be atomic.
11252 	 */
11253 	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11254 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11255 
11256 	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11257 	POSTING_READ(PLANE_SURF(pipe, 0));
11258 }
11259 
11260 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11261 			     struct intel_unpin_work *work)
11262 {
11263 	struct drm_device *dev = intel_crtc->base.dev;
11264 	struct drm_i915_private *dev_priv = dev->dev_private;
11265 	struct intel_framebuffer *intel_fb =
11266 		to_intel_framebuffer(intel_crtc->base.primary->fb);
11267 	struct drm_i915_gem_object *obj = intel_fb->obj;
11268 	u32 dspcntr;
11269 	u32 reg;
11270 
11271 	reg = DSPCNTR(intel_crtc->plane);
11272 	dspcntr = I915_READ(reg);
11273 
11274 	if (obj->tiling_mode != I915_TILING_NONE)
11275 		dspcntr |= DISPPLANE_TILED;
11276 	else
11277 		dspcntr &= ~DISPPLANE_TILED;
11278 
11279 	I915_WRITE(reg, dspcntr);
11280 
11281 	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11282 	POSTING_READ(DSPSURF(intel_crtc->plane));
11283 }
11284 
11285 /*
11286  * XXX: This is the temporary way to update the plane registers until we get
11287  * around to using the usual plane update functions for MMIO flips
11288  */
11289 static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11290 {
11291 	struct intel_crtc *crtc = mmio_flip->crtc;
11292 	struct intel_unpin_work *work;
11293 
11294 	spin_lock_irq(&crtc->base.dev->event_lock);
11295 	work = crtc->unpin_work;
11296 	spin_unlock_irq(&crtc->base.dev->event_lock);
11297 	if (work == NULL)
11298 		return;
11299 
11300 	intel_mark_page_flip_active(work);
11301 
11302 	intel_pipe_update_start(crtc);
11303 
11304 	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11305 		skl_do_mmio_flip(crtc, work);
11306 	else
11307 		/* use_mmio_flip() restricts MMIO flips to ilk+ */
11308 		ilk_do_mmio_flip(crtc, work);
11309 
11310 	intel_pipe_update_end(crtc);
11311 }
11312 
11313 static void intel_mmio_flip_work_func(struct work_struct *work)
11314 {
11315 	struct intel_mmio_flip *mmio_flip =
11316 		container_of(work, struct intel_mmio_flip, work);
11317 
11318 	if (mmio_flip->req) {
11319 		WARN_ON(__i915_wait_request(mmio_flip->req,
11320 					    mmio_flip->crtc->reset_counter,
11321 					    false, NULL,
11322 					    &mmio_flip->i915->rps.mmioflips));
11323 		i915_gem_request_unreference__unlocked(mmio_flip->req);
11324 	}
11325 
11326 	intel_do_mmio_flip(mmio_flip);
11327 	kfree(mmio_flip);
11328 }
11329 
11330 static int intel_queue_mmio_flip(struct drm_device *dev,
11331 				 struct drm_crtc *crtc,
11332 				 struct drm_framebuffer *fb,
11333 				 struct drm_i915_gem_object *obj,
11334 				 struct intel_engine_cs *ring,
11335 				 uint32_t flags)
11336 {
11337 	struct intel_mmio_flip *mmio_flip;
11338 
11339 	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11340 	if (mmio_flip == NULL)
11341 		return -ENOMEM;
11342 
11343 	mmio_flip->i915 = to_i915(dev);
11344 	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11345 	mmio_flip->crtc = to_intel_crtc(crtc);
11346 
11347 	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11348 	schedule_work(&mmio_flip->work);
11349 
11350 	return 0;
11351 }
11352 
11353 static int intel_default_queue_flip(struct drm_device *dev,
11354 				    struct drm_crtc *crtc,
11355 				    struct drm_framebuffer *fb,
11356 				    struct drm_i915_gem_object *obj,
11357 				    struct drm_i915_gem_request *req,
11358 				    uint32_t flags)
11359 {
11360 	return -ENODEV;
11361 }
11362 
11363 static bool __intel_pageflip_stall_check(struct drm_device *dev,
11364 					 struct drm_crtc *crtc)
11365 {
11366 	struct drm_i915_private *dev_priv = dev->dev_private;
11367 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11368 	struct intel_unpin_work *work = intel_crtc->unpin_work;
11369 	u32 addr;
11370 
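	/*
	 * work->pending tracks flip progress (roughly INACTIVE -> PENDING ->
	 * COMPLETE); only flips that have reached the hardware but have not
	 * yet signalled completion are candidates for the stall check below.
	 */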
11371 	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11372 		return true;
11373 
11374 	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11375 		return false;
11376 
11377 	if (!work->enable_stall_check)
11378 		return false;
11379 
11380 	if (work->flip_ready_vblank == 0) {
11381 		if (work->flip_queued_req &&
11382 		    !i915_gem_request_completed(work->flip_queued_req, true))
11383 			return false;
11384 
11385 		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
11386 	}
11387 
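	/*
	 * Give the flip a grace period of a few vblanks after it became
	 * ready; only if it still hasn't completed by then do we go poking
	 * at the hardware for a missed interrupt.
	 */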
11388 	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
11389 		return false;
11390 
11391 	/* Potential stall - if we see that the flip has happened,
11392 	 * assume a missed interrupt. */
11393 	if (INTEL_INFO(dev)->gen >= 4)
11394 		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11395 	else
11396 		addr = I915_READ(DSPADDR(intel_crtc->plane));
11397 
11398 	/* There is a potential issue here with a false positive after a flip
11399 	 * to the same address. We could address this by checking for a
11400 	 * non-incrementing frame counter.
11401 	 */
11402 	return addr == work->gtt_offset;
11403 }
11404 
11405 void intel_check_page_flip(struct drm_device *dev, int pipe)
11406 {
11407 	struct drm_i915_private *dev_priv = dev->dev_private;
11408 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11409 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11410 	struct intel_unpin_work *work;
11411 
11412 	WARN_ON(!in_interrupt());
11413 
11414 	if (crtc == NULL)
11415 		return;
11416 
11417 	spin_lock(&dev->event_lock);
11418 	work = intel_crtc->unpin_work;
11419 	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
11420 		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
11421 			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
11422 		page_flip_completed(intel_crtc);
11423 		work = NULL;
11424 	}
11425 	if (work != NULL &&
11426 	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
11427 		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
11428 	spin_unlock(&dev->event_lock);
11429 }
11430 
11431 static int intel_crtc_page_flip(struct drm_crtc *crtc,
11432 				struct drm_framebuffer *fb,
11433 				struct drm_pending_vblank_event *event,
11434 				uint32_t page_flip_flags)
11435 {
11436 	struct drm_device *dev = crtc->dev;
11437 	struct drm_i915_private *dev_priv = dev->dev_private;
11438 	struct drm_framebuffer *old_fb = crtc->primary->fb;
11439 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11440 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11441 	struct drm_plane *primary = crtc->primary;
11442 	enum pipe pipe = intel_crtc->pipe;
11443 	struct intel_unpin_work *work;
11444 	struct intel_engine_cs *ring;
11445 	bool mmio_flip;
11446 	struct drm_i915_gem_request *request = NULL;
11447 	int ret;
11448 
11449 	/*
11450 	 * drm_mode_page_flip_ioctl() should already catch this, but double
11451 	 * check to be safe.  In the future we may enable pageflipping from
11452 	 * a disabled primary plane.
11453 	 */
11454 	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11455 		return -EBUSY;
11456 
11457 	/* Can't change pixel format via MI display flips. */
11458 	if (fb->pixel_format != crtc->primary->fb->pixel_format)
11459 		return -EINVAL;
11460 
11461 	/*
11462 	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11463 	 * Note that pitch changes could also affect these registers.
11464 	 */
11465 	if (INTEL_INFO(dev)->gen > 3 &&
11466 	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11467 	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
11468 		return -EINVAL;
11469 
11470 	if (i915_terminally_wedged(&dev_priv->gpu_error))
11471 		goto out_hang;
11472 
11473 	work = kzalloc(sizeof(*work), GFP_KERNEL);
11474 	if (work == NULL)
11475 		return -ENOMEM;
11476 
11477 	work->event = event;
11478 	work->crtc = crtc;
11479 	work->old_fb = old_fb;
11480 	INIT_WORK(&work->work, intel_unpin_work_fn);
11481 
11482 	ret = drm_crtc_vblank_get(crtc);
11483 	if (ret)
11484 		goto free_work;
11485 
11486 	/* We borrow the event spin lock for protecting unpin_work */
11487 	spin_lock_irq(&dev->event_lock);
11488 	if (intel_crtc->unpin_work) {
11489 		/* Before declaring the flip queue wedged, check if
11490 		 * the hardware completed the operation behind our backs.
11491 		 */
11492 		if (__intel_pageflip_stall_check(dev, crtc)) {
11493 			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11494 			page_flip_completed(intel_crtc);
11495 		} else {
11496 			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11497 			spin_unlock_irq(&dev->event_lock);
11498 
11499 			drm_crtc_vblank_put(crtc);
11500 			kfree(work);
11501 			return -EBUSY;
11502 		}
11503 	}
11504 	intel_crtc->unpin_work = work;
11505 	spin_unlock_irq(&dev->event_lock);
11506 
11507 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11508 		flush_workqueue(dev_priv->wq);
11509 
11510 	/* Reference the objects for the scheduled work. */
11511 	drm_framebuffer_reference(work->old_fb);
11512 	drm_gem_object_reference(&obj->base);
11513 
11514 	crtc->primary->fb = fb;
11515 	update_state_fb(crtc->primary);
11516 
11517 	work->pending_flip_obj = obj;
11518 
11519 	ret = i915_mutex_lock_interruptible(dev);
11520 	if (ret)
11521 		goto cleanup;
11522 
11523 	atomic_inc(&intel_crtc->unpin_work_count);
11524 	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
11525 
11526 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11527 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11528 
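	/*
	 * Pick the engine that will execute the flip: the blitter on
	 * VLV/IVB/HSW (with VLV falling back to an MMIO flip when the
	 * tiling changes, since its DISPLAY_FLIP can't do that), on other
	 * gen7+ the engine that last wrote the object if it was the render
	 * ring (otherwise the blitter), and the render ring on everything
	 * older.
	 */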
11529 	if (IS_VALLEYVIEW(dev)) {
11530 		ring = &dev_priv->ring[BCS];
11531 		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11532 			/* vlv: DISPLAY_FLIP fails to change tiling */
11533 			ring = NULL;
11534 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11535 		ring = &dev_priv->ring[BCS];
11536 	} else if (INTEL_INFO(dev)->gen >= 7) {
11537 		ring = i915_gem_request_get_ring(obj->last_write_req);
11538 		if (ring == NULL || ring->id != RCS)
11539 			ring = &dev_priv->ring[BCS];
11540 	} else {
11541 		ring = &dev_priv->ring[RCS];
11542 	}
11543 
11544 	mmio_flip = use_mmio_flip(ring, obj);
11545 
11546 	/* When using CS flips, we want to emit semaphores between rings.
11547 	 * However, when using mmio flips we will create a task to do the
11548 	 * synchronisation, so all we want here is to pin the framebuffer
11549 	 * into the display plane and skip any waits.
11550 	 */
11551 	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
11552 					 crtc->primary->state,
11553 					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
11554 	if (ret)
11555 		goto cleanup_pending;
11556 
11557 	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11558 						  obj, 0);
11559 	work->gtt_offset += intel_crtc->dspaddr_offset;
11560 
11561 	if (mmio_flip) {
11562 		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
11563 					    page_flip_flags);
11564 		if (ret)
11565 			goto cleanup_unpin;
11566 
11567 		i915_gem_request_assign(&work->flip_queued_req,
11568 					obj->last_write_req);
11569 	} else {
11570 		if (!request) {
11571 			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
11572 			if (ret)
11573 				goto cleanup_unpin;
11574 		}
11575 
11576 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11577 						   page_flip_flags);
11578 		if (ret)
11579 			goto cleanup_unpin;
11580 
11581 		i915_gem_request_assign(&work->flip_queued_req, request);
11582 	}
11583 
11584 	if (request)
11585 		i915_add_request_no_flush(request);
11586 
11587 	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
11588 	work->enable_stall_check = true;
11589 
11590 	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11591 			  to_intel_plane(primary)->frontbuffer_bit);
11592 	mutex_unlock(&dev->struct_mutex);
11593 
11594 	intel_fbc_disable_crtc(intel_crtc);
11595 	intel_frontbuffer_flip_prepare(dev,
11596 				       to_intel_plane(primary)->frontbuffer_bit);
11597 
11598 	trace_i915_flip_request(intel_crtc->plane, obj);
11599 
11600 	return 0;
11601 
11602 cleanup_unpin:
11603 	intel_unpin_fb_obj(fb, crtc->primary->state);
11604 cleanup_pending:
11605 	if (request)
11606 		i915_gem_request_cancel(request);
11607 	atomic_dec(&intel_crtc->unpin_work_count);
11608 	mutex_unlock(&dev->struct_mutex);
11609 cleanup:
11610 	crtc->primary->fb = old_fb;
11611 	update_state_fb(crtc->primary);
11612 
11613 	drm_gem_object_unreference_unlocked(&obj->base);
11614 	drm_framebuffer_unreference(work->old_fb);
11615 
11616 	spin_lock_irq(&dev->event_lock);
11617 	intel_crtc->unpin_work = NULL;
11618 	spin_unlock_irq(&dev->event_lock);
11619 
11620 	drm_crtc_vblank_put(crtc);
11621 free_work:
11622 	kfree(work);
11623 
11624 	if (ret == -EIO) {
11625 		struct drm_atomic_state *state;
11626 		struct drm_plane_state *plane_state;
11627 
11628 out_hang:
11629 		state = drm_atomic_state_alloc(dev);
11630 		if (!state)
11631 			return -ENOMEM;
11632 		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11633 
11634 retry:
11635 		plane_state = drm_atomic_get_plane_state(state, primary);
11636 		ret = PTR_ERR_OR_ZERO(plane_state);
11637 		if (!ret) {
11638 			drm_atomic_set_fb_for_plane(plane_state, fb);
11639 
11640 			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11641 			if (!ret)
11642 				ret = drm_atomic_commit(state);
11643 		}
11644 
11645 		if (ret == -EDEADLK) {
11646 			drm_modeset_backoff(state->acquire_ctx);
11647 			drm_atomic_state_clear(state);
11648 			goto retry;
11649 		}
11650 
11651 		if (ret)
11652 			drm_atomic_state_free(state);
11653 
11654 		if (ret == 0 && event) {
11655 			spin_lock_irq(&dev->event_lock);
11656 			drm_send_vblank_event(dev, pipe, event);
11657 			spin_unlock_irq(&dev->event_lock);
11658 		}
11659 	}
11660 	return ret;
11661 }
11662 
11663 
11664 /**
11665  * intel_wm_need_update - Check whether watermarks need updating
11666  * @plane: drm plane
11667  * @state: new plane state
11668  *
11669  * Check current plane state versus the new one to determine whether
11670  * watermarks need to be recalculated.
11671  *
11672  * Returns true if the watermarks need to be recalculated, false otherwise.
11673  */
11674 static bool intel_wm_need_update(struct drm_plane *plane,
11675 				 struct drm_plane_state *state)
11676 {
11677 	/* Update watermarks on tiling or rotation changes. */
11678 	if (!plane->state->fb || !state->fb ||
11679 	    plane->state->fb->modifier[0] != state->fb->modifier[0] ||
11680 	    plane->state->rotation != state->rotation)
11681 		return true;
11682 
11683 	if (plane->state->crtc_w != state->crtc_w)
11684 		return true;
11685 
11686 	return false;
11687 }
11688 
11689 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11690 				    struct drm_plane_state *plane_state)
11691 {
11692 	struct drm_crtc *crtc = crtc_state->crtc;
11693 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11694 	struct drm_plane *plane = plane_state->plane;
11695 	struct drm_device *dev = crtc->dev;
11696 	struct drm_i915_private *dev_priv = dev->dev_private;
11697 	struct intel_plane_state *old_plane_state =
11698 		to_intel_plane_state(plane->state);
11699 	int idx = intel_crtc->base.base.id, ret;
11700 	int i = drm_plane_index(plane);
11701 	bool mode_changed = needs_modeset(crtc_state);
11702 	bool was_crtc_enabled = crtc->state->active;
11703 	bool is_crtc_enabled = crtc_state->active;
11704 
11705 	bool turn_off, turn_on, visible, was_visible;
11706 	struct drm_framebuffer *fb = plane_state->fb;
11707 
11708 	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11709 	    plane->type != DRM_PLANE_TYPE_CURSOR) {
11710 		ret = skl_update_scaler_plane(
11711 			to_intel_crtc_state(crtc_state),
11712 			to_intel_plane_state(plane_state));
11713 		if (ret)
11714 			return ret;
11715 	}
11716 
11717 	/*
11718 	 * Disabling a plane is always okay; we just need to update
11719 	 * fb tracking in a special way since cleanup_fb() won't
11720 	 * get called by the plane helpers.
11721 	 */
11722 	if (old_plane_state->base.fb && !fb)
11723 		intel_crtc->atomic.disabled_planes |= 1 << i;
11724 
11725 	was_visible = old_plane_state->visible;
11726 	visible = to_intel_plane_state(plane_state)->visible;
11727 
11728 	if (!was_crtc_enabled && WARN_ON(was_visible))
11729 		was_visible = false;
11730 
11731 	if (!is_crtc_enabled && WARN_ON(visible))
11732 		visible = false;
11733 
11734 	if (!was_visible && !visible)
11735 		return 0;
11736 
11737 	turn_off = was_visible && (!visible || mode_changed);
11738 	turn_on = visible && (!was_visible || mode_changed);
11739 
11740 	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
11741 			 plane->base.id, fb ? fb->base.id : -1);
11742 
11743 	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
11744 			 plane->base.id, was_visible, visible,
11745 			 turn_off, turn_on, mode_changed);
11746 
11747 	if (turn_on) {
11748 		intel_crtc->atomic.update_wm_pre = true;
11749 		/* must disable cxsr around plane enable/disable */
11750 		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
11751 			intel_crtc->atomic.disable_cxsr = true;
11752 			/* to potentially re-enable cxsr */
11753 			intel_crtc->atomic.wait_vblank = true;
11754 			intel_crtc->atomic.update_wm_post = true;
11755 		}
11756 	} else if (turn_off) {
11757 		intel_crtc->atomic.update_wm_post = true;
11758 		/* must disable cxsr around plane enable/disable */
11759 		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
11760 			if (is_crtc_enabled)
11761 				intel_crtc->atomic.wait_vblank = true;
11762 			intel_crtc->atomic.disable_cxsr = true;
11763 		}
11764 	} else if (intel_wm_need_update(plane, plane_state)) {
11765 		intel_crtc->atomic.update_wm_pre = true;
11766 	}
11767 
11768 	if (visible || was_visible)
11769 		intel_crtc->atomic.fb_bits |=
11770 			to_intel_plane(plane)->frontbuffer_bit;
11771 
11772 	switch (plane->type) {
11773 	case DRM_PLANE_TYPE_PRIMARY:
11774 		intel_crtc->atomic.wait_for_flips = true;
11775 		intel_crtc->atomic.pre_disable_primary = turn_off;
11776 		intel_crtc->atomic.post_enable_primary = turn_on;
11777 
11778 		if (turn_off) {
11779 			/*
11780 			 * FIXME: Actually, if any other plane is still
11781 			 * enabled on the pipe, we could leave IPS enabled.
11782 			 * For now, assume that when the primary plane is
11783 			 * made invisible by setting DSPCNTR to 0 in the
11784 			 * update_primary_plane function, IPS needs to be
11785 			 * disabled.
11786 			 */
11787 			intel_crtc->atomic.disable_ips = true;
11788 
11789 			intel_crtc->atomic.disable_fbc = true;
11790 		}
11791 
11792 		/*
11793 		 * FBC does not work on some platforms for rotated
11794 		 * planes, so disable it when rotation is not 0 and
11795 		 * update it when rotation is set back to 0.
11796 		 *
11797 		 * FIXME: This is redundant with the fbc update done in
11798 		 * the primary plane enable function, except that one
11799 		 * is done too late. We eventually need to unify
11800 		 * this.
11801 		 */
11802 
11803 		if (visible &&
11804 		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11805 		    dev_priv->fbc.crtc == intel_crtc &&
11806 		    plane_state->rotation != BIT(DRM_ROTATE_0))
11807 			intel_crtc->atomic.disable_fbc = true;
11808 
11809 		/*
11810 		 * BDW signals flip done immediately if the plane
11811 		 * is disabled, even if the plane enable is already
11812 		 * armed to occur at the next vblank :(
11813 		 */
11814 		if (turn_on && IS_BROADWELL(dev))
11815 			intel_crtc->atomic.wait_vblank = true;
11816 
11817 		intel_crtc->atomic.update_fbc |= visible || mode_changed;
11818 		break;
11819 	case DRM_PLANE_TYPE_CURSOR:
11820 		break;
11821 	case DRM_PLANE_TYPE_OVERLAY:
11822 		if (turn_off && !mode_changed) {
11823 			intel_crtc->atomic.wait_vblank = true;
11824 			intel_crtc->atomic.update_sprite_watermarks |=
11825 				1 << i;
11826 		}
11827 	}
11828 	return 0;
11829 }
11830 
11831 static bool encoders_cloneable(const struct intel_encoder *a,
11832 			       const struct intel_encoder *b)
11833 {
11834 	/* masks could be asymmetric, so check both ways */
11835 	return a == b || (a->cloneable & (1 << b->type) &&
11836 			  b->cloneable & (1 << a->type));
11837 }
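/*
 * Example (illustrative): if encoder a's cloneable mask includes b's type but
 * b's mask excludes a's type, the pair is rejected; cloning is only allowed
 * when both encoders agree.
 */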
11838 
11839 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11840 					 struct intel_crtc *crtc,
11841 					 struct intel_encoder *encoder)
11842 {
11843 	struct intel_encoder *source_encoder;
11844 	struct drm_connector *connector;
11845 	struct drm_connector_state *connector_state;
11846 	int i;
11847 
11848 	for_each_connector_in_state(state, connector, connector_state, i) {
11849 		if (connector_state->crtc != &crtc->base)
11850 			continue;
11851 
11852 		source_encoder =
11853 			to_intel_encoder(connector_state->best_encoder);
11854 		if (!encoders_cloneable(encoder, source_encoder))
11855 			return false;
11856 	}
11857 
11858 	return true;
11859 }
11860 
11861 static bool check_encoder_cloning(struct drm_atomic_state *state,
11862 				  struct intel_crtc *crtc)
11863 {
11864 	struct intel_encoder *encoder;
11865 	struct drm_connector *connector;
11866 	struct drm_connector_state *connector_state;
11867 	int i;
11868 
11869 	for_each_connector_in_state(state, connector, connector_state, i) {
11870 		if (connector_state->crtc != &crtc->base)
11871 			continue;
11872 
11873 		encoder = to_intel_encoder(connector_state->best_encoder);
11874 		if (!check_single_encoder_cloning(state, crtc, encoder))
11875 			return false;
11876 	}
11877 
11878 	return true;
11879 }
11880 
11881 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11882 				   struct drm_crtc_state *crtc_state)
11883 {
11884 	struct drm_device *dev = crtc->dev;
11885 	struct drm_i915_private *dev_priv = dev->dev_private;
11886 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11887 	struct intel_crtc_state *pipe_config =
11888 		to_intel_crtc_state(crtc_state);
11889 	struct drm_atomic_state *state = crtc_state->state;
11890 	int ret;
11891 	bool mode_changed = needs_modeset(crtc_state);
11892 
11893 	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
11894 		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11895 		return -EINVAL;
11896 	}
11897 
11898 	if (mode_changed && !crtc_state->active)
11899 		intel_crtc->atomic.update_wm_post = true;
11900 
11901 	if (mode_changed && crtc_state->enable &&
11902 	    dev_priv->display.crtc_compute_clock &&
11903 	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
11904 		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11905 							   pipe_config);
11906 		if (ret)
11907 			return ret;
11908 	}
11909 
11910 	ret = 0;
11911 	if (INTEL_INFO(dev)->gen >= 9) {
11912 		if (mode_changed)
11913 			ret = skl_update_scaler_crtc(pipe_config);
11914 
11915 		if (!ret)
11916 			ret = intel_atomic_setup_scalers(dev, intel_crtc,
11917 							 pipe_config);
11918 	}
11919 
11920 	return ret;
11921 }
11922 
11923 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11924 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
11925 	.load_lut = intel_crtc_load_lut,
11926 	.atomic_begin = intel_begin_crtc_commit,
11927 	.atomic_flush = intel_finish_crtc_commit,
11928 	.atomic_check = intel_crtc_atomic_check,
11929 };
11930 
11931 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11932 {
11933 	struct intel_connector *connector;
11934 
11935 	for_each_intel_connector(dev, connector) {
11936 		if (connector->base.encoder) {
11937 			connector->base.state->best_encoder =
11938 				connector->base.encoder;
11939 			connector->base.state->crtc =
11940 				connector->base.encoder->crtc;
11941 		} else {
11942 			connector->base.state->best_encoder = NULL;
11943 			connector->base.state->crtc = NULL;
11944 		}
11945 	}
11946 }
11947 
11948 static void
11949 connected_sink_compute_bpp(struct intel_connector *connector,
11950 			   struct intel_crtc_state *pipe_config)
11951 {
11952 	int bpp = pipe_config->pipe_bpp;
11953 
11954 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
11955 		connector->base.base.id,
11956 		connector->base.name);
11957 
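	/*
	 * pipe_bpp counts bits per pixel across all three color channels,
	 * so a sink reporting N bits per channel caps it at N * 3
	 * (e.g. an 8 bpc EDID allows at most 24 bpp).
	 */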
11958 	/* Don't use an invalid EDID bpc value */
11959 	if (connector->base.display_info.bpc &&
11960 	    connector->base.display_info.bpc * 3 < bpp) {
11961 		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
11962 			      bpp, connector->base.display_info.bpc*3);
11963 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
11964 	}
11965 
11966 	/* Clamp bpp to 8 on screens without EDID 1.4 */
11967 	if (connector->base.display_info.bpc == 0 && bpp > 24) {
11968 		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
11969 			      bpp);
11970 		pipe_config->pipe_bpp = 24;
11971 	}
11972 }
11973 
11974 static int
11975 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11976 			  struct intel_crtc_state *pipe_config)
11977 {
11978 	struct drm_device *dev = crtc->base.dev;
11979 	struct drm_atomic_state *state;
11980 	struct drm_connector *connector;
11981 	struct drm_connector_state *connector_state;
11982 	int bpp, i;
11983 
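	/*
	 * Start from the deepest pipe bpp the platform supports (30 bpp on
	 * G4X/VLV, 36 bpp on gen5+, 24 bpp otherwise); connected sinks then
	 * clamp it down below.
	 */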
11984 	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)))
11985 		bpp = 10*3;
11986 	else if (INTEL_INFO(dev)->gen >= 5)
11987 		bpp = 12*3;
11988 	else
11989 		bpp = 8*3;
11990 
11991 
11992 	pipe_config->pipe_bpp = bpp;
11993 
11994 	state = pipe_config->base.state;
11995 
11996 	/* Clamp display bpp to EDID value */
11997 	for_each_connector_in_state(state, connector, connector_state, i) {
11998 		if (connector_state->crtc != &crtc->base)
11999 			continue;
12000 
12001 		connected_sink_compute_bpp(to_intel_connector(connector),
12002 					   pipe_config);
12003 	}
12004 
12005 	return bpp;
12006 }
12007 
12008 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12009 {
12010 	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12011 			"type: 0x%x flags: 0x%x\n",
12012 		mode->crtc_clock,
12013 		mode->crtc_hdisplay, mode->crtc_hsync_start,
12014 		mode->crtc_hsync_end, mode->crtc_htotal,
12015 		mode->crtc_vdisplay, mode->crtc_vsync_start,
12016 		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12017 }
12018 
12019 static void intel_dump_pipe_config(struct intel_crtc *crtc,
12020 				   struct intel_crtc_state *pipe_config,
12021 				   const char *context)
12022 {
12023 	struct drm_device *dev = crtc->base.dev;
12024 	struct drm_plane *plane;
12025 	struct intel_plane *intel_plane;
12026 	struct intel_plane_state *state;
12027 	struct drm_framebuffer *fb;
12028 
12029 	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
12030 		      context, pipe_config, pipe_name(crtc->pipe));
12031 
12032 	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
12033 	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12034 		      pipe_config->pipe_bpp, pipe_config->dither);
12035 	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12036 		      pipe_config->has_pch_encoder,
12037 		      pipe_config->fdi_lanes,
12038 		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12039 		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12040 		      pipe_config->fdi_m_n.tu);
12041 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12042 		      pipe_config->has_dp_encoder,
12043 		      pipe_config->lane_count,
12044 		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12045 		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12046 		      pipe_config->dp_m_n.tu);
12047 
12048 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12049 		      pipe_config->has_dp_encoder,
12050 		      pipe_config->lane_count,
12051 		      pipe_config->dp_m2_n2.gmch_m,
12052 		      pipe_config->dp_m2_n2.gmch_n,
12053 		      pipe_config->dp_m2_n2.link_m,
12054 		      pipe_config->dp_m2_n2.link_n,
12055 		      pipe_config->dp_m2_n2.tu);
12056 
12057 	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12058 		      pipe_config->has_audio,
12059 		      pipe_config->has_infoframe);
12060 
12061 	DRM_DEBUG_KMS("requested mode:\n");
12062 	drm_mode_debug_printmodeline(&pipe_config->base.mode);
12063 	DRM_DEBUG_KMS("adjusted mode:\n");
12064 	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12065 	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12066 	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12067 	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12068 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
12069 	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12070 		      crtc->num_scalers,
12071 		      pipe_config->scaler_state.scaler_users,
12072 		      pipe_config->scaler_state.scaler_id);
12073 	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12074 		      pipe_config->gmch_pfit.control,
12075 		      pipe_config->gmch_pfit.pgm_ratios,
12076 		      pipe_config->gmch_pfit.lvds_border_bits);
12077 	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12078 		      pipe_config->pch_pfit.pos,
12079 		      pipe_config->pch_pfit.size,
12080 		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12081 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
12082 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
12083 
12084 	if (IS_BROXTON(dev)) {
12085 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
12086 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12087 			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12088 			      pipe_config->ddi_pll_sel,
12089 			      pipe_config->dpll_hw_state.ebb0,
12090 			      pipe_config->dpll_hw_state.ebb4,
12091 			      pipe_config->dpll_hw_state.pll0,
12092 			      pipe_config->dpll_hw_state.pll1,
12093 			      pipe_config->dpll_hw_state.pll2,
12094 			      pipe_config->dpll_hw_state.pll3,
12095 			      pipe_config->dpll_hw_state.pll6,
12096 			      pipe_config->dpll_hw_state.pll8,
12097 			      pipe_config->dpll_hw_state.pll9,
12098 			      pipe_config->dpll_hw_state.pll10,
12099 			      pipe_config->dpll_hw_state.pcsdw12);
12100 	} else if (IS_SKYLAKE(dev)) {
12101 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12102 			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12103 			      pipe_config->ddi_pll_sel,
12104 			      pipe_config->dpll_hw_state.ctrl1,
12105 			      pipe_config->dpll_hw_state.cfgcr1,
12106 			      pipe_config->dpll_hw_state.cfgcr2);
12107 	} else if (HAS_DDI(dev)) {
12108 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12109 			      pipe_config->ddi_pll_sel,
12110 			      pipe_config->dpll_hw_state.wrpll,
12111 			      pipe_config->dpll_hw_state.spll);
12112 	} else {
12113 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12114 			      "fp0: 0x%x, fp1: 0x%x\n",
12115 			      pipe_config->dpll_hw_state.dpll,
12116 			      pipe_config->dpll_hw_state.dpll_md,
12117 			      pipe_config->dpll_hw_state.fp0,
12118 			      pipe_config->dpll_hw_state.fp1);
12119 	}
12120 
12121 	DRM_DEBUG_KMS("planes on this crtc\n");
12122 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12123 		intel_plane = to_intel_plane(plane);
12124 		if (intel_plane->pipe != crtc->pipe)
12125 			continue;
12126 
12127 		state = to_intel_plane_state(plane->state);
12128 		fb = state->base.fb;
12129 		if (!fb) {
12130 			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
12131 				"disabled, scaler_id = %d\n",
12132 				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12133 				plane->base.id, intel_plane->pipe,
12134 				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12135 				drm_plane_index(plane), state->scaler_id);
12136 			continue;
12137 		}
12138 
12139 		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
12140 			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12141 			plane->base.id, intel_plane->pipe,
12142 			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
12143 			drm_plane_index(plane));
12144 		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
12145 			fb->base.id, fb->width, fb->height, fb->pixel_format);
12146 		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
12147 			state->scaler_id,
12148 			state->src.x1 >> 16, state->src.y1 >> 16,
12149 			drm_rect_width(&state->src) >> 16,
12150 			drm_rect_height(&state->src) >> 16,
12151 			state->dst.x1, state->dst.y1,
12152 			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12153 	}
12154 }
12155 
12156 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12157 {
12158 	struct drm_device *dev = state->dev;
12159 	struct drm_connector *connector;
12160 	unsigned int used_ports = 0;
12161 
12162 	/*
12163 	 * Walk the connector list instead of the encoder
12164 	 * list to detect the problem on ddi platforms
12165 	 * where there's just one encoder per digital port.
12166 	 */
12167 	drm_for_each_connector(connector, dev) {
12168 		struct drm_connector_state *connector_state;
12169 		struct intel_encoder *encoder;
12170 
12171 		connector_state = drm_atomic_get_existing_connector_state(state, connector);
12172 		if (!connector_state)
12173 			connector_state = connector->state;
12174 
12175 		if (!connector_state->best_encoder)
12176 			continue;
12177 
12178 		encoder = to_intel_encoder(connector_state->best_encoder);
12179 
12180 		WARN_ON(!connector_state->crtc);
12181 
12182 		switch (encoder->type) {
12183 			unsigned int port_mask;
12184 		case INTEL_OUTPUT_UNKNOWN:
12185 			if (WARN_ON(!HAS_DDI(dev)))
12186 				break;
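			/* fall through - on DDI platforms UNKNOWN is a digital port */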
12187 		case INTEL_OUTPUT_DISPLAYPORT:
12188 		case INTEL_OUTPUT_HDMI:
12189 		case INTEL_OUTPUT_EDP:
12190 			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12191 
12192 			/* the same port mustn't appear more than once */
12193 			if (used_ports & port_mask)
12194 				return false;
12195 
12196 			used_ports |= port_mask;
12197 		default:
12198 			break;
12199 		}
12200 	}
12201 
12202 	return true;
12203 }
12204 
12205 static void
12206 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12207 {
12208 	struct drm_crtc_state tmp_state;
12209 	struct intel_crtc_scaler_state scaler_state;
12210 	struct intel_dpll_hw_state dpll_hw_state;
12211 	enum intel_dpll_id shared_dpll;
12212 	uint32_t ddi_pll_sel;
12213 	bool force_thru;
12214 
12215 	/* FIXME: before the switch to atomic started, a new pipe_config was
12216 	 * kzalloc'd. Code that depends on any field being zero should be
12217 	 * fixed, so that the crtc_state can be safely duplicated. For now,
12218 	 * only fields that are known not to cause problems are preserved. */
12219 
12220 	tmp_state = crtc_state->base;
12221 	scaler_state = crtc_state->scaler_state;
12222 	shared_dpll = crtc_state->shared_dpll;
12223 	dpll_hw_state = crtc_state->dpll_hw_state;
12224 	ddi_pll_sel = crtc_state->ddi_pll_sel;
12225 	force_thru = crtc_state->pch_pfit.force_thru;
12226 
12227 	memset(crtc_state, 0, sizeof(*crtc_state));
12228 
12229 	crtc_state->base = tmp_state;
12230 	crtc_state->scaler_state = scaler_state;
12231 	crtc_state->shared_dpll = shared_dpll;
12232 	crtc_state->dpll_hw_state = dpll_hw_state;
12233 	crtc_state->ddi_pll_sel = ddi_pll_sel;
12234 	crtc_state->pch_pfit.force_thru = force_thru;
12235 }
12236 
12237 static int
12238 intel_modeset_pipe_config(struct drm_crtc *crtc,
12239 			  struct intel_crtc_state *pipe_config)
12240 {
12241 	struct drm_atomic_state *state = pipe_config->base.state;
12242 	struct intel_encoder *encoder;
12243 	struct drm_connector *connector;
12244 	struct drm_connector_state *connector_state;
12245 	int base_bpp, ret = -EINVAL;
12246 	int i;
12247 	bool retry = true;
12248 
12249 	clear_intel_crtc_state(pipe_config);
12250 
12251 	pipe_config->cpu_transcoder =
12252 		(enum transcoder) to_intel_crtc(crtc)->pipe;
12253 
12254 	/*
12255 	 * Sanitize sync polarity flags based on requested ones. If neither
12256 	 * positive nor negative polarity is requested, treat this as meaning
12257 	 * negative polarity.
12258 	 */
12259 	if (!(pipe_config->base.adjusted_mode.flags &
12260 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12261 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12262 
12263 	if (!(pipe_config->base.adjusted_mode.flags &
12264 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12265 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12266 
12267 	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12268 					     pipe_config);
12269 	if (base_bpp < 0)
12270 		goto fail;
12271 
12272 	/*
12273 	 * Determine the real pipe dimensions. Note that stereo modes can
12274 	 * increase the actual pipe size due to the frame doubling and
12275 	 * insertion of additional space for blanks between the frame. This
12276 	 * is stored in the crtc timings. We use the requested mode to do this
12277 	 * computation to clearly distinguish it from the adjusted mode, which
12278 	 * can be changed by the connectors in the below retry loop.
12279 	 */
12280 	drm_crtc_get_hv_timing(&pipe_config->base.mode,
12281 			       &pipe_config->pipe_src_w,
12282 			       &pipe_config->pipe_src_h);
12283 
12284 encoder_retry:
12285 	/* Ensure the port clock defaults are reset when retrying. */
12286 	pipe_config->port_clock = 0;
12287 	pipe_config->pixel_multiplier = 1;
12288 
12289 	/* Fill in default crtc timings, allow encoders to overwrite them. */
12290 	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12291 			      CRTC_STEREO_DOUBLE);
12292 
12293 	/* Pass our mode to the connectors and the CRTC to give them a chance to
12294 	 * adjust it according to limitations or connector properties, and also
12295 	 * a chance to reject the mode entirely.
12296 	 */
12297 	for_each_connector_in_state(state, connector, connector_state, i) {
12298 		if (connector_state->crtc != crtc)
12299 			continue;
12300 
12301 		encoder = to_intel_encoder(connector_state->best_encoder);
12302 
12303 		if (!(encoder->compute_config(encoder, pipe_config))) {
12304 			DRM_DEBUG_KMS("Encoder config failure\n");
12305 			goto fail;
12306 		}
12307 	}
12308 
12309 	/* Set default port clock if not overwritten by the encoder. Needs to be
12310 	 * done afterwards in case the encoder adjusts the mode. */
12311 	if (!pipe_config->port_clock)
12312 		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12313 			* pipe_config->pixel_multiplier;
12314 
12315 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12316 	if (ret < 0) {
12317 		DRM_DEBUG_KMS("CRTC fixup failed\n");
12318 		goto fail;
12319 	}
12320 
12321 	if (ret == RETRY) {
12322 		if (WARN(!retry, "loop in pipe configuration computation\n")) {
12323 			ret = -EINVAL;
12324 			goto fail;
12325 		}
12326 
12327 		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12328 		retry = false;
12329 		goto encoder_retry;
12330 	}
12331 
12332 	/* Dithering seems to not pass through bits correctly when it should, so
12333 	 * only enable it on 6bpc panels. */
12334 	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12335 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12336 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12337 
12338 fail:
12339 	return ret;
12340 }
12341 
12342 static void
12343 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12344 {
12345 	struct drm_crtc *crtc;
12346 	struct drm_crtc_state *crtc_state;
12347 	int i;
12348 
12349 	/* Double check state. */
12350 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12351 		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12352 
12353 		/* Update hwmode for vblank functions */
12354 		if (crtc->state->active)
12355 			crtc->hwmode = crtc->state->adjusted_mode;
12356 		else
12357 			crtc->hwmode.crtc_clock = 0;
12358 	}
12359 }
12360 
12361 static bool intel_fuzzy_clock_check(int clock1, int clock2)
12362 {
12363 	int diff;
12364 
12365 	if (clock1 == clock2)
12366 		return true;
12367 
12368 	if (!clock1 || !clock2)
12369 		return false;
12370 
12371 	diff = abs(clock1 - clock2);
12372 
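	/*
	 * Accept the clocks as equal if they differ by less than 5% of their
	 * sum, e.g. 100000 vs 109000 kHz gives
	 * (9000 + 209000) * 100 / 209000 = 104 < 105 and matches, while
	 * 100000 vs 111000 kHz gives 105 and does not.
	 */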
12373 	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
12374 		return true;
12375 
12376 	return false;
12377 }
12378 
12379 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
12380 	list_for_each_entry((intel_crtc), \
12381 			    &(dev)->mode_config.crtc_list, \
12382 			    base.head) \
12383 		if (mask & (1 << (intel_crtc)->pipe))
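/*
 * Usage sketch (hypothetical): walk only pipes A and C:
 *
 *	for_each_intel_crtc_masked(dev, BIT(PIPE_A) | BIT(PIPE_C), intel_crtc)
 *		do_something(intel_crtc);
 */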
12384 
12385 static bool
12386 intel_compare_m_n(unsigned int m, unsigned int n,
12387 		  unsigned int m2, unsigned int n2,
12388 		  bool exact)
12389 {
12390 	if (m == m2 && n == n2)
12391 		return true;
12392 
12393 	if (exact || !m || !n || !m2 || !n2)
12394 		return false;
12395 
12396 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12397 
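	/*
	 * Scale the smaller m (and its n) up by powers of two until the m
	 * values meet; the pairs then compare equal only when the ratios
	 * match up to a power-of-two scaling. E.g. (illustrative)
	 * m/n = 3/4 against m2/n2 = 6/8: one doubling turns 3/4 into 6/8,
	 * so the two are treated as equal.
	 */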
12398 	if (m > m2) {
12399 		while (m > m2) {
12400 			m2 <<= 1;
12401 			n2 <<= 1;
12402 		}
12403 	} else if (m < m2) {
12404 		while (m < m2) {
12405 			m <<= 1;
12406 			n <<= 1;
12407 		}
12408 	}
12409 
12410 	return m == m2 && n == n2;
12411 }
12412 
12413 static bool
12414 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12415 		       struct intel_link_m_n *m2_n2,
12416 		       bool adjust)
12417 {
12418 	if (m_n->tu == m2_n2->tu &&
12419 	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12420 			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12421 	    intel_compare_m_n(m_n->link_m, m_n->link_n,
12422 			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
12423 		if (adjust)
12424 			*m2_n2 = *m_n;
12425 
12426 		return true;
12427 	}
12428 
12429 	return false;
12430 }
12431 
12432 static bool
12433 intel_pipe_config_compare(struct drm_device *dev,
12434 			  struct intel_crtc_state *current_config,
12435 			  struct intel_crtc_state *pipe_config,
12436 			  bool adjust)
12437 {
12438 	bool ret = true;
12439 
12440 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12441 	do { \
12442 		if (!adjust) \
12443 			DRM_ERROR(fmt, ##__VA_ARGS__); \
12444 		else \
12445 			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12446 	} while (0)
12447 
12448 #define PIPE_CONF_CHECK_X(name)	\
12449 	if (current_config->name != pipe_config->name) { \
12450 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12451 			  "(expected 0x%08x, found 0x%08x)\n", \
12452 			  current_config->name, \
12453 			  pipe_config->name); \
12454 		ret = false; \
12455 	}
12456 
12457 #define PIPE_CONF_CHECK_I(name)	\
12458 	if (current_config->name != pipe_config->name) { \
12459 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12460 			  "(expected %i, found %i)\n", \
12461 			  current_config->name, \
12462 			  pipe_config->name); \
12463 		ret = false; \
12464 	}
12465 
12466 #define PIPE_CONF_CHECK_M_N(name) \
12467 	if (!intel_compare_link_m_n(&current_config->name, \
12468 				    &pipe_config->name,\
12469 				    adjust)) { \
12470 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12471 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12472 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12473 			  current_config->name.tu, \
12474 			  current_config->name.gmch_m, \
12475 			  current_config->name.gmch_n, \
12476 			  current_config->name.link_m, \
12477 			  current_config->name.link_n, \
12478 			  pipe_config->name.tu, \
12479 			  pipe_config->name.gmch_m, \
12480 			  pipe_config->name.gmch_n, \
12481 			  pipe_config->name.link_m, \
12482 			  pipe_config->name.link_n); \
12483 		ret = false; \
12484 	}
12485 
12486 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12487 	if (!intel_compare_link_m_n(&current_config->name, \
12488 				    &pipe_config->name, adjust) && \
12489 	    !intel_compare_link_m_n(&current_config->alt_name, \
12490 				    &pipe_config->name, adjust)) { \
12491 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12492 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12493 			  "or tu %i gmch %i/%i link %i/%i, " \
12494 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12495 			  current_config->name.tu, \
12496 			  current_config->name.gmch_m, \
12497 			  current_config->name.gmch_n, \
12498 			  current_config->name.link_m, \
12499 			  current_config->name.link_n, \
12500 			  current_config->alt_name.tu, \
12501 			  current_config->alt_name.gmch_m, \
12502 			  current_config->alt_name.gmch_n, \
12503 			  current_config->alt_name.link_m, \
12504 			  current_config->alt_name.link_n, \
12505 			  pipe_config->name.tu, \
12506 			  pipe_config->name.gmch_m, \
12507 			  pipe_config->name.gmch_n, \
12508 			  pipe_config->name.link_m, \
12509 			  pipe_config->name.link_n); \
12510 		ret = false; \
12511 	}
12512 
12513 /* This is required for BDW+ where there is only one set of registers for
12514  * switching between high and low RR.
12515  * This macro can be used whenever a comparison has to be made between one
12516  * hw state and multiple sw state variables.
12517  */
12518 #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12519 	if ((current_config->name != pipe_config->name) && \
12520 		(current_config->alt_name != pipe_config->name)) { \
12521 			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12522 				  "(expected %i or %i, found %i)\n", \
12523 				  current_config->name, \
12524 				  current_config->alt_name, \
12525 				  pipe_config->name); \
12526 			ret = false; \
12527 	}
12528 
12529 #define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12530 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
12531 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12532 			  "(expected %i, found %i)\n", \
12533 			  current_config->name & (mask), \
12534 			  pipe_config->name & (mask)); \
12535 		ret = false; \
12536 	}
12537 
12538 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12539 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12540 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12541 			  "(expected %i, found %i)\n", \
12542 			  current_config->name, \
12543 			  pipe_config->name); \
12544 		ret = false; \
12545 	}
12546 
12547 #define PIPE_CONF_QUIRK(quirk)	\
12548 	((current_config->quirks | pipe_config->quirks) & (quirk))
12549 
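	/*
	 * As an example of the expansion, PIPE_CONF_CHECK_I(pixel_multiplier)
	 * compares current_config->pixel_multiplier against
	 * pipe_config->pixel_multiplier and, on mismatch, logs the
	 * expected/found values and flags the pipe config as inconsistent
	 * by clearing ret.
	 */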
12550 	PIPE_CONF_CHECK_I(cpu_transcoder);
12551 
12552 	PIPE_CONF_CHECK_I(has_pch_encoder);
12553 	PIPE_CONF_CHECK_I(fdi_lanes);
12554 	PIPE_CONF_CHECK_M_N(fdi_m_n);
12555 
12556 	PIPE_CONF_CHECK_I(has_dp_encoder);
12557 	PIPE_CONF_CHECK_I(lane_count);
12558 
12559 	if (INTEL_INFO(dev)->gen < 8) {
12560 		PIPE_CONF_CHECK_M_N(dp_m_n);
12561 
12562 		if (current_config->has_drrs)
12563 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12564 	} else
12565 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12566 
12567 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12568 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12569 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12570 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12571 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12572 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12573 
12574 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12575 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12576 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12577 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12578 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12579 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12580 
12581 	PIPE_CONF_CHECK_I(pixel_multiplier);
12582 	PIPE_CONF_CHECK_I(has_hdmi_sink);
12583 	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12584 	    IS_VALLEYVIEW(dev))
12585 		PIPE_CONF_CHECK_I(limited_color_range);
12586 	PIPE_CONF_CHECK_I(has_infoframe);
12587 
12588 	PIPE_CONF_CHECK_I(has_audio);
12589 
12590 	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12591 			      DRM_MODE_FLAG_INTERLACE);
12592 
12593 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12594 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12595 				      DRM_MODE_FLAG_PHSYNC);
12596 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12597 				      DRM_MODE_FLAG_NHSYNC);
12598 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12599 				      DRM_MODE_FLAG_PVSYNC);
12600 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12601 				      DRM_MODE_FLAG_NVSYNC);
12602 	}
12603 
12604 	PIPE_CONF_CHECK_X(gmch_pfit.control);
12605 	/* pfit ratios are autocomputed by the hw on gen4+ */
12606 	if (INTEL_INFO(dev)->gen < 4)
12607 		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
12608 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12609 
12610 	if (!adjust) {
12611 		PIPE_CONF_CHECK_I(pipe_src_w);
12612 		PIPE_CONF_CHECK_I(pipe_src_h);
12613 
12614 		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12615 		if (current_config->pch_pfit.enabled) {
12616 			PIPE_CONF_CHECK_X(pch_pfit.pos);
12617 			PIPE_CONF_CHECK_X(pch_pfit.size);
12618 		}
12619 
12620 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12621 	}
12622 
12623 	/* BDW+ don't expose a synchronous way to read the state */
12624 	if (IS_HASWELL(dev))
12625 		PIPE_CONF_CHECK_I(ips_enabled);
12626 
12627 	PIPE_CONF_CHECK_I(double_wide);
12628 
12629 	PIPE_CONF_CHECK_X(ddi_pll_sel);
12630 
12631 	PIPE_CONF_CHECK_I(shared_dpll);
12632 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12633 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12634 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12635 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12636 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12637 	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12638 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12639 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12640 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12641 
12642 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12643 		PIPE_CONF_CHECK_I(pipe_bpp);
12644 
12645 	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12646 	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12647 
12648 #undef PIPE_CONF_CHECK_X
12649 #undef PIPE_CONF_CHECK_I
12650 #undef PIPE_CONF_CHECK_I_ALT
12651 #undef PIPE_CONF_CHECK_FLAGS
12652 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12653 #undef PIPE_CONF_QUIRK
12654 #undef INTEL_ERR_OR_DBG_KMS
12655 
12656 	return ret;
12657 }
12658 
12659 static void check_wm_state(struct drm_device *dev)
12660 {
12661 	struct drm_i915_private *dev_priv = dev->dev_private;
12662 	struct skl_ddb_allocation hw_ddb, *sw_ddb;
12663 	struct intel_crtc *intel_crtc;
12664 	int plane;
12665 
12666 	if (INTEL_INFO(dev)->gen < 9)
12667 		return;
12668 
12669 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12670 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
12671 
12672 	for_each_intel_crtc(dev, intel_crtc) {
12673 		struct skl_ddb_entry *hw_entry, *sw_entry;
12674 		const enum pipe pipe = intel_crtc->pipe;
12675 
12676 		if (!intel_crtc->active)
12677 			continue;
12678 
12679 		/* planes */
12680 		for_each_plane(dev_priv, pipe, plane) {
12681 			hw_entry = &hw_ddb.plane[pipe][plane];
12682 			sw_entry = &sw_ddb->plane[pipe][plane];
12683 
12684 			if (skl_ddb_entry_equal(hw_entry, sw_entry))
12685 				continue;
12686 
12687 			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12688 				  "(expected (%u,%u), found (%u,%u))\n",
12689 				  pipe_name(pipe), plane + 1,
12690 				  sw_entry->start, sw_entry->end,
12691 				  hw_entry->start, hw_entry->end);
12692 		}
12693 
12694 		/* cursor */
12695 		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12696 		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12697 
12698 		if (skl_ddb_entry_equal(hw_entry, sw_entry))
12699 			continue;
12700 
12701 		DRM_ERROR("mismatch in DDB state pipe %c cursor "
12702 			  "(expected (%u,%u), found (%u,%u))\n",
12703 			  pipe_name(pipe),
12704 			  sw_entry->start, sw_entry->end,
12705 			  hw_entry->start, hw_entry->end);
12706 	}
12707 }
12708 
12709 static void
12710 check_connector_state(struct drm_device *dev,
12711 		      struct drm_atomic_state *old_state)
12712 {
12713 	struct drm_connector_state *old_conn_state;
12714 	struct drm_connector *connector;
12715 	int i;
12716 
12717 	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
12718 		struct drm_encoder *encoder = connector->encoder;
12719 		struct drm_connector_state *state = connector->state;
12720 
12721 		/* This also checks the encoder/connector hw state with the
12722 		 * ->get_hw_state callbacks. */
12723 		intel_connector_check_state(to_intel_connector(connector));
12724 
12725 		I915_STATE_WARN(state->best_encoder != encoder,
12726 		     "connector's atomic encoder doesn't match legacy encoder\n");
12727 	}
12728 }
12729 
12730 static void
12731 check_encoder_state(struct drm_device *dev)
12732 {
12733 	struct intel_encoder *encoder;
12734 	struct intel_connector *connector;
12735 
12736 	for_each_intel_encoder(dev, encoder) {
12737 		bool enabled = false;
12738 		enum pipe pipe;
12739 
12740 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12741 			      encoder->base.base.id,
12742 			      encoder->base.name);
12743 
12744 		for_each_intel_connector(dev, connector) {
12745 			if (connector->base.state->best_encoder != &encoder->base)
12746 				continue;
12747 			enabled = true;
12748 
12749 			I915_STATE_WARN(connector->base.state->crtc !=
12750 					encoder->base.crtc,
12751 			     "connector's crtc doesn't match encoder crtc\n");
12752 		}
12753 
12754 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
12755 		     "encoder's enabled state mismatch "
12756 		     "(expected %i, found %i)\n",
12757 		     !!encoder->base.crtc, enabled);
12758 
12759 		if (!encoder->base.crtc) {
12760 			bool active;
12761 
12762 			active = encoder->get_hw_state(encoder, &pipe);
12763 			I915_STATE_WARN(active,
12764 			     "encoder detached but still enabled on pipe %c.\n",
12765 			     pipe_name(pipe));
12766 		}
12767 	}
12768 }
12769 
12770 static void
12771 check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
12772 {
12773 	struct drm_i915_private *dev_priv = dev->dev_private;
12774 	struct intel_encoder *encoder;
12775 	struct drm_crtc_state *old_crtc_state;
12776 	struct drm_crtc *crtc;
12777 	int i;
12778 
12779 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
12780 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12781 		struct intel_crtc_state *pipe_config, *sw_config;
12782 		bool active;
12783 
12784 		if (!needs_modeset(crtc->state) &&
12785 		    !to_intel_crtc_state(crtc->state)->update_pipe)
12786 			continue;
12787 
12788 		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
12789 		pipe_config = to_intel_crtc_state(old_crtc_state);
12790 		memset(pipe_config, 0, sizeof(*pipe_config));
12791 		pipe_config->base.crtc = crtc;
12792 		pipe_config->base.state = old_state;
12793 
12794 		DRM_DEBUG_KMS("[CRTC:%d]\n",
12795 			      crtc->base.id);
12796 
12797 		active = dev_priv->display.get_pipe_config(intel_crtc,
12798 							   pipe_config);
12799 
12800 		/* hw state is inconsistent with the pipe quirk */
12801 		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
12802 		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12803 			active = crtc->state->active;
12804 
12805 		I915_STATE_WARN(crtc->state->active != active,
12806 		     "crtc active state doesn't match with hw state "
12807 		     "(expected %i, found %i)\n", crtc->state->active, active);
12808 
12809 		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
12810 		     "transitional active state does not match atomic hw state "
12811 		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
12812 
12813 		for_each_encoder_on_crtc(dev, crtc, encoder) {
12814 			enum pipe pipe;
12815 
12816 			active = encoder->get_hw_state(encoder, &pipe);
12817 			I915_STATE_WARN(active != crtc->state->active,
12818 				"[ENCODER:%i] active %i with crtc active %i\n",
12819 				encoder->base.base.id, active, crtc->state->active);
12820 
12821 			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12822 					"Encoder connected to wrong pipe %c\n",
12823 					pipe_name(pipe));
12824 
12825 			if (active)
12826 				encoder->get_config(encoder, pipe_config);
12827 		}
12828 
12829 		if (!crtc->state->active)
12830 			continue;
12831 
12832 		sw_config = to_intel_crtc_state(crtc->state);
12833 		if (!intel_pipe_config_compare(dev, sw_config,
12834 					       pipe_config, false)) {
12835 			I915_STATE_WARN(1, "pipe state doesn't match!\n");
12836 			intel_dump_pipe_config(intel_crtc, pipe_config,
12837 					       "[hw state]");
12838 			intel_dump_pipe_config(intel_crtc, sw_config,
12839 					       "[sw state]");
12840 		}
12841 	}
12842 }
12843 
12844 static void
12845 check_shared_dpll_state(struct drm_device *dev)
12846 {
12847 	struct drm_i915_private *dev_priv = dev->dev_private;
12848 	struct intel_crtc *crtc;
12849 	struct intel_dpll_hw_state dpll_hw_state;
12850 	int i;
12851 
12852 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12853 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12854 		int enabled_crtcs = 0, active_crtcs = 0;
12855 		bool active;
12856 
12857 		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12858 
12859 		DRM_DEBUG_KMS("%s\n", pll->name);
12860 
12861 		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
12862 
12863 		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
12864 		     "more active pll users than references: %i vs %i\n",
12865 		     pll->active, hweight32(pll->config.crtc_mask));
12866 		I915_STATE_WARN(pll->active && !pll->on,
12867 		     "pll in active use but not on in sw tracking\n");
12868 		I915_STATE_WARN(pll->on && !pll->active,
12869 		     "pll in on but not on in use in sw tracking\n");
12870 		I915_STATE_WARN(pll->on != active,
12871 		     "pll on state mismatch (expected %i, found %i)\n",
12872 		     pll->on, active);
12873 
12874 		for_each_intel_crtc(dev, crtc) {
12875 			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
12876 				enabled_crtcs++;
12877 			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
12878 				active_crtcs++;
12879 		}
12880 		I915_STATE_WARN(pll->active != active_crtcs,
12881 		     "pll active crtcs mismatch (expected %i, found %i)\n",
12882 		     pll->active, active_crtcs);
12883 		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
12884 		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
12885 		     hweight32(pll->config.crtc_mask), enabled_crtcs);
12886 
12887 		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
12888 				       sizeof(dpll_hw_state)),
12889 		     "pll hw state mismatch\n");
12890 	}
12891 }
12892 
12893 static void
12894 intel_modeset_check_state(struct drm_device *dev,
12895 			  struct drm_atomic_state *old_state)
12896 {
12897 	check_wm_state(dev);
12898 	check_connector_state(dev, old_state);
12899 	check_encoder_state(dev);
12900 	check_crtc_state(dev, old_state);
12901 	check_shared_dpll_state(dev);
12902 }
12903 
12904 void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
12905 				     int dotclock)
12906 {
12907 	/*
12908 	 * FDI already provided one idea for the dotclock.
12909 	 * Yell if the encoder disagrees.
12910 	 */
12911 	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
12912 	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12913 	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
12914 }
12915 
12916 static void update_scanline_offset(struct intel_crtc *crtc)
12917 {
12918 	struct drm_device *dev = crtc->base.dev;
12919 
12920 	/*
12921 	 * The scanline counter increments at the leading edge of hsync.
12922 	 *
12923 	 * On most platforms it starts counting from vtotal-1 on the
12924 	 * first active line. That means the scanline counter value is
12925 	 * always one less than what we would expect. I.e. just after
12926 	 * start of vblank, which also occurs at start of hsync (on the
12927 	 * last active line), the scanline counter will read vblank_start-1.
12928 	 *
12929 	 * On gen2 the scanline counter starts counting from 1 instead
12930 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12931 	 * to keep the value positive), instead of adding one.
12932 	 *
12933 	 * On HSW+ the behaviour of the scanline counter depends on the output
12934 	 * type. For DP ports it behaves like most other platforms, but on HDMI
12935 	 * there's an extra 1 line difference. So we need to add two instead of
12936 	 * one to the value.
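	 *
	 * Illustrative example: on gen2 with crtc_vtotal = 806 the offset
	 * becomes 805, so the scanline readout (which adds scanline_offset
	 * modulo vtotal) maps a raw reading of 1 on the first active line to
	 * scanline 0, the same answer the vtotal-1 platforms give directly.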
12937 	 */
12938 	if (IS_GEN2(dev)) {
12939 		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12940 		int vtotal;
12941 
12942 		vtotal = adjusted_mode->crtc_vtotal;
12943 		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12944 			vtotal /= 2;
12945 
12946 		crtc->scanline_offset = vtotal - 1;
12947 	} else if (HAS_DDI(dev) &&
12948 		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
12949 		crtc->scanline_offset = 2;
12950 	} else
12951 		crtc->scanline_offset = 1;
12952 }
12953 
12954 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12955 {
12956 	struct drm_device *dev = state->dev;
12957 	struct drm_i915_private *dev_priv = to_i915(dev);
12958 	struct intel_shared_dpll_config *shared_dpll = NULL;
12959 	struct intel_crtc *intel_crtc;
12960 	struct intel_crtc_state *intel_crtc_state;
12961 	struct drm_crtc *crtc;
12962 	struct drm_crtc_state *crtc_state;
12963 	int i;
12964 
12965 	if (!dev_priv->display.crtc_compute_clock)
12966 		return;
12967 
12968 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12969 		int dpll;
12970 
12971 		intel_crtc = to_intel_crtc(crtc);
12972 		intel_crtc_state = to_intel_crtc_state(crtc_state);
12973 		dpll = intel_crtc_state->shared_dpll;
12974 
12975 		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
12976 			continue;
12977 
12978 		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
12979 
12980 		if (!shared_dpll)
12981 			shared_dpll = intel_atomic_get_shared_dpll_state(state);
12982 
12983 		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
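		/* e.g. releasing pipe B (bit 1) from a DPLL it shared with
		 * pipe A leaves crtc_mask at 0x1, so the PLL stays claimed
		 * by pipe A alone (illustrative). */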
12984 	}
12985 }
12986 
12987 /*
12988  * This implements the workaround described in the "notes" section of the mode
12989  * set sequence documentation. When going from no pipes or single pipe to
12990  * multiple pipes, and planes are enabled after the pipe, we need to wait at
12991  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
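 *
 * Illustrative timeline: going from pipe A alone to pipes A+B, the newly
 * enabled pipe B gets hsw_workaround_pipe = PIPE_A below, so the commit
 * code waits the required vblanks on pipe A before enabling B's planes.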
12992  */
12993 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
12994 {
12995 	struct drm_crtc_state *crtc_state;
12996 	struct intel_crtc *intel_crtc;
12997 	struct drm_crtc *crtc;
12998 	struct intel_crtc_state *first_crtc_state = NULL;
12999 	struct intel_crtc_state *other_crtc_state = NULL;
13000 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13001 	int i;
13002 
13003 	/* look at all crtcs that are going to be enabled during the modeset */
13004 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13005 		intel_crtc = to_intel_crtc(crtc);
13006 
13007 		if (!crtc_state->active || !needs_modeset(crtc_state))
13008 			continue;
13009 
13010 		if (first_crtc_state) {
13011 			other_crtc_state = to_intel_crtc_state(crtc_state);
13012 			break;
13013 		} else {
13014 			first_crtc_state = to_intel_crtc_state(crtc_state);
13015 			first_pipe = intel_crtc->pipe;
13016 		}
13017 	}
13018 
13019 	/* No workaround needed? */
13020 	if (!first_crtc_state)
13021 		return 0;
13022 
13023 	/* w/a possibly needed, check how many crtcs are already enabled. */
13024 	for_each_intel_crtc(state->dev, intel_crtc) {
13025 		struct intel_crtc_state *pipe_config;
13026 
13027 		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13028 		if (IS_ERR(pipe_config))
13029 			return PTR_ERR(pipe_config);
13030 
13031 		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13032 
13033 		if (!pipe_config->base.active ||
13034 		    needs_modeset(&pipe_config->base))
13035 			continue;
13036 
13037 		/* 2 or more enabled crtcs means no need for w/a */
13038 		if (enabled_pipe != INVALID_PIPE)
13039 			return 0;
13040 
13041 		enabled_pipe = intel_crtc->pipe;
13042 	}
13043 
13044 	if (enabled_pipe != INVALID_PIPE)
13045 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13046 	else if (other_crtc_state)
13047 		other_crtc_state->hsw_workaround_pipe = first_pipe;
13048 
13049 	return 0;
13050 }
13051 
13052 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13053 {
13054 	struct drm_crtc *crtc;
13055 	struct drm_crtc_state *crtc_state;
13056 	int ret = 0;
13057 
13058 	/* add all active pipes to the state */
13059 	for_each_crtc(state->dev, crtc) {
13060 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13061 		if (IS_ERR(crtc_state))
13062 			return PTR_ERR(crtc_state);
13063 
13064 		if (!crtc_state->active || needs_modeset(crtc_state))
13065 			continue;
13066 
13067 		crtc_state->mode_changed = true;
13068 
13069 		ret = drm_atomic_add_affected_connectors(state, crtc);
13070 		if (ret)
13071 			break;
13072 
13073 		ret = drm_atomic_add_affected_planes(state, crtc);
13074 		if (ret)
13075 			break;
13076 	}
13077 
13078 	return ret;
13079 }
13080 
13081 static int intel_modeset_checks(struct drm_atomic_state *state)
13082 {
13083 	struct drm_device *dev = state->dev;
13084 	struct drm_i915_private *dev_priv = dev->dev_private;
13085 	int ret;
13086 
13087 	if (!check_digital_port_conflicts(state)) {
13088 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13089 		return -EINVAL;
13090 	}
13091 
13092 	/*
13093 	 * See if the config requires any additional preparation, e.g.
13094 	 * to adjust global state with pipes off.  We need to do this
13095 	 * here so we can get the modeset_pipe updated config for the new
13096 	 * mode set on this crtc.  For other crtcs we need to use the
13097 	 * adjusted_mode bits in the crtc directly.
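	 *
	 * Example (illustrative): when modeset_calc_cdclk below picks a new
	 * global cdclk, every active pipe gets added to the state, since a
	 * cdclk change disturbs all of them.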
13098 	 */
13099 	if (dev_priv->display.modeset_calc_cdclk) {
13100 		unsigned int cdclk;
13101 
13102 		ret = dev_priv->display.modeset_calc_cdclk(state);
13103 
13104 		cdclk = to_intel_atomic_state(state)->cdclk;
13105 		if (!ret && cdclk != dev_priv->cdclk_freq)
13106 			ret = intel_modeset_all_pipes(state);
13107 
13108 		if (ret < 0)
13109 			return ret;
13110 	} else
13111 		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
13112 
13113 	intel_modeset_clear_plls(state);
13114 
13115 	if (IS_HASWELL(dev))
13116 		return haswell_mode_set_planes_workaround(state);
13117 
13118 	return 0;
13119 }
13120 
13121 /**
13122  * intel_atomic_check - validate state object
13123  * @dev: drm device
13124  * @state: state to validate
13125  */
13126 static int intel_atomic_check(struct drm_device *dev,
13127 			      struct drm_atomic_state *state)
13128 {
13129 	struct drm_crtc *crtc;
13130 	struct drm_crtc_state *crtc_state;
13131 	int ret, i;
13132 	bool any_ms = false;
13133 
13134 	ret = drm_atomic_helper_check_modeset(dev, state);
13135 	if (ret)
13136 		return ret;
13137 
13138 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13139 		struct intel_crtc_state *pipe_config =
13140 			to_intel_crtc_state(crtc_state);
13141 
13142 		memset(&to_intel_crtc(crtc)->atomic, 0,
13143 		       sizeof(struct intel_crtc_atomic_commit));
13144 
13145 		/* Catch I915_MODE_FLAG_INHERITED */
13146 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13147 			crtc_state->mode_changed = true;
13148 
13149 		if (!crtc_state->enable) {
13150 			if (needs_modeset(crtc_state))
13151 				any_ms = true;
13152 			continue;
13153 		}
13154 
13155 		if (!needs_modeset(crtc_state))
13156 			continue;
13157 
13158 		/* FIXME: For only active_changed we shouldn't need to do any
13159 		 * state recomputation at all. */
13160 
13161 		ret = drm_atomic_add_affected_connectors(state, crtc);
13162 		if (ret)
13163 			return ret;
13164 
13165 		ret = intel_modeset_pipe_config(crtc, pipe_config);
13166 		if (ret)
13167 			return ret;
13168 
13169 		if (i915.fastboot &&
13170 		    intel_pipe_config_compare(state->dev,
13171 					to_intel_crtc_state(crtc->state),
13172 					pipe_config, true)) {
13173 			crtc_state->mode_changed = false;
13174 			to_intel_crtc_state(crtc_state)->update_pipe = true;
13175 		}
13176 
13177 		if (needs_modeset(crtc_state)) {
13178 			any_ms = true;
13179 
13180 			ret = drm_atomic_add_affected_planes(state, crtc);
13181 			if (ret)
13182 				return ret;
13183 		}
13184 
13185 		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13186 				       needs_modeset(crtc_state) ?
13187 				       "[modeset]" : "[fastset]");
13188 	}
13189 
13190 	if (any_ms) {
13191 		ret = intel_modeset_checks(state);
13192 
13193 		if (ret)
13194 			return ret;
13195 	} else
13196 		to_intel_atomic_state(state)->cdclk =
13197 			to_i915(state->dev)->cdclk_freq;
13198 
13199 	return drm_atomic_helper_check_planes(state->dev, state);
13200 }
13201 
13202 /**
13203  * intel_atomic_commit - commit validated state object
13204  * @dev: DRM device
13205  * @state: the top-level driver state object
13206  * @async: asynchronous commit
13207  *
13208  * This function commits a top-level state object that has been validated
13209  * with drm_atomic_helper_check().
13210  *
13211  * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13212  * we can only handle plane-related operations and do not yet support
13213  * asynchronous commit.
13214  *
13215  * RETURNS
13216  * Zero for success or -errno.
13217  */
13218 static int intel_atomic_commit(struct drm_device *dev,
13219 			       struct drm_atomic_state *state,
13220 			       bool async)
13221 {
13222 	struct drm_i915_private *dev_priv = dev->dev_private;
13223 	struct drm_crtc *crtc;
13224 	struct drm_crtc_state *crtc_state;
13225 	int ret = 0;
13226 	int i;
13227 	bool any_ms = false;
13228 
13229 	if (async) {
13230 		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
13231 		return -EINVAL;
13232 	}
13233 
13234 	ret = drm_atomic_helper_prepare_planes(dev, state);
13235 	if (ret)
13236 		return ret;
13237 
13238 	drm_atomic_helper_swap_state(dev, state);
13239 
13240 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13241 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13242 
13243 		if (!needs_modeset(crtc->state))
13244 			continue;
13245 
13246 		any_ms = true;
13247 		intel_pre_plane_update(intel_crtc);
13248 
13249 		if (crtc_state->active) {
13250 			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
13251 			dev_priv->display.crtc_disable(crtc);
13252 			intel_crtc->active = false;
13253 			intel_disable_shared_dpll(intel_crtc);
13254 		}
13255 	}
13256 
13257 	/* Only after disabling all output pipelines that will be changed can we
13258 	 * update the output configuration. */
13259 	intel_modeset_update_crtc_state(state);
13260 
13261 	if (any_ms) {
13262 		intel_shared_dpll_commit(state);
13263 
13264 		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13265 		modeset_update_crtc_power_domains(state);
13266 	}
13267 
13268 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13269 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13270 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13271 		bool modeset = needs_modeset(crtc->state);
13272 		bool update_pipe = !modeset &&
13273 			to_intel_crtc_state(crtc->state)->update_pipe;
13274 		unsigned long put_domains = 0;
13275 
13276 		if (modeset && crtc->state->active) {
13277 			update_scanline_offset(to_intel_crtc(crtc));
13278 			dev_priv->display.crtc_enable(crtc);
13279 		}
13280 
13281 		if (update_pipe) {
13282 			put_domains = modeset_get_crtc_power_domains(crtc);
13283 
13284 			/* make sure intel_modeset_check_state runs */
13285 			any_ms = true;
13286 		}
13287 
13288 		if (!modeset)
13289 			intel_pre_plane_update(intel_crtc);
13290 
13291 		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
13292 
13293 		if (put_domains)
13294 			modeset_put_power_domains(dev_priv, put_domains);
13295 
13296 		intel_post_plane_update(intel_crtc);
13297 	}
13298 
13299 	/* FIXME: add subpixel order */
13300 
13301 	drm_atomic_helper_wait_for_vblanks(dev, state);
13302 	drm_atomic_helper_cleanup_planes(dev, state);
13303 
13304 	if (any_ms)
13305 		intel_modeset_check_state(dev, state);
13306 
13307 	drm_atomic_state_free(state);
13308 
13309 	return 0;
13310 }
13311 
13312 void intel_crtc_restore_mode(struct drm_crtc *crtc)
13313 {
13314 	struct drm_device *dev = crtc->dev;
13315 	struct drm_atomic_state *state;
13316 	struct drm_crtc_state *crtc_state;
13317 	int ret;
13318 
13319 	state = drm_atomic_state_alloc(dev);
13320 	if (!state) {
13321 		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory\n",
13322 			      crtc->base.id);
13323 		return;
13324 	}
13325 
13326 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
13327 
13328 retry:
13329 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
13330 	ret = PTR_ERR_OR_ZERO(crtc_state);
13331 	if (!ret) {
13332 		if (!crtc_state->active)
13333 			goto out;
13334 
13335 		crtc_state->mode_changed = true;
13336 		ret = drm_atomic_commit(state);
13337 	}
13338 
13339 	if (ret == -EDEADLK) {
13340 		drm_atomic_state_clear(state);
13341 		drm_modeset_backoff(state->acquire_ctx);
13342 		goto retry;
13343 	}
13344 
13345 	if (ret)
13346 out:
13347 		drm_atomic_state_free(state);
13348 }
13349 
13350 #undef for_each_intel_crtc_masked
13351 
13352 static const struct drm_crtc_funcs intel_crtc_funcs = {
13353 	.gamma_set = intel_crtc_gamma_set,
13354 	.set_config = drm_atomic_helper_set_config,
13355 	.destroy = intel_crtc_destroy,
13356 	.page_flip = intel_crtc_page_flip,
13357 	.atomic_duplicate_state = intel_crtc_duplicate_state,
13358 	.atomic_destroy_state = intel_crtc_destroy_state,
13359 };
13360 
13361 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13362 				      struct intel_shared_dpll *pll,
13363 				      struct intel_dpll_hw_state *hw_state)
13364 {
13365 	uint32_t val;
13366 
13367 	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
13368 		return false;
13369 
13370 	val = I915_READ(PCH_DPLL(pll->id));
13371 	hw_state->dpll = val;
13372 	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13373 	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13374 
13375 	return val & DPLL_VCO_ENABLE;
13376 }
13377 
13378 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
13379 				  struct intel_shared_dpll *pll)
13380 {
13381 	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
13382 	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
13383 }
13384 
13385 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
13386 				struct intel_shared_dpll *pll)
13387 {
13388 	/* PCH refclock must be enabled first */
13389 	ibx_assert_pch_refclk_enabled(dev_priv);
13390 
13391 	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
13392 
13393 	/* Wait for the clocks to stabilize. */
13394 	POSTING_READ(PCH_DPLL(pll->id));
13395 	udelay(150);
13396 
13397 	/* The pixel multiplier can only be updated once the
13398 	 * DPLL is enabled and the clocks are stable.
13399 	 *
13400 	 * So write it again.
13401 	 */
13402 	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
13403 	POSTING_READ(PCH_DPLL(pll->id));
13404 	udelay(200);
13405 }
13406 
13407 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
13408 				 struct intel_shared_dpll *pll)
13409 {
13410 	struct drm_device *dev = dev_priv->dev;
13411 	struct intel_crtc *crtc;
13412 
13413 	/* Make sure no transcoder is still depending on us. */
13414 	for_each_intel_crtc(dev, crtc) {
13415 		if (intel_crtc_to_shared_dpll(crtc) == pll)
13416 			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
13417 	}
13418 
13419 	I915_WRITE(PCH_DPLL(pll->id), 0);
13420 	POSTING_READ(PCH_DPLL(pll->id));
13421 	udelay(200);
13422 }
13423 
13424 static char *ibx_pch_dpll_names[] = {
13425 	"PCH DPLL A",
13426 	"PCH DPLL B",
13427 };
13428 
13429 static void ibx_pch_dpll_init(struct drm_device *dev)
13430 {
13431 	struct drm_i915_private *dev_priv = dev->dev_private;
13432 	int i;
13433 
13434 	dev_priv->num_shared_dpll = 2;
13435 
13436 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13437 		dev_priv->shared_dplls[i].id = i;
13438 		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
13439 		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
13440 		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13441 		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
13442 		dev_priv->shared_dplls[i].get_hw_state =
13443 			ibx_pch_dpll_get_hw_state;
13444 	}
13445 }
13446 
13447 static void intel_shared_dpll_init(struct drm_device *dev)
13448 {
13449 	struct drm_i915_private *dev_priv = dev->dev_private;
13450 
13451 	if (HAS_DDI(dev))
13452 		intel_ddi_pll_init(dev);
13453 	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
13454 		ibx_pch_dpll_init(dev);
13455 	else
13456 		dev_priv->num_shared_dpll = 0;
13457 
13458 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
13459 }
13460 
13461 /**
13462  * intel_prepare_plane_fb - Prepare fb for usage on plane
13463  * @plane: drm plane to prepare for
13464  * @fb: framebuffer to prepare for presentation
13465  *
13466  * Prepares a framebuffer for usage on a display plane.  Generally this
13467  * involves pinning the underlying object and updating the frontbuffer tracking
13468  * bits.  Some older platforms need special physical address handling for
13469  * cursor planes.
13470  *
13471  * Returns 0 on success, negative error code on failure.
13472  */
13473 int
13474 intel_prepare_plane_fb(struct drm_plane *plane,
13475 		       const struct drm_plane_state *new_state)
13476 {
13477 	struct drm_device *dev = plane->dev;
13478 	struct drm_framebuffer *fb = new_state->fb;
13479 	struct intel_plane *intel_plane = to_intel_plane(plane);
13480 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13481 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
13482 	int ret = 0;
13483 
13484 	if (!obj)
13485 		return 0;
13486 
13487 	mutex_lock(&dev->struct_mutex);
13488 
13489 	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13490 	    INTEL_INFO(dev)->cursor_needs_physical) {
13491 		int align = IS_I830(dev) ? 16 * 1024 : 256;
13492 		ret = i915_gem_object_attach_phys(obj, align);
13493 		if (ret)
13494 			DRM_DEBUG_KMS("failed to attach phys object\n");
13495 	} else {
13496 		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
13497 	}
13498 
13499 	if (ret == 0)
13500 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13501 
13502 	mutex_unlock(&dev->struct_mutex);
13503 
13504 	return ret;
13505 }
13506 
13507 /**
13508  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13509  * @plane: drm plane to clean up for
13510  * @fb: old framebuffer that was on plane
13511  *
13512  * Cleans up a framebuffer that has just been removed from a plane.
13513  */
13514 void
13515 intel_cleanup_plane_fb(struct drm_plane *plane,
13516 		       const struct drm_plane_state *old_state)
13517 {
13518 	struct drm_device *dev = plane->dev;
13519 	struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb);
13520 
13521 	if (!obj)
13522 		return;
13523 
13524 	if (plane->type != DRM_PLANE_TYPE_CURSOR ||
13525 	    !INTEL_INFO(dev)->cursor_needs_physical) {
13526 		mutex_lock(&dev->struct_mutex);
13527 		intel_unpin_fb_obj(old_state->fb, old_state);
13528 		mutex_unlock(&dev->struct_mutex);
13529 	}
13530 }
13531 
13532 int
13533 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13534 {
13535 	int max_scale;
13536 	struct drm_device *dev;
13537 	struct drm_i915_private *dev_priv;
13538 	int crtc_clock, cdclk;
13539 
13540 	if (!intel_crtc || !crtc_state)
13541 		return DRM_PLANE_HELPER_NO_SCALING;
13542 
13543 	dev = intel_crtc->base.dev;
13544 	dev_priv = dev->dev_private;
13545 	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13546 	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13547 
13548 	if (!crtc_clock || !cdclk)
13549 		return DRM_PLANE_HELPER_NO_SCALING;
13550 
13551 	/*
13552 	 * skl max scale is the lower of:
13553 	 *    just under 3 (the -1 keeps the factor strictly below 3)
13554 	 *            or
13555 	 *    cdclk/crtc_clock
13556 	 */
13557 	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
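	/*
	 * Worked example (illustrative numbers): cdclk = 450000 kHz and
	 * crtc_clock = 148500 kHz give ((450000 << 8) / 148500) << 8 =
	 * 198400 (~3.03 in 16.16 fixed point), which min() clamps to
	 * 3 * 65536 - 1 = 196607, i.e. just under a 3x downscale.
	 */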
13558 
13559 	return max_scale;
13560 }
13561 
13562 static int
13563 intel_check_primary_plane(struct drm_plane *plane,
13564 			  struct intel_crtc_state *crtc_state,
13565 			  struct intel_plane_state *state)
13566 {
13567 	struct drm_crtc *crtc = state->base.crtc;
13568 	struct drm_framebuffer *fb = state->base.fb;
13569 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13570 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13571 	bool can_position = false;
13572 
13573 	if (INTEL_INFO(plane->dev)->gen >= 9) {
13574 		/* use scaler when colorkey is not required */
13575 		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13576 			min_scale = 1;
13577 			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13578 		}
13579 		can_position = true;
13580 	}
13581 
13582 	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13583 					     &state->dst, &state->clip,
13584 					     min_scale, max_scale,
13585 					     can_position, true,
13586 					     &state->visible);
13587 }
13588 
13589 static void
13590 intel_commit_primary_plane(struct drm_plane *plane,
13591 			   struct intel_plane_state *state)
13592 {
13593 	struct drm_crtc *crtc = state->base.crtc;
13594 	struct drm_framebuffer *fb = state->base.fb;
13595 	struct drm_device *dev = plane->dev;
13596 	struct drm_i915_private *dev_priv = dev->dev_private;
13597 	struct intel_crtc *intel_crtc;
13598 	struct drm_rect *src = &state->src;
13599 
13600 	crtc = crtc ? crtc : plane->crtc;
13601 	intel_crtc = to_intel_crtc(crtc);
13602 
13603 	plane->fb = fb;
13604 	crtc->x = src->x1 >> 16;
13605 	crtc->y = src->y1 >> 16;
13606 
13607 	if (!crtc->state->active)
13608 		return;
13609 
13610 	dev_priv->display.update_primary_plane(crtc, fb,
13611 					       state->src.x1 >> 16,
13612 					       state->src.y1 >> 16);
13613 }
13614 
13615 static void
13616 intel_disable_primary_plane(struct drm_plane *plane,
13617 			    struct drm_crtc *crtc)
13618 {
13619 	struct drm_device *dev = plane->dev;
13620 	struct drm_i915_private *dev_priv = dev->dev_private;
13621 
13622 	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
13623 }
13624 
13625 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13626 				    struct drm_crtc_state *old_crtc_state)
13627 {
13628 	struct drm_device *dev = crtc->dev;
13629 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13630 	struct intel_crtc_state *old_intel_state =
13631 		to_intel_crtc_state(old_crtc_state);
13632 	bool modeset = needs_modeset(crtc->state);
13633 
13634 	if (intel_crtc->atomic.update_wm_pre)
13635 		intel_update_watermarks(crtc);
13636 
13637 	/* Perform vblank evasion around commit operation */
13638 	if (crtc->state->active)
13639 		intel_pipe_update_start(intel_crtc);
13640 
13641 	if (modeset)
13642 		return;
13643 
13644 	if (to_intel_crtc_state(crtc->state)->update_pipe)
13645 		intel_update_pipe_config(intel_crtc, old_intel_state);
13646 	else if (INTEL_INFO(dev)->gen >= 9)
13647 		skl_detach_scalers(intel_crtc);
13648 }
13649 
13650 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13651 				     struct drm_crtc_state *old_crtc_state)
13652 {
13653 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13654 
13655 	if (crtc->state->active)
13656 		intel_pipe_update_end(intel_crtc);
13657 }
13658 
13659 /**
13660  * intel_plane_destroy - destroy a plane
13661  * @plane: plane to destroy
13662  *
13663  * Common destruction function for all types of planes (primary, cursor,
13664  * sprite).
13665  */
13666 void intel_plane_destroy(struct drm_plane *plane)
13667 {
13668 	struct intel_plane *intel_plane = to_intel_plane(plane);
13669 	drm_plane_cleanup(plane);
13670 	kfree(intel_plane);
13671 }
13672 
13673 const struct drm_plane_funcs intel_plane_funcs = {
13674 	.update_plane = drm_atomic_helper_update_plane,
13675 	.disable_plane = drm_atomic_helper_disable_plane,
13676 	.destroy = intel_plane_destroy,
13677 	.set_property = drm_atomic_helper_plane_set_property,
13678 	.atomic_get_property = intel_plane_atomic_get_property,
13679 	.atomic_set_property = intel_plane_atomic_set_property,
13680 	.atomic_duplicate_state = intel_plane_duplicate_state,
13681 	.atomic_destroy_state = intel_plane_destroy_state,
13682 
13683 };
13684 
13685 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
13686 						    int pipe)
13687 {
13688 	struct intel_plane *primary;
13689 	struct intel_plane_state *state;
13690 	const uint32_t *intel_primary_formats;
13691 	unsigned int num_formats;
13692 
13693 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
13694 	if (primary == NULL)
13695 		return NULL;
13696 
13697 	state = intel_create_plane_state(&primary->base);
13698 	if (!state) {
13699 		kfree(primary);
13700 		return NULL;
13701 	}
13702 	primary->base.state = &state->base;
13703 
13704 	primary->can_scale = false;
13705 	primary->max_downscale = 1;
13706 	if (INTEL_INFO(dev)->gen >= 9) {
13707 		primary->can_scale = true;
13708 		state->scaler_id = -1;
13709 	}
13710 	primary->pipe = pipe;
13711 	primary->plane = pipe;
13712 	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
13713 	primary->check_plane = intel_check_primary_plane;
13714 	primary->commit_plane = intel_commit_primary_plane;
13715 	primary->disable_plane = intel_disable_primary_plane;
13716 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
13717 		primary->plane = !pipe;
13718 
13719 	if (INTEL_INFO(dev)->gen >= 9) {
13720 		intel_primary_formats = skl_primary_formats;
13721 		num_formats = ARRAY_SIZE(skl_primary_formats);
13722 	} else if (INTEL_INFO(dev)->gen >= 4) {
13723 		intel_primary_formats = i965_primary_formats;
13724 		num_formats = ARRAY_SIZE(i965_primary_formats);
13725 	} else {
13726 		intel_primary_formats = i8xx_primary_formats;
13727 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
13728 	}
13729 
13730 	drm_universal_plane_init(dev, &primary->base, 0,
13731 				 &intel_plane_funcs,
13732 				 intel_primary_formats, num_formats,
13733 				 DRM_PLANE_TYPE_PRIMARY);
13734 
13735 	if (INTEL_INFO(dev)->gen >= 4)
13736 		intel_create_rotation_property(dev, primary);
13737 
13738 	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
13739 
13740 	return &primary->base;
13741 }
13742 
13743 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
13744 {
13745 	if (!dev->mode_config.rotation_property) {
13746 		unsigned long flags = BIT(DRM_ROTATE_0) |
13747 			BIT(DRM_ROTATE_180);
13748 
13749 		if (INTEL_INFO(dev)->gen >= 9)
13750 			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
13751 
13752 		dev->mode_config.rotation_property =
13753 			drm_mode_create_rotation_property(dev, flags);
13754 	}
13755 	if (dev->mode_config.rotation_property)
13756 		drm_object_attach_property(&plane->base.base,
13757 				dev->mode_config.rotation_property,
13758 				plane->base.state->rotation);
13759 }
13760 
13761 static int
13762 intel_check_cursor_plane(struct drm_plane *plane,
13763 			 struct intel_crtc_state *crtc_state,
13764 			 struct intel_plane_state *state)
13765 {
13766 	struct drm_crtc *crtc = crtc_state->base.crtc;
13767 	struct drm_framebuffer *fb = state->base.fb;
13768 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13769 	enum pipe pipe = to_intel_plane(plane)->pipe;
13770 	unsigned stride;
13771 	int ret;
13772 
13773 	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13774 					    &state->dst, &state->clip,
13775 					    DRM_PLANE_HELPER_NO_SCALING,
13776 					    DRM_PLANE_HELPER_NO_SCALING,
13777 					    true, true, &state->visible);
13778 	if (ret)
13779 		return ret;
13780 
13781 	/* if we want to turn off the cursor, ignore width and height */
13782 	if (!obj)
13783 		return 0;
13784 
13785 	/* Check for which cursor types we support */
13786 	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
13787 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
13788 			  state->base.crtc_w, state->base.crtc_h);
13789 		return -EINVAL;
13790 	}
13791 
13792 	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
13793 	if (obj->base.size < stride * state->base.crtc_h) {
13794 		DRM_DEBUG_KMS("buffer is too small\n");
13795 		return -ENOMEM;
13796 	}
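	/*
	 * e.g. a 64x64 ARGB cursor needs stride = 64 * 4 = 256 bytes and at
	 * least 256 * 64 = 16384 bytes of backing storage (illustrative
	 * numbers; crtc_w is first rounded up to a power of two).
	 */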
13797 
13798 	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
13799 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
13800 		return -EINVAL;
13801 	}
13802 
13803 	/*
13804 	 * There's something wrong with the cursor on CHV pipe C.
13805 	 * If it straddles the left edge of the screen then
13806 	 * moving it away from the edge or disabling it often
13807 	 * results in a pipe underrun, and often that can lead to
13808 	 * dead pipe (constant underrun reported, and it scans
13809 	 * out just a solid color). To recover from that, the
13810 	 * display power well must be turned off and on again.
13811 	 * Refuse to put the cursor into that compromised position.
13812 	 */
13813 	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
13814 	    state->visible && state->base.crtc_x < 0) {
13815 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
13816 		return -EINVAL;
13817 	}
13818 
13819 	return 0;
13820 }
13821 
13822 static void
13823 intel_disable_cursor_plane(struct drm_plane *plane,
13824 			   struct drm_crtc *crtc)
13825 {
13826 	intel_crtc_update_cursor(crtc, false);
13827 }
13828 
13829 static void
13830 intel_commit_cursor_plane(struct drm_plane *plane,
13831 			  struct intel_plane_state *state)
13832 {
13833 	struct drm_crtc *crtc = state->base.crtc;
13834 	struct drm_device *dev = plane->dev;
13835 	struct intel_crtc *intel_crtc;
13836 	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
13837 	uint32_t addr;
13838 
13839 	crtc = crtc ? crtc : plane->crtc;
13840 	intel_crtc = to_intel_crtc(crtc);
13841 
13842 	if (!obj)
13843 		addr = 0;
13844 	else if (!INTEL_INFO(dev)->cursor_needs_physical)
13845 		addr = i915_gem_obj_ggtt_offset(obj);
13846 	else
13847 		addr = obj->phys_handle->busaddr;
13848 
13849 	intel_crtc->cursor_addr = addr;
13850 
13851 	if (crtc->state->active)
13852 		intel_crtc_update_cursor(crtc, state->visible);
13853 }
13854 
13855 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
13856 						   int pipe)
13857 {
13858 	struct intel_plane *cursor;
13859 	struct intel_plane_state *state;
13860 
13861 	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
13862 	if (cursor == NULL)
13863 		return NULL;
13864 
13865 	state = intel_create_plane_state(&cursor->base);
13866 	if (!state) {
13867 		kfree(cursor);
13868 		return NULL;
13869 	}
13870 	cursor->base.state = &state->base;
13871 
13872 	cursor->can_scale = false;
13873 	cursor->max_downscale = 1;
13874 	cursor->pipe = pipe;
13875 	cursor->plane = pipe;
13876 	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
13877 	cursor->check_plane = intel_check_cursor_plane;
13878 	cursor->commit_plane = intel_commit_cursor_plane;
13879 	cursor->disable_plane = intel_disable_cursor_plane;
13880 
13881 	drm_universal_plane_init(dev, &cursor->base, 0,
13882 				 &intel_plane_funcs,
13883 				 intel_cursor_formats,
13884 				 ARRAY_SIZE(intel_cursor_formats),
13885 				 DRM_PLANE_TYPE_CURSOR);
13886 
13887 	if (INTEL_INFO(dev)->gen >= 4) {
13888 		if (!dev->mode_config.rotation_property)
13889 			dev->mode_config.rotation_property =
13890 				drm_mode_create_rotation_property(dev,
13891 							BIT(DRM_ROTATE_0) |
13892 							BIT(DRM_ROTATE_180));
13893 		if (dev->mode_config.rotation_property)
13894 			drm_object_attach_property(&cursor->base.base,
13895 				dev->mode_config.rotation_property,
13896 				state->base.rotation);
13897 	}
13898 
13899 	if (INTEL_INFO(dev)->gen >= 9)
13900 		state->scaler_id = -1;
13901 
13902 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
13903 
13904 	return &cursor->base;
13905 }
13906 
13907 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
13908 	struct intel_crtc_state *crtc_state)
13909 {
13910 	int i;
13911 	struct intel_scaler *intel_scaler;
13912 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
13913 
13914 	for (i = 0; i < intel_crtc->num_scalers; i++) {
13915 		intel_scaler = &scaler_state->scalers[i];
13916 		intel_scaler->in_use = 0;
13917 		intel_scaler->mode = PS_SCALER_MODE_DYN;
13918 	}
13919 
13920 	scaler_state->scaler_id = -1;
13921 }
13922 
13923 static void intel_crtc_init(struct drm_device *dev, int pipe)
13924 {
13925 	struct drm_i915_private *dev_priv = dev->dev_private;
13926 	struct intel_crtc *intel_crtc;
13927 	struct intel_crtc_state *crtc_state = NULL;
13928 	struct drm_plane *primary = NULL;
13929 	struct drm_plane *cursor = NULL;
13930 	int i, ret;
13931 
13932 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
13933 	if (intel_crtc == NULL)
13934 		return;
13935 
13936 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
13937 	if (!crtc_state)
13938 		goto fail;
13939 	intel_crtc->config = crtc_state;
13940 	intel_crtc->base.state = &crtc_state->base;
13941 	crtc_state->base.crtc = &intel_crtc->base;
13942 
13943 	/* initialize shared scalers */
13944 	if (INTEL_INFO(dev)->gen >= 9) {
13945 		if (pipe == PIPE_C)
13946 			intel_crtc->num_scalers = 1;
13947 		else
13948 			intel_crtc->num_scalers = SKL_NUM_SCALERS;
13949 
13950 		skl_init_scalers(dev, intel_crtc, crtc_state);
13951 	}
13952 
13953 	primary = intel_primary_plane_create(dev, pipe);
13954 	if (!primary)
13955 		goto fail;
13956 
13957 	cursor = intel_cursor_plane_create(dev, pipe);
13958 	if (!cursor)
13959 		goto fail;
13960 
13961 	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
13962 					cursor, &intel_crtc_funcs);
13963 	if (ret)
13964 		goto fail;
13965 
13966 	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
13967 	for (i = 0; i < 256; i++) {
13968 		intel_crtc->lut_r[i] = i;
13969 		intel_crtc->lut_g[i] = i;
13970 		intel_crtc->lut_b[i] = i;
13971 	}
13972 
13973 	/*
13974 	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
13975 	 * are hooked to pipe B. Hence we want plane A feeding pipe B.
13976 	 */
13977 	intel_crtc->pipe = pipe;
13978 	intel_crtc->plane = pipe;
13979 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
13980 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
13981 		intel_crtc->plane = !pipe;
13982 	}
13983 
13984 	intel_crtc->cursor_base = ~0;
13985 	intel_crtc->cursor_cntl = ~0;
13986 	intel_crtc->cursor_size = ~0;
13987 
13988 	intel_crtc->wm.cxsr_allowed = true;
13989 
13990 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
13991 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
13992 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
13993 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
13994 
13995 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
13996 
13997 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
13998 	return;
13999 
14000 fail:
14001 	if (primary)
14002 		drm_plane_cleanup(primary);
14003 	if (cursor)
14004 		drm_plane_cleanup(cursor);
14005 	kfree(crtc_state);
14006 	kfree(intel_crtc);
14007 }
14008 
14009 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14010 {
14011 	struct drm_encoder *encoder = connector->base.encoder;
14012 	struct drm_device *dev = connector->base.dev;
14013 
14014 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14015 
14016 	if (!encoder || WARN_ON(!encoder->crtc))
14017 		return INVALID_PIPE;
14018 
14019 	return to_intel_crtc(encoder->crtc)->pipe;
14020 }
14021 
14022 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14023 				struct drm_file *file)
14024 {
14025 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14026 	struct drm_crtc *drmmode_crtc;
14027 	struct intel_crtc *crtc;
14028 
14029 	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14030 
14031 	if (!drmmode_crtc) {
14032 		DRM_ERROR("no such CRTC id\n");
14033 		return -ENOENT;
14034 	}
14035 
14036 	crtc = to_intel_crtc(drmmode_crtc);
14037 	pipe_from_crtc_id->pipe = crtc->pipe;
14038 
14039 	return 0;
14040 }
14041 
14042 static int intel_encoder_clones(struct intel_encoder *encoder)
14043 {
14044 	struct drm_device *dev = encoder->base.dev;
14045 	struct intel_encoder *source_encoder;
14046 	int index_mask = 0;
14047 	int entry = 0;
14048 
14049 	for_each_intel_encoder(dev, source_encoder) {
14050 		if (encoders_cloneable(encoder, source_encoder))
14051 			index_mask |= (1 << entry);
14052 
14053 		entry++;
14054 	}
14055 
14056 	return index_mask;
14057 }
14058 
14059 static bool has_edp_a(struct drm_device *dev)
14060 {
14061 	struct drm_i915_private *dev_priv = dev->dev_private;
14062 
14063 	if (!IS_MOBILE(dev))
14064 		return false;
14065 
14066 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14067 		return false;
14068 
14069 	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14070 		return false;
14071 
14072 	return true;
14073 }
14074 
14075 static bool intel_crt_present(struct drm_device *dev)
14076 {
14077 	struct drm_i915_private *dev_priv = dev->dev_private;
14078 
14079 	if (INTEL_INFO(dev)->gen >= 9)
14080 		return false;
14081 
14082 	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14083 		return false;
14084 
14085 	if (IS_CHERRYVIEW(dev))
14086 		return false;
14087 
14088 	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
14089 		return false;
14090 
14091 	return true;
14092 }
14093 
14094 static void intel_setup_outputs(struct drm_device *dev)
14095 {
14096 	struct drm_i915_private *dev_priv = dev->dev_private;
14097 	struct intel_encoder *encoder;
14098 	bool dpd_is_edp = false;
14099 
14100 	intel_lvds_init(dev);
14101 
14102 	if (intel_crt_present(dev))
14103 		intel_crt_init(dev);
14104 
14105 	if (IS_BROXTON(dev)) {
14106 		/*
14107 		 * FIXME: Broxton doesn't support port detection via the
14108 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14109 		 * detect the ports.
14110 		 */
14111 		intel_ddi_init(dev, PORT_A);
14112 		intel_ddi_init(dev, PORT_B);
14113 		intel_ddi_init(dev, PORT_C);
14114 	} else if (HAS_DDI(dev)) {
14115 		int found;
14116 
14117 		/*
14118 		 * Haswell uses DDI functions to detect digital outputs.
14119 		 * On SKL pre-D0 the strap isn't connected, so we assume
14120 		 * it's there.
14121 		 */
14122 		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14123 		/* WaIgnoreDDIAStrap: skl */
14124 		if (found || IS_SKYLAKE(dev))
14125 			intel_ddi_init(dev, PORT_A);
14126 
14127 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
14128 		 * register */
14129 		found = I915_READ(SFUSE_STRAP);
14130 
14131 		if (found & SFUSE_STRAP_DDIB_DETECTED)
14132 			intel_ddi_init(dev, PORT_B);
14133 		if (found & SFUSE_STRAP_DDIC_DETECTED)
14134 			intel_ddi_init(dev, PORT_C);
14135 		if (found & SFUSE_STRAP_DDID_DETECTED)
14136 			intel_ddi_init(dev, PORT_D);
14137 		/*
14138 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14139 		 */
14140 		if (IS_SKYLAKE(dev) &&
14141 		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14142 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14143 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14144 			intel_ddi_init(dev, PORT_E);
14145 
14146 	} else if (HAS_PCH_SPLIT(dev)) {
14147 		int found;
14148 		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14149 
14150 		if (has_edp_a(dev))
14151 			intel_dp_init(dev, DP_A, PORT_A);
14152 
14153 		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14154 			/* PCH SDVOB multiplex with HDMIB */
14155 			found = intel_sdvo_init(dev, PCH_SDVOB, true);
14156 			if (!found)
14157 				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14158 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14159 				intel_dp_init(dev, PCH_DP_B, PORT_B);
14160 		}
14161 
14162 		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14163 			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14164 
14165 		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14166 			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14167 
14168 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
14169 			intel_dp_init(dev, PCH_DP_C, PORT_C);
14170 
14171 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
14172 			intel_dp_init(dev, PCH_DP_D, PORT_D);
14173 	} else if (IS_VALLEYVIEW(dev)) {
14174 		bool has_edp, has_port;
14175 
14176 		/*
14177 		 * The DP_DETECTED bit is the latched state of the DDC
14178 		 * SDA pin at boot. However since eDP doesn't require DDC
14179 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14180 		 * eDP ports may have been muxed to an alternate function.
14181 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
14182 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
14183 		 * detect eDP ports.
14184 		 *
14185 		 * Sadly the straps seem to be missing sometimes even for HDMI
14186 		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
14187 		 * and VBT for the presence of the port. Additionally we can't
14188 		 * trust the port type the VBT declares as we've seen at least
14189 		 * HDMI ports that the VBT claim are DP or eDP.
14190 		 */
14191 		has_edp = intel_dp_is_edp(dev, PORT_B);
14192 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14193 		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14194 			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14195 		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14196 			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14197 
14198 		has_edp = intel_dp_is_edp(dev, PORT_C);
14199 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14200 		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14201 			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14202 		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14203 			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14204 
14205 		if (IS_CHERRYVIEW(dev)) {
14206 			/*
14207 			 * eDP not supported on port D,
14208 			 * so no need to worry about it
14209 			 */
14210 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14211 			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14212 				intel_dp_init(dev, CHV_DP_D, PORT_D);
14213 			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14214 				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14215 		}
14216 
14217 		intel_dsi_init(dev);
14218 	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14219 		bool found = false;
14220 
14221 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14222 			DRM_DEBUG_KMS("probing SDVOB\n");
14223 			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
14224 			if (!found && IS_G4X(dev)) {
14225 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14226 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14227 			}
14228 
14229 			if (!found && IS_G4X(dev))
14230 				intel_dp_init(dev, DP_B, PORT_B);
14231 		}
14232 
14233 		/* Before G4X, SDVOC doesn't have its own detect register */
14234 
14235 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14236 			DRM_DEBUG_KMS("probing SDVOC\n");
14237 			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
14238 		}
14239 
14240 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14241 
14242 			if (IS_G4X(dev)) {
14243 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14244 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14245 			}
14246 			if (IS_G4X(dev))
14247 				intel_dp_init(dev, DP_C, PORT_C);
14248 		}
14249 
14250 		if (IS_G4X(dev) &&
14251 		    (I915_READ(DP_D) & DP_DETECTED))
14252 			intel_dp_init(dev, DP_D, PORT_D);
14253 	} else if (IS_GEN2(dev))
14254 		intel_dvo_init(dev);
14255 
14256 	if (SUPPORTS_TV(dev))
14257 		intel_tv_init(dev);
14258 
14259 	intel_psr_init(dev);
14260 
14261 	for_each_intel_encoder(dev, encoder) {
14262 		encoder->base.possible_crtcs = encoder->crtc_mask;
14263 		encoder->base.possible_clones =
14264 			intel_encoder_clones(encoder);
14265 	}
14266 
14267 	intel_init_pch_refclk(dev);
14268 
14269 	drm_helper_move_panel_connectors_to_head(dev);
14270 }
14271 
14272 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14273 {
14274 	struct drm_device *dev = fb->dev;
14275 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14276 
14277 	drm_framebuffer_cleanup(fb);
14278 	mutex_lock(&dev->struct_mutex);
14279 	WARN_ON(!intel_fb->obj->framebuffer_references--);
14280 	drm_gem_object_unreference(&intel_fb->obj->base);
14281 	mutex_unlock(&dev->struct_mutex);
14282 	kfree(intel_fb);
14283 }
14284 
14285 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14286 						struct drm_file *file,
14287 						unsigned int *handle)
14288 {
14289 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14290 	struct drm_i915_gem_object *obj = intel_fb->obj;
14291 
14292 	if (obj->userptr.mm) {
14293 		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14294 		return -EINVAL;
14295 	}
14296 
14297 	return drm_gem_handle_create(file, &obj->base, handle);
14298 }
14299 
14300 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14301 					struct drm_file *file,
14302 					unsigned flags, unsigned color,
14303 					struct drm_clip_rect *clips,
14304 					unsigned num_clips)
14305 {
14306 	struct drm_device *dev = fb->dev;
14307 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14308 	struct drm_i915_gem_object *obj = intel_fb->obj;
14309 
14310 	mutex_lock(&dev->struct_mutex);
14311 	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14312 	mutex_unlock(&dev->struct_mutex);
14313 
14314 	return 0;
14315 }
14316 
14317 static const struct drm_framebuffer_funcs intel_fb_funcs = {
14318 	.destroy = intel_user_framebuffer_destroy,
14319 	.create_handle = intel_user_framebuffer_create_handle,
14320 	.dirty = intel_user_framebuffer_dirty,
14321 };
14322 
14323 static
14324 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14325 			 uint32_t pixel_format)
14326 {
14327 	u32 gen = INTEL_INFO(dev)->gen;
14328 
14329 	if (gen >= 9) {
14330 		/* "The stride in bytes must not exceed the size of 8K
14331 		 *  pixels and 32K bytes."
14332 		 */
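		/* e.g. 32bpp formats get min(8192 * 4, 32768) = 32768, while
		 * 16bpp RGB565 is capped at min(8192 * 2, 32768) = 16384
		 * bytes (illustrative numbers). */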
14333 		return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
14334 	} else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
14335 		return 32*1024;
14336 	} else if (gen >= 4) {
14337 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14338 			return 16*1024;
14339 		else
14340 			return 32*1024;
14341 	} else if (gen >= 3) {
14342 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14343 			return 8*1024;
14344 		else
14345 			return 16*1024;
14346 	} else {
14347 		/* XXX DSPC is limited to 4k tiled */
14348 		return 8*1024;
14349 	}
14350 }
14351 
14352 static int intel_framebuffer_init(struct drm_device *dev,
14353 				  struct intel_framebuffer *intel_fb,
14354 				  struct drm_mode_fb_cmd2 *mode_cmd,
14355 				  struct drm_i915_gem_object *obj)
14356 {
14357 	unsigned int aligned_height;
14358 	int ret;
14359 	u32 pitch_limit, stride_alignment;
14360 
14361 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14362 
14363 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14364 		/* Enforce that fb modifier and tiling mode match, but only for
14365 		 * X-tiled. This is needed for FBC. */
14366 		if (!!(obj->tiling_mode == I915_TILING_X) !=
14367 		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14368 			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14369 			return -EINVAL;
14370 		}
14371 	} else {
14372 		if (obj->tiling_mode == I915_TILING_X)
14373 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14374 		else if (obj->tiling_mode == I915_TILING_Y) {
14375 			DRM_DEBUG("No Y tiling for legacy addfb\n");
14376 			return -EINVAL;
14377 		}
14378 	}
14379 
14380 	/* Sanity-check the passed-in modifier. */
14381 	switch (mode_cmd->modifier[0]) {
14382 	case I915_FORMAT_MOD_Y_TILED:
14383 	case I915_FORMAT_MOD_Yf_TILED:
14384 		if (INTEL_INFO(dev)->gen < 9) {
14385 			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14386 				  mode_cmd->modifier[0]);
14387 			return -EINVAL;
14388 		}
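		/* fall through - Y/Yf tiling is acceptable on gen9+ */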
14389 	case DRM_FORMAT_MOD_NONE:
14390 	case I915_FORMAT_MOD_X_TILED:
14391 		break;
14392 	default:
14393 		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14394 			  mode_cmd->modifier[0]);
14395 		return -EINVAL;
14396 	}
14397 
14398 	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
14399 						     mode_cmd->pixel_format);
14400 	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14401 		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14402 			  mode_cmd->pitches[0], stride_alignment);
14403 		return -EINVAL;
14404 	}
14405 
14406 	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14407 					   mode_cmd->pixel_format);
14408 	if (mode_cmd->pitches[0] > pitch_limit) {
14409 		DRM_DEBUG("%s pitch (%u) must be at most %d\n",
14410 			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14411 			  "tiled" : "linear",
14412 			  mode_cmd->pitches[0], pitch_limit);
14413 		return -EINVAL;
14414 	}
14415 
14416 	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14417 	    mode_cmd->pitches[0] != obj->stride) {
14418 		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14419 			  mode_cmd->pitches[0], obj->stride);
14420 		return -EINVAL;
14421 	}
14422 
14423 	/* Reject formats not supported by any plane early. */
14424 	switch (mode_cmd->pixel_format) {
14425 	case DRM_FORMAT_C8:
14426 	case DRM_FORMAT_RGB565:
14427 	case DRM_FORMAT_XRGB8888:
14428 	case DRM_FORMAT_ARGB8888:
14429 		break;
14430 	case DRM_FORMAT_XRGB1555:
14431 		if (INTEL_INFO(dev)->gen > 3) {
14432 			DRM_DEBUG("unsupported pixel format: %s\n",
14433 				  drm_get_format_name(mode_cmd->pixel_format));
14434 			return -EINVAL;
14435 		}
14436 		break;
14437 	case DRM_FORMAT_ABGR8888:
14438 		if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
14439 			DRM_DEBUG("unsupported pixel format: %s\n",
14440 				  drm_get_format_name(mode_cmd->pixel_format));
14441 			return -EINVAL;
14442 		}
14443 		break;
14444 	case DRM_FORMAT_XBGR8888:
14445 	case DRM_FORMAT_XRGB2101010:
14446 	case DRM_FORMAT_XBGR2101010:
14447 		if (INTEL_INFO(dev)->gen < 4) {
14448 			DRM_DEBUG("unsupported pixel format: %s\n",
14449 				  drm_get_format_name(mode_cmd->pixel_format));
14450 			return -EINVAL;
14451 		}
14452 		break;
14453 	case DRM_FORMAT_ABGR2101010:
14454 		if (!IS_VALLEYVIEW(dev)) {
14455 			DRM_DEBUG("unsupported pixel format: %s\n",
14456 				  drm_get_format_name(mode_cmd->pixel_format));
14457 			return -EINVAL;
14458 		}
14459 		break;
14460 	case DRM_FORMAT_YUYV:
14461 	case DRM_FORMAT_UYVY:
14462 	case DRM_FORMAT_YVYU:
14463 	case DRM_FORMAT_VYUY:
14464 		if (INTEL_INFO(dev)->gen < 5) {
14465 			DRM_DEBUG("unsupported pixel format: %s\n",
14466 				  drm_get_format_name(mode_cmd->pixel_format));
14467 			return -EINVAL;
14468 		}
14469 		break;
14470 	default:
14471 		DRM_DEBUG("unsupported pixel format: %s\n",
14472 			  drm_get_format_name(mode_cmd->pixel_format));
14473 		return -EINVAL;
14474 	}
14475 
14476 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14477 	if (mode_cmd->offsets[0] != 0)
14478 		return -EINVAL;
14479 
14480 	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
14481 					       mode_cmd->pixel_format,
14482 					       mode_cmd->modifier[0]);
14483 	/* FIXME drm helper for size checks (especially planar formats)? */
14484 	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14485 		return -EINVAL;
14486 
14487 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14488 	intel_fb->obj = obj;
14489 	intel_fb->obj->framebuffer_references++;
14490 
14491 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14492 	if (ret) {
14493 		DRM_ERROR("framebuffer init failed %d\n", ret);
14494 		return ret;
14495 	}
14496 
14497 	return 0;
14498 }
14499 
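/*
 * Worked example of the validation above (illustrative numbers, not
 * taken from any specific platform): a 1920x1080 XRGB8888 fb with a
 * linear (DRM_FORMAT_MOD_NONE) layout needs pitches[0] >= 1920 * 4 =
 * 7680 bytes (the minimum width * cpp pitch is checked by the drm core
 * before the driver is called), pitches[0] must additionally be a
 * multiple of the stride alignment reported by
 * intel_fb_stride_alignment() and no larger than the
 * intel_fb_pitch_limit() value, and the backing object must hold at
 * least aligned_height * pitches[0] bytes.
 */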
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd.handles[0]));
	/*
	 * drm_gem_object_lookup() returns NULL for an unknown handle;
	 * since base is the first member of struct drm_i915_gem_object,
	 * &obj->base is then NULL as well.
	 */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}

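/*
 * Sketch of the matching userspace side (not part of this file, and
 * gem_handle is a placeholder for a handle obtained from a GEM create
 * ioctl beforehand): framebuffer creation arrives here through
 * DRM_IOCTL_MODE_ADDFB2, roughly
 *
 *	struct drm_mode_fb_cmd2 cmd = {
 *		.width = 1920,
 *		.height = 1080,
 *		.pixel_format = DRM_FORMAT_XRGB8888,
 *		.handles[0] = gem_handle,
 *		.pitches[0] = 1920 * 4,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd);
 *
 * The drm core decodes the ioctl and ends up calling
 * dev->mode_config.funcs->fb_create(), i.e. the function above.
 */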
#ifndef CONFIG_DRM_FBDEV_EMULATION
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};

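/*
 * intel_mode_funcs is what plugs this file into the drm core:
 * fb_create backs the ADDFB/ADDFB2 ioctls via
 * intel_user_framebuffer_create() above, and the atomic_* hooks route
 * all atomic modeset checking and committing through the i915-specific
 * implementations rather than the generic helpers.
 */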
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_CHERRYVIEW(dev))
		dev_priv->display.find_dpll = chv_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	if (INTEL_INFO(dev)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.update_primary_plane =
			skylake_update_primary_plane;
	} else if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev) || IS_G4X(dev))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	if (IS_GEN5(dev)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
		if (IS_BROADWELL(dev)) {
			dev_priv->display.modeset_commit_cdclk =
				broadwell_modeset_commit_cdclk;
			dev_priv->display.modeset_calc_cdclk =
				broadwell_modeset_calc_cdclk;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			broxton_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broxton_modeset_calc_cdclk;
	}

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}

	mutex_init(&dev_priv->pps_mutex);
}

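/*
 * All of the hooks installed above are invoked indirectly through
 * dev_priv->display; e.g. a modeset enabling a pipe ends up doing
 * (roughly)
 *
 *	dev_priv->display.crtc_enable(&crtc->base);
 *
 * so the per-platform choice is made once at init time instead of
 * being re-checked on every call.
 */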
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBTs incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

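/*
 * Note on the DMI table above: DMI_MATCH() is a substring match, so the
 * empty DMI_PRODUCT_NAME string matches any product from that vendor.
 * dmi_check_system() runs the .callback for every entry whose .matches
 * all hit and returns the number of matches, which is what
 * intel_init_quirks() below keys off to apply the .hook.
 */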
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};

static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

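/*
 * Adding coverage for a new machine is just another row in
 * intel_quirks[] above; e.g. (all IDs hypothetical):
 *
 *	{ 0x2a42, 0x17aa, 0x3a0b, quirk_invert_brightness },
 *
 * matches PCI device 0x2a42 with subsystem vendor 0x17aa and subsystem
 * device 0x3a0b, while PCI_ANY_ID in either subsystem field acts as a
 * wildcard.
 */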
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	/* Bit 5 of the VGA sequencer clocking mode register (SR01) is "screen off" */
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_update_cdclk(dev);
	intel_prepare_ddi(dev);
	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev);
}

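/*
 * intel_modeset_init_hw() above is kept separate because that subset of
 * the hardware setup is also re-run from the resume path, whereas
 * intel_modeset_init() below only happens at driver load.
 */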
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	intel_init_display(dev);
	intel_init_audio(dev);

	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_fbc_disable(dev_priv);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}
}

static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	for_each_intel_connector(dev, connector) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}

static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	val = I915_READ(DSPCNTR(!crtc->plane));

	/*
	 * Collapsing the pipe select bits with !! yields 0 for pipe A and
	 * 1 for pipe B, which is sufficient here since this check only
	 * runs on gen < 4, where there are at most two pipes.
	 */
	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Returning from the first iteration means "at least one encoder" */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config->cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		enum plane plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this; the state
		 * sanitation below restores the pipe to the right state,
		 * but leaves the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (!intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active != crtc->base.state->active) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because of calls to intel_crtc_disable_noatomic,
		 * or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.state->enable ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only established when things are
		 *  actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
			encoder->base.crtc = NULL;
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;
	bool active = false;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	for_each_intel_connector(dev, connector) {
		if (connector->base.encoder != &encoder->base)
			continue;

		active = true;
		break;
	}

	if (active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}

void i915_redisable_vga_power_on(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled the VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}

void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called either from intel_modeset_setup_hw_state
	 * or at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);
}

static bool primary_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
	struct drm_plane *primary = crtc->base.primary;
	struct intel_plane_state *plane_state =
		to_intel_plane_state(primary->state);

	plane_state->visible =
		primary_get_hw_state(to_intel_plane(primary));

	if (plane_state->visible)
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}

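/*
 * crtc_state->plane_mask is a bitmask indexed by drm_plane_index(); the
 * readout above only covers the primary plane (see the FIXME), so e.g.
 * a visible primary plane with index 3 results in plane_mask = 0x8.
 */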
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	for_each_intel_crtc(dev, crtc) {
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
		memset(crtc->config, 0, sizeof(*crtc->config));
		crtc->config->base.crtc = &crtc->base;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 crtc->config);

		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		pll->active = 0;
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some of the state updated by the
			 * connectors in their ->detect() callbacks is not
			 * available yet, so no recalculation can be done.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}
	}
}

/* Scan out the current hw modeset state and sanitize it, so that the
 * software and hardware state agree.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_VALLEYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);
}

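/*
 * Restore the display state after e.g. a suspend/resume cycle: build an
 * atomic state that duplicates every crtc, plane and connector, re-read
 * and sanitize the hardware state, then commit the preserved software
 * state back to the hardware in one go.
 */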
void intel_display_resume(struct drm_device *dev)
{
	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
	struct intel_connector *conn;
	struct intel_plane *plane;
	struct drm_crtc *crtc;
	int ret;

	if (!state)
		return;

	state->acquire_ctx = dev->mode_config.acquire_ctx;

	/* preserve complete old state, including dpll */
	intel_atomic_get_shared_dpll_state(state);

	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}

	for_each_intel_plane(dev, plane) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
		if (ret)
			goto err;
	}

	for_each_intel_connector(dev, conn) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
		if (ret)
			goto err;
	}

	intel_modeset_setup_hw_state(dev);

	i915_redisable_vga(dev);
	ret = drm_atomic_commit(state);
	if (!ret)
		return;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_free(state);
}

void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced.  When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary,
						 c->primary->fb,
						 c->primary->state,
						 NULL, NULL);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}

void intel_connector_unregister(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;

	intel_panel_destroy_backlight(connector);
	drm_connector_unregister(connector);
}

void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Disable interrupts and polling as the first thing to avoid creating
	 * havoc. Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct intel_connector *intel_connector;

		intel_connector = to_intel_connector(connector);
		intel_connector->unregister(intel_connector);
	}

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_teardown_gmbus(dev);
}

/*
 * Return which encoder is currently attached to the connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}

void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Nothing to do if VGA decode is already in the requested state */
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

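/*
 * The capture/print pair above is wired into the i915 error-state
 * machinery: intel_display_capture_error_state() runs from the GPU
 * error capture path (hence GFP_ATOMIC for the allocation), while
 * intel_display_print_error_state() formats the snapshot when userspace
 * later reads the error state, e.g. via the card's "error" file.
 */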
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		struct intel_unpin_work *work;

		spin_lock_irq(&dev->event_lock);

		work = crtc->unpin_work;

		if (work && work->event &&
		    work->event->base.file_priv == file) {
			kfree(work->event);
			work->event = NULL;
		}

		spin_unlock_irq(&dev->event_lock);
	}
}