/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
#include <linux/dma_remapping.h>

#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
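/*
 * Editorial note: HAS_eDP expands to a call that reads a variable named
 * `crtc` from the scope where the macro is used, so it is only valid
 * inside functions that have a local struct drm_crtc *crtc.
 */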

bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

typedef struct {
	int min, max;
} intel_range_t;

typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM	2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
	bool (*find_pll)(const intel_limit_t *, struct drm_crtc *,
			 int, int, intel_clock_t *, intel_clock_t *);
};

/* FDI */
#define IRONLAKE_FDI_FREQ	2700000 /* in kHz for mode->clock */

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}
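/*
 * Illustrative note (editorial): on non-Gen5 parts the fixed return value
 * of 27 corresponds to 27 * 100 MHz = 2.7 GHz, matching the
 * IRONLAKE_FDI_FREQ define above (2700000 kHz). On Gen5 the "+ 2" suggests
 * the feedback-clock field in FDI_PLL_BIOS_0 stores (value - 2).
 */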

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
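/*
 * Worked example (editorial): with the .n range of { .min = 1, .max = 5 }
 * in the table below, the actual hardware N divider computed by
 * intel_clock() as (register_value + 2) spans 3..7.
 */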
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};

static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
			      unsigned int reg)
{
	unsigned int val;

	if (dev_priv->lvds_val)
		val = dev_priv->lvds_val;
	else {
		/* BIOS should set the proper LVDS register value at boot, but
		 * in reality, it doesn't set the value when the lid is closed;
		 * we need to check "the value to be set" in VBT when LVDS
		 * register is uninitialized.
		 */
		val = I915_READ(reg);
		if (!(val & ~LVDS_DETECTED))
			val = dev_priv->bios_lvds_val;
		dev_priv->lvds_val = val;
	}
	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, LVDS))
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
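/*
 * Worked example (editorial, illustrative values): with refclk = 96000 kHz,
 * m1 = 12, m2 = 9, n = 2, p1 = 2, p2 = 5 on a non-Pineview part:
 *	m   = 5 * (12 + 2) + (9 + 2) = 81
 *	p   = 2 * 5 = 10
 *	vco = 96000 * 81 / (2 + 2) = 1944000 kHz
 *	dot = 1944000 / 10 = 194400 kHz
 */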

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if (is_dual_link_lvds(dev_priv, LVDS))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
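/*
 * Editorial note: err is seeded with the target itself, so the boolean
 * result (err != target) is true exactly when at least one divisor
 * combination passed validation and improved on that initial error bound,
 * i.e. when *best_clock holds a usable result.
 */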

static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 3/512, i.e. target * 0.00586 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
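/*
 * Sanity check (editorial): plugging the fixed divisors above into the
 * dot-clock formula with the 96 MHz reference:
 *	low rate:  m = 5 * (23 + 2) + (8 + 2) = 135,
 *	           dot = 96000 * 135 / (2 + 2) / 20 = 162000 kHz (162 MHz)
 *	high rate: m = 5 * (14 + 2) + (2 + 2) = 84,
 *	           dot = 96000 * 84 / (1 + 2) / 10 = 268800 kHz (~270 MHz)
 */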

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		WARN(!((pch_dpll >> (4 * pipe)) & 8),
		     "transcoder %d PLL not enabled\n", pipe);

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}

static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DISPLAY_PLANE_ENABLE),
		     "plane %c assertion failure, should be disabled but is enabled\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}

/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	int reg;
	u32 val;

	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}

static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;

	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}

static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val, pipeconf_val;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= pipeconf_val & PIPE_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}

static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}

/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if (val & DISPLAY_PLANE_ENABLE)
		return;

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, val & ~DP_PORT_EN);
	}
}

static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
			     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
			      reg, pipe);
		I915_WRITE(reg, val & ~PORT_ENABLE);
	}
}

/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 reg, val;

	val = I915_READ(PCH_PP_CONTROL);
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (adpa_pipe_enabled(dev_priv, pipe, val))
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
		POSTING_READ(reg);
		udelay(100);
	}

	disable_pch_hdmi(dev_priv, pipe, HDMIB);
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
	disable_pch_hdmi(dev_priv, pipe, HDMID);
}

static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
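/*
 * Editorial note on the three-write sequence above: ECOSKPD appears to be a
 * masked register, where the bits shifted up by GEN6_BLITTER_LOCK_SHIFT act
 * as a write-enable mask for the corresponding low bits. The sequence first
 * unmasks FBC_NOTIFY, then sets it, then re-masks it so later unrelated
 * writes cannot disturb the bit. This reading is inferred from the code
 * itself rather than stated in the original source.
 */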
1687
ironlake_enable_fbc(struct drm_crtc * crtc,unsigned long interval)1688 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1689 {
1690 struct drm_device *dev = crtc->dev;
1691 struct drm_i915_private *dev_priv = dev->dev_private;
1692 struct drm_framebuffer *fb = crtc->fb;
1693 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1694 struct drm_i915_gem_object *obj = intel_fb->obj;
1695 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1696 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1697 unsigned long stall_watermark = 200;
1698 u32 dpfc_ctl;
1699
1700 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1701 dpfc_ctl &= DPFC_RESERVED;
1702 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1703 	/* Set persistent mode for front-buffer rendering, a la X. */
1704 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1705 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1706 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1707
1708 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1709 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1710 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1711 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1712 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1713 /* enable it... */
1714 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1715
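	/* On SNB the CPU fence guarding the scanout must additionally be
	 * programmed into SNB_DPFC_CTL_SA so that CPU writes through it
	 * invalidate the compressed buffer; the blitter is handled below.
	 */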
1716 if (IS_GEN6(dev)) {
1717 I915_WRITE(SNB_DPFC_CTL_SA,
1718 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1719 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1720 sandybridge_blit_fbc_update(dev);
1721 }
1722
1723 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1724 }
1725
1726 static void ironlake_disable_fbc(struct drm_device *dev)
1727 {
1728 struct drm_i915_private *dev_priv = dev->dev_private;
1729 u32 dpfc_ctl;
1730
1731 /* Disable compression */
1732 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1733 if (dpfc_ctl & DPFC_CTL_EN) {
1734 dpfc_ctl &= ~DPFC_CTL_EN;
1735 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1736
1737 DRM_DEBUG_KMS("disabled FBC\n");
1738 }
1739 }
1740
1741 static bool ironlake_fbc_enabled(struct drm_device *dev)
1742 {
1743 struct drm_i915_private *dev_priv = dev->dev_private;
1744
1745 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1746 }
1747
1748 bool intel_fbc_enabled(struct drm_device *dev)
1749 {
1750 struct drm_i915_private *dev_priv = dev->dev_private;
1751
1752 if (!dev_priv->display.fbc_enabled)
1753 return false;
1754
1755 return dev_priv->display.fbc_enabled(dev);
1756 }
1757
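/* Deferred FBC enable: runs ~50ms after intel_enable_fbc() schedules it,
 * re-checking under struct_mutex that the crtc still scans out the same
 * fb before committing the enable to hardware.
 */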
1758 static void intel_fbc_work_fn(struct work_struct *__work)
1759 {
1760 struct intel_fbc_work *work =
1761 container_of(to_delayed_work(__work),
1762 struct intel_fbc_work, work);
1763 struct drm_device *dev = work->crtc->dev;
1764 struct drm_i915_private *dev_priv = dev->dev_private;
1765
1766 mutex_lock(&dev->struct_mutex);
1767 if (work == dev_priv->fbc_work) {
1768 /* Double check that we haven't switched fb without cancelling
1769 * the prior work.
1770 */
1771 if (work->crtc->fb == work->fb) {
1772 dev_priv->display.enable_fbc(work->crtc,
1773 work->interval);
1774
1775 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1776 dev_priv->cfb_fb = work->crtc->fb->base.id;
1777 dev_priv->cfb_y = work->crtc->y;
1778 }
1779
1780 dev_priv->fbc_work = NULL;
1781 }
1782 mutex_unlock(&dev->struct_mutex);
1783
1784 kfree(work);
1785 }
1786
1787 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1788 {
1789 if (dev_priv->fbc_work == NULL)
1790 return;
1791
1792 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1793
1794 /* Synchronisation is provided by struct_mutex and checking of
1795 * dev_priv->fbc_work, so we can perform the cancellation
1796 * entirely asynchronously.
1797 */
1798 if (cancel_delayed_work(&dev_priv->fbc_work->work))
1799 		/* work was cancelled before it ran, clean up */
1800 kfree(dev_priv->fbc_work);
1801
1802 /* Mark the work as no longer wanted so that if it does
1803 * wake-up (because the work was already running and waiting
1804 	 * for our mutex), it will discover that it is no longer
1805 * necessary to run.
1806 */
1807 dev_priv->fbc_work = NULL;
1808 }
1809
1810 static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1811 {
1812 struct intel_fbc_work *work;
1813 struct drm_device *dev = crtc->dev;
1814 struct drm_i915_private *dev_priv = dev->dev_private;
1815
1816 if (!dev_priv->display.enable_fbc)
1817 return;
1818
1819 intel_cancel_fbc_work(dev_priv);
1820
1821 work = kzalloc(sizeof *work, GFP_KERNEL);
1822 if (work == NULL) {
1823 dev_priv->display.enable_fbc(crtc, interval);
1824 return;
1825 }
1826
1827 work->crtc = crtc;
1828 work->fb = crtc->fb;
1829 work->interval = interval;
1830 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1831
1832 dev_priv->fbc_work = work;
1833
1834 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1835
1836 /* Delay the actual enabling to let pageflipping cease and the
1837 	 * display settle before starting the compression. Note that
1838 * this delay also serves a second purpose: it allows for a
1839 * vblank to pass after disabling the FBC before we attempt
1840 * to modify the control registers.
1841 *
1842 * A more complicated solution would involve tracking vblanks
1843 * following the termination of the page-flipping sequence
1844 * and indeed performing the enable as a co-routine and not
1845 * waiting synchronously upon the vblank.
1846 */
1847 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1848 }
1849
1850 void intel_disable_fbc(struct drm_device *dev)
1851 {
1852 struct drm_i915_private *dev_priv = dev->dev_private;
1853
1854 intel_cancel_fbc_work(dev_priv);
1855
1856 if (!dev_priv->display.disable_fbc)
1857 return;
1858
1859 dev_priv->display.disable_fbc(dev);
1860 dev_priv->cfb_plane = -1;
1861 }
1862
1863 /**
1864 * intel_update_fbc - enable/disable FBC as needed
1865 * @dev: the drm_device
1866 *
1867 * Set up the framebuffer compression hardware at mode set time. We
1868 * enable it if possible:
1869 * - plane A only (on pre-965)
1870  *   - no pixel multiply/line duplication
1871 * - no alpha buffer discard
1872 * - no dual wide
1873 * - framebuffer <= 2048 in width, 1536 in height
1874 *
1875 * We can't assume that any compression will take place (worst case),
1876 * so the compressed buffer has to be the same size as the uncompressed
1877 * one. It also must reside (along with the line length buffer) in
1878 * stolen memory.
1879 *
1880 * We need to enable/disable FBC on a global basis.
1881 */
1882 static void intel_update_fbc(struct drm_device *dev)
1883 {
1884 struct drm_i915_private *dev_priv = dev->dev_private;
1885 struct drm_crtc *crtc = NULL, *tmp_crtc;
1886 struct intel_crtc *intel_crtc;
1887 struct drm_framebuffer *fb;
1888 struct intel_framebuffer *intel_fb;
1889 struct drm_i915_gem_object *obj;
1890 int enable_fbc;
1891
1892 DRM_DEBUG_KMS("\n");
1893
1894 if (!i915_powersave)
1895 return;
1896
1897 if (!I915_HAS_FBC(dev))
1898 return;
1899
1900 /*
1901 * If FBC is already on, we just have to verify that we can
1902 * keep it that way...
1903 * Need to disable if:
1904 * - more than one pipe is active
1905 * - changing FBC params (stride, fence, mode)
1906 * - new fb is too large to fit in compressed buffer
1907 * - going to an unsupported config (interlace, pixel multiply, etc.)
1908 */
1909 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1910 if (tmp_crtc->enabled && tmp_crtc->fb) {
1911 if (crtc) {
1912 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1913 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1914 goto out_disable;
1915 }
1916 crtc = tmp_crtc;
1917 }
1918 }
1919
1920 if (!crtc || crtc->fb == NULL) {
1921 DRM_DEBUG_KMS("no output, disabling\n");
1922 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1923 goto out_disable;
1924 }
1925
1926 intel_crtc = to_intel_crtc(crtc);
1927 fb = crtc->fb;
1928 intel_fb = to_intel_framebuffer(fb);
1929 obj = intel_fb->obj;
1930
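	/* i915_enable_fbc module parameter: negative means use the
	 * per-chip default chosen below, 0 forces FBC off, positive
	 * forces it on.
	 */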
1931 enable_fbc = i915_enable_fbc;
1932 if (enable_fbc < 0) {
1933 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1934 enable_fbc = 1;
1935 if (INTEL_INFO(dev)->gen <= 6)
1936 enable_fbc = 0;
1937 }
1938 if (!enable_fbc) {
1939 DRM_DEBUG_KMS("fbc disabled per module param\n");
1940 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1941 goto out_disable;
1942 }
1943 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1944 DRM_DEBUG_KMS("framebuffer too large, disabling "
1945 "compression\n");
1946 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1947 goto out_disable;
1948 }
1949 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1950 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1951 DRM_DEBUG_KMS("mode incompatible with compression, "
1952 "disabling\n");
1953 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1954 goto out_disable;
1955 }
1956 if ((crtc->mode.hdisplay > 2048) ||
1957 (crtc->mode.vdisplay > 1536)) {
1958 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1959 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1960 goto out_disable;
1961 }
1962 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1963 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1964 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1965 goto out_disable;
1966 }
1967
1968 /* The use of a CPU fence is mandatory in order to detect writes
1969 * by the CPU to the scanout and trigger updates to the FBC.
1970 */
1971 if (obj->tiling_mode != I915_TILING_X ||
1972 obj->fence_reg == I915_FENCE_REG_NONE) {
1973 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1974 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1975 goto out_disable;
1976 }
1977
1978 /* If the kernel debugger is active, always disable compression */
1979 if (in_dbg_master())
1980 goto out_disable;
1981
1982 /* If the scanout has not changed, don't modify the FBC settings.
1983 * Note that we make the fundamental assumption that the fb->obj
1984 * cannot be unpinned (and have its GTT offset and fence revoked)
1985 * without first being decoupled from the scanout and FBC disabled.
1986 */
1987 if (dev_priv->cfb_plane == intel_crtc->plane &&
1988 dev_priv->cfb_fb == fb->base.id &&
1989 dev_priv->cfb_y == crtc->y)
1990 return;
1991
1992 if (intel_fbc_enabled(dev)) {
1993 /* We update FBC along two paths, after changing fb/crtc
1994 * configuration (modeswitching) and after page-flipping
1995 * finishes. For the latter, we know that not only did
1996 * we disable the FBC at the start of the page-flip
1997 * sequence, but also more than one vblank has passed.
1998 *
1999 * For the former case of modeswitching, it is possible
2000 * to switch between two FBC valid configurations
2001 * instantaneously so we do need to disable the FBC
2002 * before we can modify its control registers. We also
2003 * have to wait for the next vblank for that to take
2004 * effect. However, since we delay enabling FBC we can
2005 * assume that a vblank has passed since disabling and
2006 * that we can safely alter the registers in the deferred
2007 * callback.
2008 *
2009 * In the scenario that we go from a valid to invalid
2010 * and then back to valid FBC configuration we have
2011 * no strict enforcement that a vblank occurred since
2012 * disabling the FBC. However, along all current pipe
2013 * disabling paths we do need to wait for a vblank at
2014 * some point. And we wait before enabling FBC anyway.
2015 */
2016 DRM_DEBUG_KMS("disabling active FBC for update\n");
2017 intel_disable_fbc(dev);
2018 }
2019
2020 intel_enable_fbc(crtc, 500);
2021 return;
2022
2023 out_disable:
2024 /* Multiple disables should be harmless */
2025 if (intel_fbc_enabled(dev)) {
2026 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2027 intel_disable_fbc(dev);
2028 }
2029 }
2030
2031 int
2032 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2033 struct drm_i915_gem_object *obj,
2034 struct intel_ring_buffer *pipelined)
2035 {
2036 struct drm_i915_private *dev_priv = dev->dev_private;
2037 u32 alignment;
2038 int ret;
2039
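	/* Display surfaces have generation-specific base-alignment
	 * requirements; tiled surfaces are instead aligned by pin()
	 * according to the fence constraints.
	 */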
2040 switch (obj->tiling_mode) {
2041 case I915_TILING_NONE:
2042 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2043 alignment = 128 * 1024;
2044 else if (INTEL_INFO(dev)->gen >= 4)
2045 alignment = 4 * 1024;
2046 else
2047 alignment = 64 * 1024;
2048 break;
2049 case I915_TILING_X:
2050 /* pin() will align the object as required by fence */
2051 alignment = 0;
2052 break;
2053 case I915_TILING_Y:
2054 /* FIXME: Is this true? */
2055 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
2056 return -EINVAL;
2057 default:
2058 BUG();
2059 }
2060
2061 dev_priv->mm.interruptible = false;
2062 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2063 if (ret)
2064 goto err_interruptible;
2065
2066 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2067 * fence, whereas 965+ only requires a fence if using
2068 * framebuffer compression. For simplicity, we always install
2069 * a fence as the cost is not that onerous.
2070 */
2071 if (obj->tiling_mode != I915_TILING_NONE) {
2072 ret = i915_gem_object_get_fence(obj, pipelined);
2073 if (ret)
2074 goto err_unpin;
2075
2076 i915_gem_object_pin_fence(obj);
2077 }
2078
2079 dev_priv->mm.interruptible = true;
2080 return 0;
2081
2082 err_unpin:
2083 i915_gem_object_unpin(obj);
2084 err_interruptible:
2085 dev_priv->mm.interruptible = true;
2086 return ret;
2087 }
2088
2089 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2090 {
2091 i915_gem_object_unpin_fence(obj);
2092 i915_gem_object_unpin(obj);
2093 }
2094
2095 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2096 int x, int y)
2097 {
2098 struct drm_device *dev = crtc->dev;
2099 struct drm_i915_private *dev_priv = dev->dev_private;
2100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2101 struct intel_framebuffer *intel_fb;
2102 struct drm_i915_gem_object *obj;
2103 int plane = intel_crtc->plane;
2104 unsigned long Start, Offset;
2105 u32 dspcntr;
2106 u32 reg;
2107
2108 switch (plane) {
2109 case 0:
2110 case 1:
2111 break;
2112 default:
2113 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2114 return -EINVAL;
2115 }
2116
2117 intel_fb = to_intel_framebuffer(fb);
2118 obj = intel_fb->obj;
2119
2120 reg = DSPCNTR(plane);
2121 dspcntr = I915_READ(reg);
2122 /* Mask out pixel format bits in case we change it */
2123 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2124 switch (fb->bits_per_pixel) {
2125 case 8:
2126 dspcntr |= DISPPLANE_8BPP;
2127 break;
2128 case 16:
2129 if (fb->depth == 15)
2130 dspcntr |= DISPPLANE_15_16BPP;
2131 else
2132 dspcntr |= DISPPLANE_16BPP;
2133 break;
2134 case 24:
2135 case 32:
2136 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2137 break;
2138 default:
2139 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2140 return -EINVAL;
2141 }
2142 if (INTEL_INFO(dev)->gen >= 4) {
2143 if (obj->tiling_mode != I915_TILING_NONE)
2144 dspcntr |= DISPPLANE_TILED;
2145 else
2146 dspcntr &= ~DISPPLANE_TILED;
2147 }
2148
2149 I915_WRITE(reg, dspcntr);
2150
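	/* Base is the object's GTT offset; Offset is where (x, y) lands
	 * within the fb: y * stride + x * bytes-per-pixel.
	 */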
2151 Start = obj->gtt_offset;
2152 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2153
2154 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2155 Start, Offset, x, y, fb->pitches[0]);
2156 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
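	/* Gen4+ takes a surface base (DSPSURF) plus a separate x/y tile
	 * offset, while older parts take one linear address in DSPADDR.
	 */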
2157 if (INTEL_INFO(dev)->gen >= 4) {
2158 I915_WRITE(DSPSURF(plane), Start);
2159 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2160 I915_WRITE(DSPADDR(plane), Offset);
2161 } else
2162 I915_WRITE(DSPADDR(plane), Start + Offset);
2163 POSTING_READ(reg);
2164
2165 return 0;
2166 }
2167
2168 static int ironlake_update_plane(struct drm_crtc *crtc,
2169 struct drm_framebuffer *fb, int x, int y)
2170 {
2171 struct drm_device *dev = crtc->dev;
2172 struct drm_i915_private *dev_priv = dev->dev_private;
2173 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2174 struct intel_framebuffer *intel_fb;
2175 struct drm_i915_gem_object *obj;
2176 int plane = intel_crtc->plane;
2177 unsigned long Start, Offset;
2178 u32 dspcntr;
2179 u32 reg;
2180
2181 switch (plane) {
2182 case 0:
2183 case 1:
2184 case 2:
2185 break;
2186 default:
2187 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2188 return -EINVAL;
2189 }
2190
2191 intel_fb = to_intel_framebuffer(fb);
2192 obj = intel_fb->obj;
2193
2194 reg = DSPCNTR(plane);
2195 dspcntr = I915_READ(reg);
2196 /* Mask out pixel format bits in case we change it */
2197 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2198 switch (fb->bits_per_pixel) {
2199 case 8:
2200 dspcntr |= DISPPLANE_8BPP;
2201 break;
2202 case 16:
2203 if (fb->depth != 16)
2204 return -EINVAL;
2205
2206 dspcntr |= DISPPLANE_16BPP;
2207 break;
2208 case 24:
2209 case 32:
2210 if (fb->depth == 24)
2211 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2212 else if (fb->depth == 30)
2213 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2214 else
2215 return -EINVAL;
2216 break;
2217 default:
2218 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2219 return -EINVAL;
2220 }
2221
2222 if (obj->tiling_mode != I915_TILING_NONE)
2223 dspcntr |= DISPPLANE_TILED;
2224 else
2225 dspcntr &= ~DISPPLANE_TILED;
2226
2227 /* must disable */
2228 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2229
2230 I915_WRITE(reg, dspcntr);
2231
2232 Start = obj->gtt_offset;
2233 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2234
2235 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2236 Start, Offset, x, y, fb->pitches[0]);
2237 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2238 I915_WRITE(DSPSURF(plane), Start);
2239 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2240 I915_WRITE(DSPADDR(plane), Offset);
2241 POSTING_READ(reg);
2242
2243 return 0;
2244 }
2245
2246 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2247 static int
2248 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2249 int x, int y, enum mode_set_atomic state)
2250 {
2251 struct drm_device *dev = crtc->dev;
2252 struct drm_i915_private *dev_priv = dev->dev_private;
2253 int ret;
2254
2255 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2256 if (ret)
2257 return ret;
2258
2259 intel_update_fbc(dev);
2260 intel_increase_pllclock(crtc);
2261
2262 return 0;
2263 }
2264
2265 static int
2266 intel_finish_fb(struct drm_framebuffer *old_fb)
2267 {
2268 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2269 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2270 bool was_interruptible = dev_priv->mm.interruptible;
2271 int ret;
2272
2273 wait_event(dev_priv->pending_flip_queue,
2274 atomic_read(&dev_priv->mm.wedged) ||
2275 atomic_read(&obj->pending_flip) == 0);
2276
2277 /* Big Hammer, we also need to ensure that any pending
2278 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2279 * current scanout is retired before unpinning the old
2280 * framebuffer.
2281 *
2282 * This should only fail upon a hung GPU, in which case we
2283 * can safely continue.
2284 */
2285 dev_priv->mm.interruptible = false;
2286 ret = i915_gem_object_finish_gpu(obj);
2287 dev_priv->mm.interruptible = was_interruptible;
2288
2289 return ret;
2290 }
2291
2292 static int
2293 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2294 struct drm_framebuffer *old_fb)
2295 {
2296 struct drm_device *dev = crtc->dev;
2297 struct drm_i915_master_private *master_priv;
2298 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2299 int ret;
2300
2301 /* no fb bound */
2302 if (!crtc->fb) {
2303 DRM_ERROR("No FB bound\n");
2304 return 0;
2305 }
2306
2307 switch (intel_crtc->plane) {
2308 case 0:
2309 case 1:
2310 break;
2311 case 2:
2312 if (IS_IVYBRIDGE(dev))
2313 break;
2314 /* fall through otherwise */
2315 default:
2316 DRM_ERROR("no plane for crtc\n");
2317 return -EINVAL;
2318 }
2319
2320 mutex_lock(&dev->struct_mutex);
2321 ret = intel_pin_and_fence_fb_obj(dev,
2322 to_intel_framebuffer(crtc->fb)->obj,
2323 NULL);
2324 if (ret != 0) {
2325 mutex_unlock(&dev->struct_mutex);
2326 DRM_ERROR("pin & fence failed\n");
2327 return ret;
2328 }
2329
2330 if (old_fb)
2331 intel_finish_fb(old_fb);
2332
2333 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2334 LEAVE_ATOMIC_MODE_SET);
2335 if (ret) {
2336 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2337 mutex_unlock(&dev->struct_mutex);
2338 DRM_ERROR("failed to update base address\n");
2339 return ret;
2340 }
2341
2342 if (old_fb) {
2343 intel_wait_for_vblank(dev, intel_crtc->pipe);
2344 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2345 }
2346
2347 mutex_unlock(&dev->struct_mutex);
2348
2349 if (!dev->primary->master)
2350 return 0;
2351
2352 master_priv = dev->primary->master->driver_priv;
2353 if (!master_priv->sarea_priv)
2354 return 0;
2355
2356 if (intel_crtc->pipe) {
2357 master_priv->sarea_priv->pipeB_x = x;
2358 master_priv->sarea_priv->pipeB_y = y;
2359 } else {
2360 master_priv->sarea_priv->pipeA_x = x;
2361 master_priv->sarea_priv->pipeA_y = y;
2362 }
2363
2364 return 0;
2365 }
2366
2367 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2368 {
2369 struct drm_device *dev = crtc->dev;
2370 struct drm_i915_private *dev_priv = dev->dev_private;
2371 u32 dpa_ctl;
2372
2373 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2374 dpa_ctl = I915_READ(DP_A);
2375 dpa_ctl &= ~DP_PLL_FREQ_MASK;
2376
2377 if (clock < 200000) {
2378 u32 temp;
2379 dpa_ctl |= DP_PLL_FREQ_160MHZ;
2380 /* workaround for 160Mhz:
2381 1) program 0x4600c bits 15:0 = 0x8124
2382 2) program 0x46010 bit 0 = 1
2383 3) program 0x46034 bit 24 = 1
2384 4) program 0x64000 bit 14 = 1
2385 */
2386 temp = I915_READ(0x4600c);
2387 temp &= 0xffff0000;
2388 I915_WRITE(0x4600c, temp | 0x8124);
2389
2390 temp = I915_READ(0x46010);
2391 I915_WRITE(0x46010, temp | 1);
2392
2393 temp = I915_READ(0x46034);
2394 I915_WRITE(0x46034, temp | (1 << 24));
2395 } else {
2396 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2397 }
2398 I915_WRITE(DP_A, dpa_ctl);
2399
2400 POSTING_READ(DP_A);
2401 udelay(500);
2402 }
2403
2404 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2405 {
2406 struct drm_device *dev = crtc->dev;
2407 struct drm_i915_private *dev_priv = dev->dev_private;
2408 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2409 int pipe = intel_crtc->pipe;
2410 u32 reg, temp;
2411
2412 /* enable normal train */
2413 reg = FDI_TX_CTL(pipe);
2414 temp = I915_READ(reg);
2415 if (IS_IVYBRIDGE(dev)) {
2416 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2417 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2418 } else {
2419 temp &= ~FDI_LINK_TRAIN_NONE;
2420 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2421 }
2422 I915_WRITE(reg, temp);
2423
2424 reg = FDI_RX_CTL(pipe);
2425 temp = I915_READ(reg);
2426 if (HAS_PCH_CPT(dev)) {
2427 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2428 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2429 } else {
2430 temp &= ~FDI_LINK_TRAIN_NONE;
2431 temp |= FDI_LINK_TRAIN_NONE;
2432 }
2433 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2434
2435 /* wait one idle pattern time */
2436 POSTING_READ(reg);
2437 udelay(1000);
2438
2439 /* IVB wants error correction enabled */
2440 if (IS_IVYBRIDGE(dev))
2441 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2442 FDI_FE_ERRC_ENABLE);
2443 }
2444
2445 /* The FDI link training functions for ILK/Ibexpeak. */
2446 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2447 {
2448 struct drm_device *dev = crtc->dev;
2449 struct drm_i915_private *dev_priv = dev->dev_private;
2450 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2451 int pipe = intel_crtc->pipe;
2452 int plane = intel_crtc->plane;
2453 u32 reg, temp, tries;
2454
2455 /* FDI needs bits from pipe & plane first */
2456 assert_pipe_enabled(dev_priv, pipe);
2457 assert_plane_enabled(dev_priv, plane);
2458
2459 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2460 for train result */
2461 reg = FDI_RX_IMR(pipe);
2462 temp = I915_READ(reg);
2463 temp &= ~FDI_RX_SYMBOL_LOCK;
2464 temp &= ~FDI_RX_BIT_LOCK;
2465 I915_WRITE(reg, temp);
2466 I915_READ(reg);
2467 udelay(150);
2468
2469 /* enable CPU FDI TX and PCH FDI RX */
2470 reg = FDI_TX_CTL(pipe);
2471 temp = I915_READ(reg);
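	/* lane count is programmed as (fdi_lanes - 1) in bits 21:19 */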
2472 temp &= ~(7 << 19);
2473 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2474 temp &= ~FDI_LINK_TRAIN_NONE;
2475 temp |= FDI_LINK_TRAIN_PATTERN_1;
2476 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2477
2478 reg = FDI_RX_CTL(pipe);
2479 temp = I915_READ(reg);
2480 temp &= ~FDI_LINK_TRAIN_NONE;
2481 temp |= FDI_LINK_TRAIN_PATTERN_1;
2482 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2483
2484 POSTING_READ(reg);
2485 udelay(150);
2486
2487 	/* Ironlake workaround, enable clock pointer after FDI enable */
2488 if (HAS_PCH_IBX(dev)) {
2489 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2490 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2491 FDI_RX_PHASE_SYNC_POINTER_EN);
2492 }
2493
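	/* Poll FDI_RX_IIR: training pattern 1 is complete once the PCH
	 * receiver reports FDI_RX_BIT_LOCK.
	 */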
2494 reg = FDI_RX_IIR(pipe);
2495 for (tries = 0; tries < 5; tries++) {
2496 temp = I915_READ(reg);
2497 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2498
2499 if ((temp & FDI_RX_BIT_LOCK)) {
2500 DRM_DEBUG_KMS("FDI train 1 done.\n");
2501 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2502 break;
2503 }
2504 }
2505 if (tries == 5)
2506 DRM_ERROR("FDI train 1 fail!\n");
2507
2508 /* Train 2 */
2509 reg = FDI_TX_CTL(pipe);
2510 temp = I915_READ(reg);
2511 temp &= ~FDI_LINK_TRAIN_NONE;
2512 temp |= FDI_LINK_TRAIN_PATTERN_2;
2513 I915_WRITE(reg, temp);
2514
2515 reg = FDI_RX_CTL(pipe);
2516 temp = I915_READ(reg);
2517 temp &= ~FDI_LINK_TRAIN_NONE;
2518 temp |= FDI_LINK_TRAIN_PATTERN_2;
2519 I915_WRITE(reg, temp);
2520
2521 POSTING_READ(reg);
2522 udelay(150);
2523
2524 reg = FDI_RX_IIR(pipe);
2525 for (tries = 0; tries < 5; tries++) {
2526 temp = I915_READ(reg);
2527 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2528
2529 if (temp & FDI_RX_SYMBOL_LOCK) {
2530 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2531 DRM_DEBUG_KMS("FDI train 2 done.\n");
2532 break;
2533 }
2534 }
2535 if (tries == 5)
2536 DRM_ERROR("FDI train 2 fail!\n");
2537
2538 DRM_DEBUG_KMS("FDI train done\n");
2539
2540 }
2541
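/* Voltage-swing/pre-emphasis combinations stepped through in order during
 * SNB/IVB FDI training until the receiver reports lock.
 */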
2542 static const int snb_b_fdi_train_param[] = {
2543 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2544 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2545 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2546 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2547 };
2548
2549 /* The FDI link training functions for SNB/Cougarpoint. */
2550 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2551 {
2552 struct drm_device *dev = crtc->dev;
2553 struct drm_i915_private *dev_priv = dev->dev_private;
2554 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2555 int pipe = intel_crtc->pipe;
2556 u32 reg, temp, i;
2557
2558 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2559 for train result */
2560 reg = FDI_RX_IMR(pipe);
2561 temp = I915_READ(reg);
2562 temp &= ~FDI_RX_SYMBOL_LOCK;
2563 temp &= ~FDI_RX_BIT_LOCK;
2564 I915_WRITE(reg, temp);
2565
2566 POSTING_READ(reg);
2567 udelay(150);
2568
2569 /* enable CPU FDI TX and PCH FDI RX */
2570 reg = FDI_TX_CTL(pipe);
2571 temp = I915_READ(reg);
2572 temp &= ~(7 << 19);
2573 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2574 temp &= ~FDI_LINK_TRAIN_NONE;
2575 temp |= FDI_LINK_TRAIN_PATTERN_1;
2576 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2577 /* SNB-B */
2578 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2579 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2580
2581 reg = FDI_RX_CTL(pipe);
2582 temp = I915_READ(reg);
2583 if (HAS_PCH_CPT(dev)) {
2584 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2585 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2586 } else {
2587 temp &= ~FDI_LINK_TRAIN_NONE;
2588 temp |= FDI_LINK_TRAIN_PATTERN_1;
2589 }
2590 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2591
2592 POSTING_READ(reg);
2593 udelay(150);
2594
2595 for (i = 0; i < 4; i++) {
2596 reg = FDI_TX_CTL(pipe);
2597 temp = I915_READ(reg);
2598 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2599 temp |= snb_b_fdi_train_param[i];
2600 I915_WRITE(reg, temp);
2601
2602 POSTING_READ(reg);
2603 udelay(500);
2604
2605 reg = FDI_RX_IIR(pipe);
2606 temp = I915_READ(reg);
2607 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2608
2609 if (temp & FDI_RX_BIT_LOCK) {
2610 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2611 DRM_DEBUG_KMS("FDI train 1 done.\n");
2612 break;
2613 }
2614 }
2615 if (i == 4)
2616 DRM_ERROR("FDI train 1 fail!\n");
2617
2618 /* Train 2 */
2619 reg = FDI_TX_CTL(pipe);
2620 temp = I915_READ(reg);
2621 temp &= ~FDI_LINK_TRAIN_NONE;
2622 temp |= FDI_LINK_TRAIN_PATTERN_2;
2623 if (IS_GEN6(dev)) {
2624 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2625 /* SNB-B */
2626 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2627 }
2628 I915_WRITE(reg, temp);
2629
2630 reg = FDI_RX_CTL(pipe);
2631 temp = I915_READ(reg);
2632 if (HAS_PCH_CPT(dev)) {
2633 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2634 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2635 } else {
2636 temp &= ~FDI_LINK_TRAIN_NONE;
2637 temp |= FDI_LINK_TRAIN_PATTERN_2;
2638 }
2639 I915_WRITE(reg, temp);
2640
2641 POSTING_READ(reg);
2642 udelay(150);
2643
2644 for (i = 0; i < 4; i++) {
2645 reg = FDI_TX_CTL(pipe);
2646 temp = I915_READ(reg);
2647 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2648 temp |= snb_b_fdi_train_param[i];
2649 I915_WRITE(reg, temp);
2650
2651 POSTING_READ(reg);
2652 udelay(500);
2653
2654 reg = FDI_RX_IIR(pipe);
2655 temp = I915_READ(reg);
2656 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2657
2658 if (temp & FDI_RX_SYMBOL_LOCK) {
2659 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2660 DRM_DEBUG_KMS("FDI train 2 done.\n");
2661 break;
2662 }
2663 }
2664 if (i == 4)
2665 DRM_ERROR("FDI train 2 fail!\n");
2666
2667 DRM_DEBUG_KMS("FDI train done.\n");
2668 }
2669
2670 /* Manual link training for Ivy Bridge A0 parts */
2671 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2672 {
2673 struct drm_device *dev = crtc->dev;
2674 struct drm_i915_private *dev_priv = dev->dev_private;
2675 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2676 int pipe = intel_crtc->pipe;
2677 u32 reg, temp, i;
2678
2679 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2680 for train result */
2681 reg = FDI_RX_IMR(pipe);
2682 temp = I915_READ(reg);
2683 temp &= ~FDI_RX_SYMBOL_LOCK;
2684 temp &= ~FDI_RX_BIT_LOCK;
2685 I915_WRITE(reg, temp);
2686
2687 POSTING_READ(reg);
2688 udelay(150);
2689
2690 /* enable CPU FDI TX and PCH FDI RX */
2691 reg = FDI_TX_CTL(pipe);
2692 temp = I915_READ(reg);
2693 temp &= ~(7 << 19);
2694 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2695 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2696 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2697 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2698 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2699 temp |= FDI_COMPOSITE_SYNC;
2700 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2701
2702 reg = FDI_RX_CTL(pipe);
2703 temp = I915_READ(reg);
2704 temp &= ~FDI_LINK_TRAIN_AUTO;
2705 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2706 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2707 temp |= FDI_COMPOSITE_SYNC;
2708 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2709
2710 POSTING_READ(reg);
2711 udelay(150);
2712
2713 for (i = 0; i < 4; i++) {
2714 reg = FDI_TX_CTL(pipe);
2715 temp = I915_READ(reg);
2716 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2717 temp |= snb_b_fdi_train_param[i];
2718 I915_WRITE(reg, temp);
2719
2720 POSTING_READ(reg);
2721 udelay(500);
2722
2723 reg = FDI_RX_IIR(pipe);
2724 temp = I915_READ(reg);
2725 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2726
2727 if (temp & FDI_RX_BIT_LOCK ||
2728 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2729 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2730 DRM_DEBUG_KMS("FDI train 1 done.\n");
2731 break;
2732 }
2733 }
2734 if (i == 4)
2735 DRM_ERROR("FDI train 1 fail!\n");
2736
2737 /* Train 2 */
2738 reg = FDI_TX_CTL(pipe);
2739 temp = I915_READ(reg);
2740 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2741 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2742 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2743 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2744 I915_WRITE(reg, temp);
2745
2746 reg = FDI_RX_CTL(pipe);
2747 temp = I915_READ(reg);
2748 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2749 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2750 I915_WRITE(reg, temp);
2751
2752 POSTING_READ(reg);
2753 udelay(150);
2754
2755 for (i = 0; i < 4; i++) {
2756 reg = FDI_TX_CTL(pipe);
2757 temp = I915_READ(reg);
2758 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2759 temp |= snb_b_fdi_train_param[i];
2760 I915_WRITE(reg, temp);
2761
2762 POSTING_READ(reg);
2763 udelay(500);
2764
2765 reg = FDI_RX_IIR(pipe);
2766 temp = I915_READ(reg);
2767 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2768
2769 if (temp & FDI_RX_SYMBOL_LOCK) {
2770 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2771 DRM_DEBUG_KMS("FDI train 2 done.\n");
2772 break;
2773 }
2774 }
2775 if (i == 4)
2776 DRM_ERROR("FDI train 2 fail!\n");
2777
2778 DRM_DEBUG_KMS("FDI train done.\n");
2779 }
2780
2781 static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2782 {
2783 struct drm_device *dev = crtc->dev;
2784 struct drm_i915_private *dev_priv = dev->dev_private;
2785 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2786 int pipe = intel_crtc->pipe;
2787 u32 reg, temp;
2788
2789 /* Write the TU size bits so error detection works */
2790 I915_WRITE(FDI_RX_TUSIZE1(pipe),
2791 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2792
2793 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2794 reg = FDI_RX_CTL(pipe);
2795 temp = I915_READ(reg);
2796 temp &= ~((0x7 << 19) | (0x7 << 16));
2797 temp |= (intel_crtc->fdi_lanes - 1) << 19;
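	/* FDI RX bpc mirrors PIPECONF: the PIPE_BPC field is repositioned
	 * into FDI_RX_CTL by the << 11 below.
	 */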
2798 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2799 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2800
2801 POSTING_READ(reg);
2802 udelay(200);
2803
2804 /* Switch from Rawclk to PCDclk */
2805 temp = I915_READ(reg);
2806 I915_WRITE(reg, temp | FDI_PCDCLK);
2807
2808 POSTING_READ(reg);
2809 udelay(200);
2810
2811 /* Enable CPU FDI TX PLL, always on for Ironlake */
2812 reg = FDI_TX_CTL(pipe);
2813 temp = I915_READ(reg);
2814 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2815 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2816
2817 POSTING_READ(reg);
2818 udelay(100);
2819 }
2820 }
2821
2822 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2823 {
2824 struct drm_device *dev = crtc->dev;
2825 struct drm_i915_private *dev_priv = dev->dev_private;
2826 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2827 int pipe = intel_crtc->pipe;
2828 u32 reg, temp;
2829
2830 /* disable CPU FDI tx and PCH FDI rx */
2831 reg = FDI_TX_CTL(pipe);
2832 temp = I915_READ(reg);
2833 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2834 POSTING_READ(reg);
2835
2836 reg = FDI_RX_CTL(pipe);
2837 temp = I915_READ(reg);
2838 temp &= ~(0x7 << 16);
2839 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2840 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2841
2842 POSTING_READ(reg);
2843 udelay(100);
2844
2845 /* Ironlake workaround, disable clock pointer after downing FDI */
2846 if (HAS_PCH_IBX(dev)) {
2847 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2848 I915_WRITE(FDI_RX_CHICKEN(pipe),
2849 			   I915_READ(FDI_RX_CHICKEN(pipe)) &
2850 			   ~FDI_RX_PHASE_SYNC_POINTER_EN);
2851 }
2852
2853 /* still set train pattern 1 */
2854 reg = FDI_TX_CTL(pipe);
2855 temp = I915_READ(reg);
2856 temp &= ~FDI_LINK_TRAIN_NONE;
2857 temp |= FDI_LINK_TRAIN_PATTERN_1;
2858 I915_WRITE(reg, temp);
2859
2860 reg = FDI_RX_CTL(pipe);
2861 temp = I915_READ(reg);
2862 if (HAS_PCH_CPT(dev)) {
2863 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2864 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2865 } else {
2866 temp &= ~FDI_LINK_TRAIN_NONE;
2867 temp |= FDI_LINK_TRAIN_PATTERN_1;
2868 }
2869 /* BPC in FDI rx is consistent with that in PIPECONF */
2870 temp &= ~(0x07 << 16);
2871 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2872 I915_WRITE(reg, temp);
2873
2874 POSTING_READ(reg);
2875 udelay(100);
2876 }
2877
2878 /*
2879 * When we disable a pipe, we need to clear any pending scanline wait events
2880 * to avoid hanging the ring, which we assume we are waiting on.
2881 */
2882 static void intel_clear_scanline_wait(struct drm_device *dev)
2883 {
2884 struct drm_i915_private *dev_priv = dev->dev_private;
2885 struct intel_ring_buffer *ring;
2886 u32 tmp;
2887
2888 if (IS_GEN2(dev))
2889 /* Can't break the hang on i8xx */
2890 return;
2891
2892 ring = LP_RING(dev_priv);
2893 tmp = I915_READ_CTL(ring);
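	/* Rewriting RING_CTL while RING_WAIT is set kicks the ring out of
	 * its scanline wait.
	 */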
2894 if (tmp & RING_WAIT)
2895 I915_WRITE_CTL(ring, tmp);
2896 }
2897
2898 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2899 {
2900 struct drm_device *dev = crtc->dev;
2901 struct drm_i915_private *dev_priv = dev->dev_private;
2902 unsigned long flags;
2903 bool pending;
2904
2905 if (atomic_read(&dev_priv->mm.wedged))
2906 return false;
2907
2908 spin_lock_irqsave(&dev->event_lock, flags);
2909 pending = to_intel_crtc(crtc)->unpin_work != NULL;
2910 spin_unlock_irqrestore(&dev->event_lock, flags);
2911
2912 return pending;
2913 }
2914
2915 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2916 {
2917 struct drm_device *dev = crtc->dev;
2918 struct drm_i915_private *dev_priv = dev->dev_private;
2919
2920 if (crtc->fb == NULL)
2921 return;
2922
2923 wait_event(dev_priv->pending_flip_queue,
2924 !intel_crtc_has_pending_flip(crtc));
2925
2926 mutex_lock(&dev->struct_mutex);
2927 intel_finish_fb(crtc->fb);
2928 mutex_unlock(&dev->struct_mutex);
2929 }
2930
2931 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2932 {
2933 struct drm_device *dev = crtc->dev;
2934 struct drm_mode_config *mode_config = &dev->mode_config;
2935 struct intel_encoder *encoder;
2936
2937 /*
2938 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2939 * must be driven by its own crtc; no sharing is possible.
2940 */
2941 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2942 if (encoder->base.crtc != crtc)
2943 continue;
2944
2945 switch (encoder->type) {
2946 case INTEL_OUTPUT_EDP:
2947 if (!intel_encoder_is_pch_edp(&encoder->base))
2948 return false;
2949 continue;
2950 }
2951 }
2952
2953 return true;
2954 }
2955
2956 /*
2957 * Enable PCH resources required for PCH ports:
2958 * - PCH PLLs
2959 * - FDI training & RX/TX
2960 * - update transcoder timings
2961 * - DP transcoding bits
2962 * - transcoder
2963 */
2964 static void ironlake_pch_enable(struct drm_crtc *crtc)
2965 {
2966 struct drm_device *dev = crtc->dev;
2967 struct drm_i915_private *dev_priv = dev->dev_private;
2968 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2969 int pipe = intel_crtc->pipe;
2970 u32 reg, temp, transc_sel;
2971
2972 	/* For PCH output, train the FDI link */
2973 dev_priv->display.fdi_link_train(crtc);
2974
2975 intel_enable_pch_pll(dev_priv, pipe);
2976
2977 if (HAS_PCH_CPT(dev)) {
2978 transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
2979 TRANSC_DPLLB_SEL;
2980
2981 /* Be sure PCH DPLL SEL is set */
2982 temp = I915_READ(PCH_DPLL_SEL);
2983 if (pipe == 0) {
2984 temp &= ~(TRANSA_DPLLB_SEL);
2985 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2986 } else if (pipe == 1) {
2987 temp &= ~(TRANSB_DPLLB_SEL);
2988 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2989 } else if (pipe == 2) {
2990 temp &= ~(TRANSC_DPLLB_SEL);
2991 temp |= (TRANSC_DPLL_ENABLE | transc_sel);
2992 }
2993 I915_WRITE(PCH_DPLL_SEL, temp);
2994 }
2995
2996 /* set transcoder timing, panel must allow it */
2997 assert_panel_unlocked(dev_priv, pipe);
2998 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2999 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3000 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
3001
3002 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3003 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3004 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
3005 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
3006
3007 intel_fdi_normal_train(crtc);
3008
3009 /* For PCH DP, enable TRANS_DP_CTL */
3010 if (HAS_PCH_CPT(dev) &&
3011 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3012 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3013 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3014 reg = TRANS_DP_CTL(pipe);
3015 temp = I915_READ(reg);
3016 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3017 TRANS_DP_SYNC_MASK |
3018 TRANS_DP_BPC_MASK);
3019 temp |= (TRANS_DP_OUTPUT_ENABLE |
3020 TRANS_DP_ENH_FRAMING);
3021 temp |= bpc << 9; /* same format but at 11:9 */
3022
3023 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3024 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3025 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3026 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3027
3028 switch (intel_trans_dp_port_sel(crtc)) {
3029 case PCH_DP_B:
3030 temp |= TRANS_DP_PORT_SEL_B;
3031 break;
3032 case PCH_DP_C:
3033 temp |= TRANS_DP_PORT_SEL_C;
3034 break;
3035 case PCH_DP_D:
3036 temp |= TRANS_DP_PORT_SEL_D;
3037 break;
3038 default:
3039 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
3040 temp |= TRANS_DP_PORT_SEL_B;
3041 break;
3042 }
3043
3044 I915_WRITE(reg, temp);
3045 }
3046
3047 intel_enable_transcoder(dev_priv, pipe);
3048 }
3049
3050 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3051 {
3052 struct drm_i915_private *dev_priv = dev->dev_private;
3053 int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3054 u32 temp;
3055
3056 temp = I915_READ(dslreg);
3057 udelay(500);
3058 if (wait_for(I915_READ(dslreg) != temp, 5)) {
3059 /* Without this, mode sets may fail silently on FDI */
3060 I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3061 udelay(250);
3062 I915_WRITE(tc2reg, 0);
3063 if (wait_for(I915_READ(dslreg) != temp, 5))
3064 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3065 }
3066 }
3067
3068 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3069 {
3070 struct drm_device *dev = crtc->dev;
3071 struct drm_i915_private *dev_priv = dev->dev_private;
3072 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3073 int pipe = intel_crtc->pipe;
3074 int plane = intel_crtc->plane;
3075 u32 temp;
3076 bool is_pch_port;
3077
3078 if (intel_crtc->active)
3079 return;
3080
3081 intel_crtc->active = true;
3082 intel_update_watermarks(dev);
3083
3084 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3085 temp = I915_READ(PCH_LVDS);
3086 if ((temp & LVDS_PORT_EN) == 0)
3087 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3088 }
3089
3090 is_pch_port = intel_crtc_driving_pch(crtc);
3091
3092 if (is_pch_port)
3093 ironlake_fdi_pll_enable(crtc);
3094 else
3095 ironlake_fdi_disable(crtc);
3096
3097 /* Enable panel fitting for LVDS */
3098 if (dev_priv->pch_pf_size &&
3099 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3100 /* Force use of hard-coded filter coefficients
3101 * as some pre-programmed values are broken,
3102 * e.g. x201.
3103 */
3104 if (IS_IVYBRIDGE(dev))
3105 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3106 PF_PIPE_SEL_IVB(pipe));
3107 else
3108 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3109 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3110 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3111 }
3112
3113 /*
3114 * On ILK+ LUT must be loaded before the pipe is running but with
3115 * clocks enabled
3116 */
3117 intel_crtc_load_lut(crtc);
3118
3119 intel_enable_pipe(dev_priv, pipe, is_pch_port);
3120 intel_enable_plane(dev_priv, plane, pipe);
3121
3122 if (is_pch_port)
3123 ironlake_pch_enable(crtc);
3124
3125 mutex_lock(&dev->struct_mutex);
3126 intel_update_fbc(dev);
3127 mutex_unlock(&dev->struct_mutex);
3128
3129 intel_crtc_update_cursor(crtc, true);
3130 }
3131
3132 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3133 {
3134 struct drm_device *dev = crtc->dev;
3135 struct drm_i915_private *dev_priv = dev->dev_private;
3136 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3137 int pipe = intel_crtc->pipe;
3138 int plane = intel_crtc->plane;
3139 u32 reg, temp;
3140
3141 if (!intel_crtc->active)
3142 return;
3143
3144 intel_crtc_wait_for_pending_flips(crtc);
3145 drm_vblank_off(dev, pipe);
3146 intel_crtc_update_cursor(crtc, false);
3147
3148 intel_disable_plane(dev_priv, plane, pipe);
3149
3150 if (dev_priv->cfb_plane == plane)
3151 intel_disable_fbc(dev);
3152
3153 intel_disable_pipe(dev_priv, pipe);
3154
3155 /* Disable PF */
3156 I915_WRITE(PF_CTL(pipe), 0);
3157 I915_WRITE(PF_WIN_SZ(pipe), 0);
3158
3159 ironlake_fdi_disable(crtc);
3160
3161 /* This is a horrible layering violation; we should be doing this in
3162 * the connector/encoder ->prepare instead, but we don't always have
3163 * enough information there about the config to know whether it will
3164 * actually be necessary or just cause undesired flicker.
3165 */
3166 intel_disable_pch_ports(dev_priv, pipe);
3167
3168 intel_disable_transcoder(dev_priv, pipe);
3169
3170 if (HAS_PCH_CPT(dev)) {
3171 /* disable TRANS_DP_CTL */
3172 reg = TRANS_DP_CTL(pipe);
3173 temp = I915_READ(reg);
3174 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3175 temp |= TRANS_DP_PORT_SEL_NONE;
3176 I915_WRITE(reg, temp);
3177
3178 /* disable DPLL_SEL */
3179 temp = I915_READ(PCH_DPLL_SEL);
3180 switch (pipe) {
3181 case 0:
3182 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3183 break;
3184 case 1:
3185 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3186 break;
3187 case 2:
3188 /* C shares PLL A or B */
3189 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3190 break;
3191 default:
3192 BUG(); /* wtf */
3193 }
3194 I915_WRITE(PCH_DPLL_SEL, temp);
3195 }
3196
3197 /* disable PCH DPLL */
3198 if (!intel_crtc->no_pll)
3199 intel_disable_pch_pll(dev_priv, pipe);
3200
3201 /* Switch from PCDclk to Rawclk */
3202 reg = FDI_RX_CTL(pipe);
3203 temp = I915_READ(reg);
3204 I915_WRITE(reg, temp & ~FDI_PCDCLK);
3205
3206 /* Disable CPU FDI TX PLL */
3207 reg = FDI_TX_CTL(pipe);
3208 temp = I915_READ(reg);
3209 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3210
3211 POSTING_READ(reg);
3212 udelay(100);
3213
3214 reg = FDI_RX_CTL(pipe);
3215 temp = I915_READ(reg);
3216 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3217
3218 /* Wait for the clocks to turn off. */
3219 POSTING_READ(reg);
3220 udelay(100);
3221
3222 intel_crtc->active = false;
3223 intel_update_watermarks(dev);
3224
3225 mutex_lock(&dev->struct_mutex);
3226 intel_update_fbc(dev);
3227 intel_clear_scanline_wait(dev);
3228 mutex_unlock(&dev->struct_mutex);
3229 }
3230
3231 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3232 {
3233 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3234 int pipe = intel_crtc->pipe;
3235 int plane = intel_crtc->plane;
3236
3237 /* XXX: When our outputs are all unaware of DPMS modes other than off
3238 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3239 */
3240 switch (mode) {
3241 case DRM_MODE_DPMS_ON:
3242 case DRM_MODE_DPMS_STANDBY:
3243 case DRM_MODE_DPMS_SUSPEND:
3244 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3245 ironlake_crtc_enable(crtc);
3246 break;
3247
3248 case DRM_MODE_DPMS_OFF:
3249 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3250 ironlake_crtc_disable(crtc);
3251 break;
3252 }
3253 }
3254
3255 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3256 {
3257 if (!enable && intel_crtc->overlay) {
3258 struct drm_device *dev = intel_crtc->base.dev;
3259 struct drm_i915_private *dev_priv = dev->dev_private;
3260
3261 mutex_lock(&dev->struct_mutex);
3262 dev_priv->mm.interruptible = false;
3263 (void) intel_overlay_switch_off(intel_crtc->overlay);
3264 dev_priv->mm.interruptible = true;
3265 mutex_unlock(&dev->struct_mutex);
3266 }
3267
3268 /* Let userspace switch the overlay on again. In most cases userspace
3269 * has to recompute where to put it anyway.
3270 */
3271 }
3272
3273 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3274 {
3275 struct drm_device *dev = crtc->dev;
3276 struct drm_i915_private *dev_priv = dev->dev_private;
3277 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3278 int pipe = intel_crtc->pipe;
3279 int plane = intel_crtc->plane;
3280
3281 if (intel_crtc->active)
3282 return;
3283
3284 intel_crtc->active = true;
3285 intel_update_watermarks(dev);
3286
3287 intel_enable_pll(dev_priv, pipe);
3288 intel_enable_pipe(dev_priv, pipe, false);
3289 intel_enable_plane(dev_priv, plane, pipe);
3290
3291 intel_crtc_load_lut(crtc);
3292 intel_update_fbc(dev);
3293
3294 /* Give the overlay scaler a chance to enable if it's on this pipe */
3295 intel_crtc_dpms_overlay(intel_crtc, true);
3296 intel_crtc_update_cursor(crtc, true);
3297 }
3298
3299 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3300 {
3301 struct drm_device *dev = crtc->dev;
3302 struct drm_i915_private *dev_priv = dev->dev_private;
3303 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3304 int pipe = intel_crtc->pipe;
3305 int plane = intel_crtc->plane;
3306 u32 pctl;
3307
3308 if (!intel_crtc->active)
3309 return;
3310
3311 /* Give the overlay scaler a chance to disable if it's on this pipe */
3312 intel_crtc_wait_for_pending_flips(crtc);
3313 drm_vblank_off(dev, pipe);
3314 intel_crtc_dpms_overlay(intel_crtc, false);
3315 intel_crtc_update_cursor(crtc, false);
3316
3317 if (dev_priv->cfb_plane == plane)
3318 intel_disable_fbc(dev);
3319
3320 intel_disable_plane(dev_priv, plane, pipe);
3321 intel_disable_pipe(dev_priv, pipe);
3322
3323 	/* Disable panel fitter if it is on this pipe. */
3324 pctl = I915_READ(PFIT_CONTROL);
3325 if ((pctl & PFIT_ENABLE) &&
3326 ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
3327 I915_WRITE(PFIT_CONTROL, 0);
3328
3329 intel_disable_pll(dev_priv, pipe);
3330
3331 intel_crtc->active = false;
3332 intel_update_fbc(dev);
3333 intel_update_watermarks(dev);
3334 intel_clear_scanline_wait(dev);
3335 }
3336
3337 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3338 {
3339 /* XXX: When our outputs are all unaware of DPMS modes other than off
3340 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3341 */
3342 switch (mode) {
3343 case DRM_MODE_DPMS_ON:
3344 case DRM_MODE_DPMS_STANDBY:
3345 case DRM_MODE_DPMS_SUSPEND:
3346 i9xx_crtc_enable(crtc);
3347 break;
3348 case DRM_MODE_DPMS_OFF:
3349 i9xx_crtc_disable(crtc);
3350 break;
3351 }
3352 }
3353
3354 /**
3355 * Sets the power management mode of the pipe and plane.
3356 */
3357 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3358 {
3359 struct drm_device *dev = crtc->dev;
3360 struct drm_i915_private *dev_priv = dev->dev_private;
3361 struct drm_i915_master_private *master_priv;
3362 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3363 int pipe = intel_crtc->pipe;
3364 bool enabled;
3365
3366 if (intel_crtc->dpms_mode == mode)
3367 return;
3368
3369 intel_crtc->dpms_mode = mode;
3370
3371 dev_priv->display.dpms(crtc, mode);
3372
3373 if (!dev->primary->master)
3374 return;
3375
3376 master_priv = dev->primary->master->driver_priv;
3377 if (!master_priv->sarea_priv)
3378 return;
3379
3380 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3381
3382 switch (pipe) {
3383 case 0:
3384 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3385 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3386 break;
3387 case 1:
3388 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3389 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3390 break;
3391 default:
3392 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3393 break;
3394 }
3395 }
3396
3397 static void intel_crtc_disable(struct drm_crtc *crtc)
3398 {
3399 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3400 struct drm_device *dev = crtc->dev;
3401
3402 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3403 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3404 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3405
3406 if (crtc->fb) {
3407 mutex_lock(&dev->struct_mutex);
3408 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3409 mutex_unlock(&dev->struct_mutex);
3410 }
3411 }
3412
3413 /* Prepare for a mode set.
3414 *
3415 * Note we could be a lot smarter here. We need to figure out which outputs
3416  * will be enabled, which disabled (in short, how the config will change)
3417 * and perform the minimum necessary steps to accomplish that, e.g. updating
3418 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3419 * panel fitting is in the proper state, etc.
3420 */
3421 static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3422 {
3423 i9xx_crtc_disable(crtc);
3424 }
3425
3426 static void i9xx_crtc_commit(struct drm_crtc *crtc)
3427 {
3428 i9xx_crtc_enable(crtc);
3429 }
3430
3431 static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3432 {
3433 ironlake_crtc_disable(crtc);
3434 }
3435
3436 static void ironlake_crtc_commit(struct drm_crtc *crtc)
3437 {
3438 ironlake_crtc_enable(crtc);
3439 }
3440
3441 void intel_encoder_prepare(struct drm_encoder *encoder)
3442 {
3443 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3444 /* lvds has its own version of prepare see intel_lvds_prepare */
3445 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3446 }
3447
3448 void intel_encoder_commit(struct drm_encoder *encoder)
3449 {
3450 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3451 struct drm_device *dev = encoder->dev;
3452 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3453 struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3454
3455 /* lvds has its own version of commit see intel_lvds_commit */
3456 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3457
3458 if (HAS_PCH_CPT(dev))
3459 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3460 }
3461
3462 void intel_encoder_destroy(struct drm_encoder *encoder)
3463 {
3464 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3465
3466 drm_encoder_cleanup(encoder);
3467 kfree(intel_encoder);
3468 }
3469
3470 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3471 struct drm_display_mode *mode,
3472 struct drm_display_mode *adjusted_mode)
3473 {
3474 struct drm_device *dev = crtc->dev;
3475
3476 if (HAS_PCH_SPLIT(dev)) {
3477 /* FDI link clock is fixed at 2.7G */
3478 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3479 return false;
3480 }
3481
3482 /* All interlaced capable intel hw wants timings in frames. Note though
3483 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3484  * timings, so we need to be careful not to clobber these. */
3485 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3486 drm_mode_set_crtcinfo(adjusted_mode, 0);
3487
3488 return true;
3489 }
3490
3491 static int i945_get_display_clock_speed(struct drm_device *dev)
3492 {
3493 return 400000;
3494 }
3495
3496 static int i915_get_display_clock_speed(struct drm_device *dev)
3497 {
3498 return 333000;
3499 }
3500
3501 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3502 {
3503 return 200000;
3504 }
3505
3506 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3507 {
3508 u16 gcfgc = 0;
3509
3510 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3511
3512 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3513 return 133000;
3514 else {
3515 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3516 case GC_DISPLAY_CLOCK_333_MHZ:
3517 return 333000;
3518 default:
3519 case GC_DISPLAY_CLOCK_190_200_MHZ:
3520 return 190000;
3521 }
3522 }
3523 }
3524
i865_get_display_clock_speed(struct drm_device * dev)3525 static int i865_get_display_clock_speed(struct drm_device *dev)
3526 {
3527 return 266000;
3528 }
3529
i855_get_display_clock_speed(struct drm_device * dev)3530 static int i855_get_display_clock_speed(struct drm_device *dev)
3531 {
3532 u16 hpllcc = 0;
3533 /* Assume that the hardware is in the high speed state. This
3534 * should be the default.
3535 */
3536 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3537 case GC_CLOCK_133_200:
3538 case GC_CLOCK_100_200:
3539 return 200000;
3540 case GC_CLOCK_166_250:
3541 return 250000;
3542 case GC_CLOCK_100_133:
3543 return 133000;
3544 }
3545
3546 /* Shouldn't happen */
3547 return 0;
3548 }
3549
i830_get_display_clock_speed(struct drm_device * dev)3550 static int i830_get_display_clock_speed(struct drm_device *dev)
3551 {
3552 return 133000;
3553 }
3554
3555 struct fdi_m_n {
3556 u32 tu;
3557 u32 gmch_m;
3558 u32 gmch_n;
3559 u32 link_m;
3560 u32 link_n;
3561 };
3562
3563 static void
fdi_reduce_ratio(u32 * num,u32 * den)3564 fdi_reduce_ratio(u32 *num, u32 *den)
3565 {
3566 while (*num > 0xffffff || *den > 0xffffff) {
3567 *num >>= 1;
3568 *den >>= 1;
3569 }
3570 }
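
/*
 * Example (illustrative values): the M/N registers hold 24-bit fields, so
 * 0xffffff is the largest representable value. Given num = 0x3000000 and
 * den = 0x1000000, one iteration halves both to 0x1800000/0x800000 and a
 * second to 0xc00000/0x400000, which fits. The 3:1 ratio is preserved
 * exactly here; in general the truncating shifts may cost a small amount
 * of precision, which is acceptable for link timing.
 */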

static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
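
/*
 * Worked example (illustrative numbers, assuming pixel_clock and
 * link_clock are both in kHz): a 1920x1080@60 mode has a pixel clock of
 * about 148500 kHz. With 24 bits per pixel, 4 FDI lanes and a 270000 kHz
 * link clock:
 *
 *   gmch_m = 24 * 148500    = 3564000
 *   gmch_n = 270000 * 4 * 8 = 8640000
 *   link_m = 148500, link_n = 270000
 *
 * All values already fit in 24 bits, so fdi_reduce_ratio leaves them
 * unchanged.
 */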

struct intel_watermark_params {
	unsigned long fifo_size;
	unsigned long max_wm;
	unsigned long default_wm;
	unsigned long guard_size;
	unsigned long cacheline_size;
};

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};

static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};

static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO available to this plane, in FIFO entries
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
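
/*
 * Worked example (illustrative values, not tied to a specific platform):
 * with a 100000 kHz pixel clock, 4 bytes per pixel and latency_ns = 5000,
 * the FIFO drains (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes during
 * one latency period. With a 64-byte cacheline that is
 * DIV_ROUND_UP(2000, 64) = 32 entries; given a hypothetical fifo_size of
 * 96 and guard_size of 2, the watermark would be 96 - (32 + 2) = 62,
 * which is then clamped against max_wm.
 */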

struct cxsr_latency {
	int is_desktop;
	int is_ddr3;
	unsigned long fsb_freq;
	unsigned long mem_freq;
	unsigned long display_sr;
	unsigned long display_hpll_disable;
	unsigned long cursor_sr;
	unsigned long cursor_hpll_disable;
};

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (if we set it too low to save power, we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
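
/*
 * Illustrative DSPARB reading (hypothetical register value): DSPARB holds
 * the split points of the shared display FIFO. If the low 7 bits are 48
 * and the C-start field is 96, plane A owns entries 0-47 (size 48) and
 * plane B owns entries 48-95 (96 - 48 = 48).
 */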

static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->enabled && crtc->fb) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
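
/*
 * Worked example for the small buffer method above (illustrative numbers):
 * at clock = 148500 kHz and pixel_size = 4, the stream consumes
 * 148500 * 4 / 1000 = 594 bytes per microsecond, so a 5000 ns latency
 * costs 594 * 5000 / 1000 = 2970 bytes. With hdisplay = 1920 the TLB miss
 * term typically comes out negative here and drops out, leaving
 * DIV_ROUND_UP(2970, 64) = 47 cachelines plus the guard size.
 */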

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
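
/*
 * Example of the line_count idiom above (illustrative numbers): with
 * htotal = 2200 and clock = 148500 kHz, line_time_us = 2200 * 1000 /
 * 148500 = 14 in integer math. For latency_ns = 12000 that gives
 * line_count = (12000 / 14 + 1000) / 1000 = (857 + 1000) / 1000 = 1,
 * which matches trunc(latency / line time) + 1 = trunc(0.857) + 1 = 1:
 * the latency in lines, truncated, plus one full line of margin.
 */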

#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}

static void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       crtc->fb->bits_per_pixel / 8,
				       latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3 << 8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

#define ILK_LP0_PLANE_LATENCY	700
#define ILK_LP0_CURSOR_LATENCY	1300

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* FBC has its own way to disable the FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}

/*
 * Compute the watermark values for WM[1-3].
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
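
/*
 * Worked example of the spec formula above (illustrative numbers): for
 * *display_wm = 100 on a 1920-wide, 4 byte-per-pixel plane, line_size =
 * 7680 bytes and
 *
 *   FBC WM = DIV_ROUND_UP(100 * 64, 7680) + 2 = 1 + 2 = 3
 *
 * i.e., roughly, the primary watermark converted from 64-byte cachelines
 * into lines, plus two lines of slack.
 */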

static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}

void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/* IVB has 3 pipes */
	if (IS_IVYBRIDGE(dev) &&
	    g4x_compute_wm0(dev, 2,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEC_IVB);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEC_IVB, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1 << 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB supports 3 levels of watermarks.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
	 * and disabled in descending order.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}

static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	int clock;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*sprite_wm = display->guard_size;
		return false;
	}

	clock = crtc->mode.clock;

	/* Use the small buffer method to calculate the sprite watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size * display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}

static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;
	if (!clock) {
		*sprite_wm = 0;
		return false;
	}

	line_time_us = (sprite_width * 1000) / clock;
	if (!line_time_us) {
		*sprite_wm = 0;
		return false;
	}

	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	return *sprite_wm <= 0x3ff;
}
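
/*
 * The 0x3ff cap above presumably reflects the width of the LP sprite
 * watermark field: a value that does not fit in 10 bits cannot be
 * programmed, so the level is reported as unusable instead of clamped.
 */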

static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @dev: drm device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 * The normal calculation is:
 *   watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values here).
 *
 * The SR calculation is:
 *   watermark = (trunc(latency/line time)+1) * surface width *
 *     bytes per pixel
 * where
 *   line time = htotal / dotclock
 *   surface width = hdisplay for normal plane and 64 for cursor
 * and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
static void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}
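
/*
 * Numeric illustration of the SR formula in the comment above
 * (illustrative values): for a 1920x1080 mode with htotal = 2200 and a
 * 148500 kHz dot clock, line time = 2200 / 148.5 MHz, roughly 14.8 us.
 * With an assumed 12 us latency, trunc(12 / 14.8) + 1 = 1 line must be
 * buffered, so the display SR watermark covers 1 * 1920 * 4 = 7680 bytes
 * for a 32bpp plane; the cursor uses a surface width of 64 instead of
 * hdisplay.
 */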

void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
						   pixel_size);
}

static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915_panel_use_ssc >= 0)
		return i915_panel_use_ssc != 0;
	return dev_priv->lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 * @pipe_bpp: returned pipe bpp (3 * bits per color)
 * @mode: requested mode
 *
 * A pipe may be connected to one or more outputs. Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth. In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths. Resolve that here:
 *   LVDS typically supports only 6bpc, so clamp down in that case
 *   HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *   Displays may support a restricted set as well, check EDID and clamp as
 *     appropriate.
 *   DP may want to dither down to 6bpc to fit larger modes
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp,
					 struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc && edp_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down. (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
		display_bpc = 6;
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth. So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	display_bpc = min(display_bpc, bpc);

	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
		      bpc, display_bpc);

	*pipe_bpp = display_bpc * 3;

	return display_bpc != bpc;
}
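
/*
 * Example walk-through of the function above (hypothetical setup): an fb
 * of depth 24 maps to bpc = 8. If an attached LVDS panel is strapped for
 * 18bpp, display_bpc is clamped to 6, so *pipe_bpp becomes 18 and the
 * function returns true, telling the caller to enable dithering from the
 * 8bpc framebuffer down to the 6bpc pipe.
 */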

static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}

static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
				      intel_clock_t *clock)
{
	/* SDVO TV has fixed PLL values that depend on its clock range;
	 * this mirrors the VBIOS setting. */
	if (adjusted_mode->clock >= 100000
	    && adjusted_mode->clock < 140500) {
		clock->p1 = 2;
		clock->p2 = 10;
		clock->n = 3;
		clock->m1 = 16;
		clock->m2 = 8;
	} else if (adjusted_mode->clock >= 140500
		   && adjusted_mode->clock <= 200000) {
		clock->p1 = 1;
		clock->p2 = 10;
		clock->n = 6;
		clock->m1 = 12;
		clock->m2 = 8;
	}
}

static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
				     intel_clock_t *clock,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = (1 << reduced_clock->n) << 16 |
				reduced_clock->m1 << 8 | reduced_clock->m2;
	} else {
		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
				reduced_clock->m2;
	}

	I915_WRITE(FP0(pipe), fp);

	intel_crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
	}
}
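
/*
 * Illustrative FP register encoding (hypothetical divider values): on
 * non-Pineview parts, n = 2, m1 = 16, m2 = 8 packs as
 * (2 << 16) | (16 << 8) | 8 = 0x21008. Pineview instead stores (1 << n)
 * in the N field, so the same n would be written as (1 << 2) << 16.
 */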

static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, dspcntr, pipeconf, vsyncshift;
	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	u32 temp;
	u32 lvds_sync = 0;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE. The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
				 &reduced_clock : NULL);

	dpll = DPLL_VGA_MODE_DIS;

	if (!IS_GEN2(dev)) {
		if (is_lvds)
			dpll |= DPLLB_MODE_LVDS;
		else
			dpll |= DPLLB_MODE_DAC_SERIAL;
		if (is_sdvo) {
			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (pixel_multiplier > 1) {
				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
			}
			dpll |= DPLL_DVO_HIGH_SPEED;
		}
		if (is_dp)
			dpll |= DPLL_DVO_HIGH_SPEED;

		/* compute bitmask from p1 value */
		if (IS_PINEVIEW(dev))
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		else {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (IS_G4X(dev) && has_reduced_clock)
				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
		}
		switch (clock.p2) {
		case 5:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
			break;
		case 7:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
			break;
		case 10:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
			break;
		case 14:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
			break;
		}
		if (INTEL_INFO(dev)->gen >= 4)
			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	} else {
		if (is_lvds) {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		} else {
			if (clock.p1 == 2)
				dpll |= PLL_P1_DIVIDE_BY_TWO;
			else
				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (clock.p2 == 4)
				dpll |= PLL_P2_DIVIDE_BY_4;
		}
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

	dpll |= DPLL_VCO_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
5353 */
5354 if (is_lvds) {
5355 temp = I915_READ(LVDS);
5356 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5357 if (pipe == 1) {
5358 temp |= LVDS_PIPEB_SELECT;
5359 } else {
5360 temp &= ~LVDS_PIPEB_SELECT;
5361 }
5362 		/* set the corresponding LVDS_BORDER bit */
5363 temp |= dev_priv->lvds_border_bits;
5364 /* Set the B0-B3 data pairs corresponding to whether we're going to
5365 * set the DPLLs for dual-channel mode or not.
5366 */
5367 if (clock.p2 == 7)
5368 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5369 else
5370 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5371
5372 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5373 * appropriately here, but we need to look more thoroughly into how
5374 * panels behave in the two modes.
5375 */
5376 /* set the dithering flag on LVDS as needed */
5377 if (INTEL_INFO(dev)->gen >= 4) {
5378 if (dev_priv->lvds_dither)
5379 temp |= LVDS_ENABLE_DITHER;
5380 else
5381 temp &= ~LVDS_ENABLE_DITHER;
5382 }
5383 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5384 lvds_sync |= LVDS_HSYNC_POLARITY;
5385 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5386 lvds_sync |= LVDS_VSYNC_POLARITY;
5387 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5388 != lvds_sync) {
5389 char flags[2] = "-+";
5390 DRM_INFO("Changing LVDS panel from "
5391 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5392 flags[!(temp & LVDS_HSYNC_POLARITY)],
5393 flags[!(temp & LVDS_VSYNC_POLARITY)],
5394 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5395 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5396 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5397 temp |= lvds_sync;
5398 }
5399 I915_WRITE(LVDS, temp);
5400 }
5401
5402 if (is_dp) {
5403 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5404 }
5405
5406 I915_WRITE(DPLL(pipe), dpll);
5407
5408 /* Wait for the clocks to stabilize. */
5409 POSTING_READ(DPLL(pipe));
5410 udelay(150);
5411
5412 if (INTEL_INFO(dev)->gen >= 4) {
5413 temp = 0;
5414 if (is_sdvo) {
5415 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5416 if (temp > 1)
5417 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5418 else
5419 temp = 0;
5420 }
5421 I915_WRITE(DPLL_MD(pipe), temp);
5422 } else {
5423 /* The pixel multiplier can only be updated once the
5424 * DPLL is enabled and the clocks are stable.
5425 *
5426 * So write it again.
5427 */
5428 I915_WRITE(DPLL(pipe), dpll);
5429 }
5430
5431 if (HAS_PIPE_CXSR(dev)) {
5432 if (intel_crtc->lowfreq_avail) {
5433 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5434 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5435 } else {
5436 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5437 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5438 }
5439 }
5440
5441 pipeconf &= ~PIPECONF_INTERLACE_MASK;
5442 if (!IS_GEN2(dev) &&
5443 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5444 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5445 /* the chip adds 2 halflines automatically */
5446 adjusted_mode->crtc_vtotal -= 1;
5447 adjusted_mode->crtc_vblank_end -= 1;
5448 vsyncshift = adjusted_mode->crtc_hsync_start
5449 - adjusted_mode->crtc_htotal/2;
5450 } else {
5451 pipeconf |= PIPECONF_PROGRESSIVE;
5452 vsyncshift = 0;
5453 }
5454
5455 if (!IS_GEN3(dev))
5456 I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
5457
5458 I915_WRITE(HTOTAL(pipe),
5459 (adjusted_mode->crtc_hdisplay - 1) |
5460 ((adjusted_mode->crtc_htotal - 1) << 16));
5461 I915_WRITE(HBLANK(pipe),
5462 (adjusted_mode->crtc_hblank_start - 1) |
5463 ((adjusted_mode->crtc_hblank_end - 1) << 16));
5464 I915_WRITE(HSYNC(pipe),
5465 (adjusted_mode->crtc_hsync_start - 1) |
5466 ((adjusted_mode->crtc_hsync_end - 1) << 16));
5467
5468 I915_WRITE(VTOTAL(pipe),
5469 (adjusted_mode->crtc_vdisplay - 1) |
5470 ((adjusted_mode->crtc_vtotal - 1) << 16));
5471 I915_WRITE(VBLANK(pipe),
5472 (adjusted_mode->crtc_vblank_start - 1) |
5473 ((adjusted_mode->crtc_vblank_end - 1) << 16));
5474 I915_WRITE(VSYNC(pipe),
5475 (adjusted_mode->crtc_vsync_start - 1) |
5476 ((adjusted_mode->crtc_vsync_end - 1) << 16));
5477
5478 /* pipesrc and dspsize control the size that is scaled from,
5479 * which should always be the user's requested size.
5480 */
5481 I915_WRITE(DSPSIZE(plane),
5482 ((mode->vdisplay - 1) << 16) |
5483 (mode->hdisplay - 1));
5484 I915_WRITE(DSPPOS(plane), 0);
5485 I915_WRITE(PIPESRC(pipe),
5486 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5487
5488 I915_WRITE(PIPECONF(pipe), pipeconf);
5489 POSTING_READ(PIPECONF(pipe));
5490 intel_enable_pipe(dev_priv, pipe, false);
5491
5492 intel_wait_for_vblank(dev, pipe);
5493
5494 I915_WRITE(DSPCNTR(plane), dspcntr);
5495 POSTING_READ(DSPCNTR(plane));
5496 intel_enable_plane(dev_priv, plane, pipe);
5497
5498 ret = intel_pipe_set_base(crtc, x, y, old_fb);
5499
5500 intel_update_watermarks(dev);
5501
5502 return ret;
5503 }
5504
5505 /*
5506 * Initialize reference clocks when the driver loads
5507 */
5508 void ironlake_init_pch_refclk(struct drm_device *dev)
5509 {
5510 struct drm_i915_private *dev_priv = dev->dev_private;
5511 struct drm_mode_config *mode_config = &dev->mode_config;
5512 struct intel_encoder *encoder;
5513 u32 temp;
5514 bool has_lvds = false;
5515 bool has_cpu_edp = false;
5516 bool has_pch_edp = false;
5517 bool has_panel = false;
5518 bool has_ck505 = false;
5519 bool can_ssc = false;
5520
5521 /* We need to take the global config into account */
5522 list_for_each_entry(encoder, &mode_config->encoder_list,
5523 base.head) {
5524 switch (encoder->type) {
5525 case INTEL_OUTPUT_LVDS:
5526 has_panel = true;
5527 has_lvds = true;
5528 break;
5529 case INTEL_OUTPUT_EDP:
5530 has_panel = true;
5531 if (intel_encoder_is_pch_edp(&encoder->base))
5532 has_pch_edp = true;
5533 else
5534 has_cpu_edp = true;
5535 break;
5536 }
5537 }
5538
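	/* On IBX the reference clock may come from an external CK505
	 * clock chip (reported by the VBT as display_clock_mode), and
	 * SSC is only usable when that chip provides it; later PCHs
	 * generate the reference internally and can always do SSC. */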
5539 if (HAS_PCH_IBX(dev)) {
5540 has_ck505 = dev_priv->display_clock_mode;
5541 can_ssc = has_ck505;
5542 } else {
5543 has_ck505 = false;
5544 can_ssc = true;
5545 }
5546
5547 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
5548 has_panel, has_lvds, has_pch_edp, has_cpu_edp,
5549 has_ck505);
5550
5551 	/* Ironlake: try to set up the display reference clock before
5552 	 * enabling the DPLLs. This is only under the driver's control
5553 	 * after the PCH B stepping; earlier chipset steppings should
5554 	 * ignore this setting.
5555 	 */
5556 temp = I915_READ(PCH_DREF_CONTROL);
5557 /* Always enable nonspread source */
5558 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5559
5560 if (has_ck505)
5561 temp |= DREF_NONSPREAD_CK505_ENABLE;
5562 else
5563 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5564
5565 if (has_panel) {
5566 temp &= ~DREF_SSC_SOURCE_MASK;
5567 temp |= DREF_SSC_SOURCE_ENABLE;
5568
5569 /* SSC must be turned on before enabling the CPU output */
5570 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5571 DRM_DEBUG_KMS("Using SSC on panel\n");
5572 temp |= DREF_SSC1_ENABLE;
5573 } else
5574 temp &= ~DREF_SSC1_ENABLE;
5575
5576 /* Get SSC going before enabling the outputs */
5577 I915_WRITE(PCH_DREF_CONTROL, temp);
5578 POSTING_READ(PCH_DREF_CONTROL);
5579 udelay(200);
5580
5581 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5582
5583 /* Enable CPU source on CPU attached eDP */
5584 if (has_cpu_edp) {
5585 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5586 DRM_DEBUG_KMS("Using SSC on eDP\n");
5587 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5588 }
5589 else
5590 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5591 } else
5592 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5593
5594 I915_WRITE(PCH_DREF_CONTROL, temp);
5595 POSTING_READ(PCH_DREF_CONTROL);
5596 udelay(200);
5597 } else {
5598 DRM_DEBUG_KMS("Disabling SSC entirely\n");
5599
5600 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5601
5602 /* Turn off CPU output */
5603 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5604
5605 I915_WRITE(PCH_DREF_CONTROL, temp);
5606 POSTING_READ(PCH_DREF_CONTROL);
5607 udelay(200);
5608
5609 /* Turn off the SSC source */
5610 temp &= ~DREF_SSC_SOURCE_MASK;
5611 temp |= DREF_SSC_SOURCE_DISABLE;
5612
5613 /* Turn off SSC1 */
5614 temp &= ~ DREF_SSC1_ENABLE;
5615
5616 I915_WRITE(PCH_DREF_CONTROL, temp);
5617 POSTING_READ(PCH_DREF_CONTROL);
5618 udelay(200);
5619 }
5620 }
5621
5622 static int ironlake_get_refclk(struct drm_crtc *crtc)
5623 {
5624 struct drm_device *dev = crtc->dev;
5625 struct drm_i915_private *dev_priv = dev->dev_private;
5626 struct intel_encoder *encoder;
5627 struct drm_mode_config *mode_config = &dev->mode_config;
5628 struct intel_encoder *edp_encoder = NULL;
5629 int num_connectors = 0;
5630 bool is_lvds = false;
5631
5632 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5633 if (encoder->base.crtc != crtc)
5634 continue;
5635
5636 switch (encoder->type) {
5637 case INTEL_OUTPUT_LVDS:
5638 is_lvds = true;
5639 break;
5640 case INTEL_OUTPUT_EDP:
5641 edp_encoder = encoder;
5642 break;
5643 }
5644 num_connectors++;
5645 }
5646
5647 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5648 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5649 dev_priv->lvds_ssc_freq);
5650 return dev_priv->lvds_ssc_freq * 1000;
5651 }
5652
5653 return 120000;
5654 }
5655
5656 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5657 struct drm_display_mode *mode,
5658 struct drm_display_mode *adjusted_mode,
5659 int x, int y,
5660 struct drm_framebuffer *old_fb)
5661 {
5662 struct drm_device *dev = crtc->dev;
5663 struct drm_i915_private *dev_priv = dev->dev_private;
5664 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5665 int pipe = intel_crtc->pipe;
5666 int plane = intel_crtc->plane;
5667 int refclk, num_connectors = 0;
5668 intel_clock_t clock, reduced_clock;
5669 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5670 bool ok, has_reduced_clock = false, is_sdvo = false;
5671 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5672 struct intel_encoder *has_edp_encoder = NULL;
5673 struct drm_mode_config *mode_config = &dev->mode_config;
5674 struct intel_encoder *encoder;
5675 const intel_limit_t *limit;
5676 int ret;
5677 struct fdi_m_n m_n = {0};
5678 u32 temp;
5679 u32 lvds_sync = 0;
5680 int target_clock, pixel_multiplier, lane, link_bw, factor;
5681 unsigned int pipe_bpp;
5682 bool dither;
5683
5684 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5685 if (encoder->base.crtc != crtc)
5686 continue;
5687
5688 switch (encoder->type) {
5689 case INTEL_OUTPUT_LVDS:
5690 is_lvds = true;
5691 break;
5692 case INTEL_OUTPUT_SDVO:
5693 case INTEL_OUTPUT_HDMI:
5694 is_sdvo = true;
5695 if (encoder->needs_tv_clock)
5696 is_tv = true;
5697 break;
5698 case INTEL_OUTPUT_TVOUT:
5699 is_tv = true;
5700 break;
5701 case INTEL_OUTPUT_ANALOG:
5702 is_crt = true;
5703 break;
5704 case INTEL_OUTPUT_DISPLAYPORT:
5705 is_dp = true;
5706 break;
5707 case INTEL_OUTPUT_EDP:
5708 has_edp_encoder = encoder;
5709 break;
5710 }
5711
5712 num_connectors++;
5713 }
5714
5715 refclk = ironlake_get_refclk(crtc);
5716
5717 /*
5718 * Returns a set of divisors for the desired target clock with the given
5719 * refclk, or FALSE. The returned values represent the clock equation:
5720 	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5721 */
5722 limit = intel_limit(crtc, refclk);
5723 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5724 &clock);
5725 if (!ok) {
5726 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5727 return -EINVAL;
5728 }
5729
5730 /* Ensure that the cursor is valid for the new mode before changing... */
5731 intel_crtc_update_cursor(crtc, true);
5732
5733 if (is_lvds && dev_priv->lvds_downclock_avail) {
5734 /*
5735 * Ensure we match the reduced clock's P to the target clock.
5736 * If the clocks don't match, we can't switch the display clock
5737 * by using the FP0/FP1. In such case we will disable the LVDS
5738 * downclock feature.
5739 */
5740 has_reduced_clock = limit->find_pll(limit, crtc,
5741 dev_priv->lvds_downclock,
5742 refclk,
5743 &clock,
5744 &reduced_clock);
5745 }
5746 	/* SDVO TV has fixed PLL values that depend on its clock range;
5747 	   this mirrors the VBIOS setting. */
5748 if (is_sdvo && is_tv) {
5749 if (adjusted_mode->clock >= 100000
5750 && adjusted_mode->clock < 140500) {
5751 clock.p1 = 2;
5752 clock.p2 = 10;
5753 clock.n = 3;
5754 clock.m1 = 16;
5755 clock.m2 = 8;
5756 } else if (adjusted_mode->clock >= 140500
5757 && adjusted_mode->clock <= 200000) {
5758 clock.p1 = 1;
5759 clock.p2 = 10;
5760 clock.n = 6;
5761 clock.m1 = 12;
5762 clock.m2 = 8;
5763 }
5764 }
5765
5766 /* FDI link */
5767 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5768 lane = 0;
5769 /* CPU eDP doesn't require FDI link, so just set DP M/N
5770 according to current link config */
5771 if (has_edp_encoder &&
5772 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5773 target_clock = mode->clock;
5774 intel_edp_link_config(has_edp_encoder,
5775 &lane, &link_bw);
5776 } else {
5777 /* [e]DP over FDI requires target mode clock
5778 instead of link clock */
5779 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5780 target_clock = mode->clock;
5781 else
5782 target_clock = adjusted_mode->clock;
5783
5784 /* FDI is a binary signal running at ~2.7GHz, encoding
5785 * each output octet as 10 bits. The actual frequency
5786 * is stored as a divider into a 100MHz clock, and the
5787 * mode pixel clock is stored in units of 1KHz.
5788 * Hence the bw of each lane in terms of the mode signal
5789 * is:
5790 */
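	 /* For example, if intel_fdi_link_freq() reports 27 (2.7GHz),
	  * this works out to 27 * 100000 / 10 = 270000, i.e. 270MHz of
	  * pixel bandwidth per lane in the mode clock's 1kHz units. */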
5791 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5792 }
5793
5794 /* determine panel color depth */
5795 temp = I915_READ(PIPECONF(pipe));
5796 temp &= ~PIPE_BPC_MASK;
5797 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
5798 switch (pipe_bpp) {
5799 case 18:
5800 temp |= PIPE_6BPC;
5801 break;
5802 case 24:
5803 temp |= PIPE_8BPC;
5804 break;
5805 case 30:
5806 temp |= PIPE_10BPC;
5807 break;
5808 case 36:
5809 temp |= PIPE_12BPC;
5810 break;
5811 default:
5812 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
5813 pipe_bpp);
5814 temp |= PIPE_8BPC;
5815 pipe_bpp = 24;
5816 break;
5817 }
5818
5819 intel_crtc->bpp = pipe_bpp;
5820 I915_WRITE(PIPECONF(pipe), temp);
5821
5822 if (!lane) {
5823 /*
5824 * Account for spread spectrum to avoid
5825 * oversubscribing the link. Max center spread
5826 * is 2.5%; use 5% for safety's sake.
5827 */
5828 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5829 lane = bps / (link_bw * 8) + 1;
5830 }
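	/* Worked example with hypothetical numbers: a 148500 kHz mode at
	 * 24 bpp needs 148500 * 24 * 21 / 20 = 3742200 units of bandwidth;
	 * with link_bw = 270000 that is 3742200 / (270000 * 8) = 1 by
	 * integer division, + 1 = 2 FDI lanes. */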
5831
5832 intel_crtc->fdi_lanes = lane;
5833
5834 if (pixel_multiplier > 1)
5835 link_bw *= pixel_multiplier;
5836 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5837 &m_n);
5838
5839 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5840 if (has_reduced_clock)
5841 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5842 reduced_clock.m2;
5843
5844 /* Enable autotuning of the PLL clock (if permissible) */
5845 factor = 21;
5846 if (is_lvds) {
5847 if ((intel_panel_use_ssc(dev_priv) &&
5848 dev_priv->lvds_ssc_freq == 100) ||
5849 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5850 factor = 25;
5851 } else if (is_sdvo && is_tv)
5852 factor = 20;
5853
5854 if (clock.m < factor * clock.n)
5855 fp |= FP_CB_TUNE;
5856
5857 dpll = 0;
5858
5859 if (is_lvds)
5860 dpll |= DPLLB_MODE_LVDS;
5861 else
5862 dpll |= DPLLB_MODE_DAC_SERIAL;
5863 if (is_sdvo) {
5864 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5865 if (pixel_multiplier > 1) {
5866 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5867 }
5868 dpll |= DPLL_DVO_HIGH_SPEED;
5869 }
5870 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5871 dpll |= DPLL_DVO_HIGH_SPEED;
5872
5873 /* compute bitmask from p1 value */
5874 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5875 /* also FPA1 */
5876 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5877
5878 switch (clock.p2) {
5879 case 5:
5880 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5881 break;
5882 case 7:
5883 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5884 break;
5885 case 10:
5886 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5887 break;
5888 case 14:
5889 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5890 break;
5891 }
5892
5893 if (is_sdvo && is_tv)
5894 dpll |= PLL_REF_INPUT_TVCLKINBC;
5895 else if (is_tv)
5896 /* XXX: just matching BIOS for now */
5897 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
5898 dpll |= 3;
5899 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5900 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5901 else
5902 dpll |= PLL_REF_INPUT_DREFCLK;
5903
5904 /* setup pipeconf */
5905 pipeconf = I915_READ(PIPECONF(pipe));
5906
5907 /* Set up the display plane register */
5908 dspcntr = DISPPLANE_GAMMA_ENABLE;
5909
5910 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5911 drm_mode_debug_printmodeline(mode);
5912
5913 /* PCH eDP needs FDI, but CPU eDP does not */
5914 if (!intel_crtc->no_pll) {
5915 if (!has_edp_encoder ||
5916 intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5917 I915_WRITE(PCH_FP0(pipe), fp);
5918 I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5919
5920 POSTING_READ(PCH_DPLL(pipe));
5921 udelay(150);
5922 }
5923 } else {
5924 if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5925 fp == I915_READ(PCH_FP0(0))) {
5926 intel_crtc->use_pll_a = true;
5927 DRM_DEBUG_KMS("using pipe a dpll\n");
5928 } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5929 fp == I915_READ(PCH_FP0(1))) {
5930 intel_crtc->use_pll_a = false;
5931 DRM_DEBUG_KMS("using pipe b dpll\n");
5932 } else {
5933 DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5934 return -EINVAL;
5935 }
5936 }
5937
5938 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5939 * This is an exception to the general rule that mode_set doesn't turn
5940 * things on.
5941 */
5942 if (is_lvds) {
5943 temp = I915_READ(PCH_LVDS);
5944 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5945 if (HAS_PCH_CPT(dev)) {
5946 temp &= ~PORT_TRANS_SEL_MASK;
5947 temp |= PORT_TRANS_SEL_CPT(pipe);
5948 } else {
5949 if (pipe == 1)
5950 temp |= LVDS_PIPEB_SELECT;
5951 else
5952 temp &= ~LVDS_PIPEB_SELECT;
5953 }
5954
5955 		/* set the corresponding LVDS_BORDER bit */
5956 temp |= dev_priv->lvds_border_bits;
5957 /* Set the B0-B3 data pairs corresponding to whether we're going to
5958 * set the DPLLs for dual-channel mode or not.
5959 */
5960 if (clock.p2 == 7)
5961 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5962 else
5963 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5964
5965 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5966 * appropriately here, but we need to look more thoroughly into how
5967 * panels behave in the two modes.
5968 */
5969 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5970 lvds_sync |= LVDS_HSYNC_POLARITY;
5971 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5972 lvds_sync |= LVDS_VSYNC_POLARITY;
5973 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5974 != lvds_sync) {
5975 char flags[2] = "-+";
5976 DRM_INFO("Changing LVDS panel from "
5977 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5978 flags[!(temp & LVDS_HSYNC_POLARITY)],
5979 flags[!(temp & LVDS_VSYNC_POLARITY)],
5980 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5981 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5982 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5983 temp |= lvds_sync;
5984 }
5985 I915_WRITE(PCH_LVDS, temp);
5986 }
5987
5988 pipeconf &= ~PIPECONF_DITHER_EN;
5989 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5990 if ((is_lvds && dev_priv->lvds_dither) || dither) {
5991 pipeconf |= PIPECONF_DITHER_EN;
5992 pipeconf |= PIPECONF_DITHER_TYPE_SP;
5993 }
5994 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5995 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5996 } else {
5997 /* For non-DP output, clear any trans DP clock recovery setting.*/
5998 I915_WRITE(TRANSDATA_M1(pipe), 0);
5999 I915_WRITE(TRANSDATA_N1(pipe), 0);
6000 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
6001 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
6002 }
6003
6004 if (!intel_crtc->no_pll &&
6005 (!has_edp_encoder ||
6006 intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
6007 I915_WRITE(PCH_DPLL(pipe), dpll);
6008
6009 /* Wait for the clocks to stabilize. */
6010 POSTING_READ(PCH_DPLL(pipe));
6011 udelay(150);
6012
6013 /* The pixel multiplier can only be updated once the
6014 * DPLL is enabled and the clocks are stable.
6015 *
6016 * So write it again.
6017 */
6018 I915_WRITE(PCH_DPLL(pipe), dpll);
6019 }
6020
6021 intel_crtc->lowfreq_avail = false;
6022 if (!intel_crtc->no_pll) {
6023 if (is_lvds && has_reduced_clock && i915_powersave) {
6024 I915_WRITE(PCH_FP1(pipe), fp2);
6025 intel_crtc->lowfreq_avail = true;
6026 if (HAS_PIPE_CXSR(dev)) {
6027 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6028 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6029 }
6030 } else {
6031 I915_WRITE(PCH_FP1(pipe), fp);
6032 if (HAS_PIPE_CXSR(dev)) {
6033 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6034 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
6035 }
6036 }
6037 }
6038
6039 pipeconf &= ~PIPECONF_INTERLACE_MASK;
6040 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6041 pipeconf |= PIPECONF_INTERLACED_ILK;
6042 /* the chip adds 2 halflines automatically */
6043 adjusted_mode->crtc_vtotal -= 1;
6044 adjusted_mode->crtc_vblank_end -= 1;
6045 I915_WRITE(VSYNCSHIFT(pipe),
6046 adjusted_mode->crtc_hsync_start
6047 - adjusted_mode->crtc_htotal/2);
6048 } else {
6049 pipeconf |= PIPECONF_PROGRESSIVE;
6050 I915_WRITE(VSYNCSHIFT(pipe), 0);
6051 }
6052
6053 I915_WRITE(HTOTAL(pipe),
6054 (adjusted_mode->crtc_hdisplay - 1) |
6055 ((adjusted_mode->crtc_htotal - 1) << 16));
6056 I915_WRITE(HBLANK(pipe),
6057 (adjusted_mode->crtc_hblank_start - 1) |
6058 ((adjusted_mode->crtc_hblank_end - 1) << 16));
6059 I915_WRITE(HSYNC(pipe),
6060 (adjusted_mode->crtc_hsync_start - 1) |
6061 ((adjusted_mode->crtc_hsync_end - 1) << 16));
6062
6063 I915_WRITE(VTOTAL(pipe),
6064 (adjusted_mode->crtc_vdisplay - 1) |
6065 ((adjusted_mode->crtc_vtotal - 1) << 16));
6066 I915_WRITE(VBLANK(pipe),
6067 (adjusted_mode->crtc_vblank_start - 1) |
6068 ((adjusted_mode->crtc_vblank_end - 1) << 16));
6069 I915_WRITE(VSYNC(pipe),
6070 (adjusted_mode->crtc_vsync_start - 1) |
6071 ((adjusted_mode->crtc_vsync_end - 1) << 16));
6072
6073 /* pipesrc controls the size that is scaled from, which should
6074 * always be the user's requested size.
6075 */
6076 I915_WRITE(PIPESRC(pipe),
6077 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
6078
6079 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
6080 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
6081 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
6082 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
6083
6084 if (has_edp_encoder &&
6085 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6086 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
6087 }
6088
6089 I915_WRITE(PIPECONF(pipe), pipeconf);
6090 POSTING_READ(PIPECONF(pipe));
6091
6092 intel_wait_for_vblank(dev, pipe);
6093
6094 I915_WRITE(DSPCNTR(plane), dspcntr);
6095 POSTING_READ(DSPCNTR(plane));
6096
6097 ret = intel_pipe_set_base(crtc, x, y, old_fb);
6098
6099 intel_update_watermarks(dev);
6100
6101 return ret;
6102 }
6103
6104 static int intel_crtc_mode_set(struct drm_crtc *crtc,
6105 struct drm_display_mode *mode,
6106 struct drm_display_mode *adjusted_mode,
6107 int x, int y,
6108 struct drm_framebuffer *old_fb)
6109 {
6110 struct drm_device *dev = crtc->dev;
6111 struct drm_i915_private *dev_priv = dev->dev_private;
6112 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6113 int pipe = intel_crtc->pipe;
6114 int ret;
6115
6116 drm_vblank_pre_modeset(dev, pipe);
6117
6118 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
6119 x, y, old_fb);
6120 drm_vblank_post_modeset(dev, pipe);
6121
6122 if (ret)
6123 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
6124 else
6125 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
6126
6127 return ret;
6128 }
6129
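/* Returns true when the ELD (EDID-Like Data, the audio capability block
 * handed to the audio driver) already programmed into the hardware
 * matches connector->eld, letting callers skip the rewrite; an empty
 * ELD is considered up to date only if the hardware valid bit is clear.
 */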
6130 static bool intel_eld_uptodate(struct drm_connector *connector,
6131 int reg_eldv, uint32_t bits_eldv,
6132 int reg_elda, uint32_t bits_elda,
6133 int reg_edid)
6134 {
6135 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6136 uint8_t *eld = connector->eld;
6137 uint32_t i;
6138
6139 i = I915_READ(reg_eldv);
6140 i &= bits_eldv;
6141
6142 if (!eld[0])
6143 return !i;
6144
6145 if (!i)
6146 return false;
6147
6148 i = I915_READ(reg_elda);
6149 i &= ~bits_elda;
6150 I915_WRITE(reg_elda, i);
6151
6152 for (i = 0; i < eld[2]; i++)
6153 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6154 return false;
6155
6156 return true;
6157 }
6158
6159 static void g4x_write_eld(struct drm_connector *connector,
6160 struct drm_crtc *crtc)
6161 {
6162 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6163 uint8_t *eld = connector->eld;
6164 uint32_t eldv;
6165 uint32_t len;
6166 uint32_t i;
6167
6168 i = I915_READ(G4X_AUD_VID_DID);
6169
6170 if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
6171 eldv = G4X_ELDV_DEVCL_DEVBLC;
6172 else
6173 eldv = G4X_ELDV_DEVCTG;
6174
6175 if (intel_eld_uptodate(connector,
6176 G4X_AUD_CNTL_ST, eldv,
6177 G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
6178 G4X_HDMIW_HDMIEDID))
6179 return;
6180
6181 i = I915_READ(G4X_AUD_CNTL_ST);
6182 i &= ~(eldv | G4X_ELD_ADDR);
6183 len = (i >> 9) & 0x1f; /* ELD buffer size */
6184 I915_WRITE(G4X_AUD_CNTL_ST, i);
6185
6186 if (!eld[0])
6187 return;
6188
6189 len = min_t(uint8_t, eld[2], len);
6190 DRM_DEBUG_DRIVER("ELD size %d\n", len);
6191 for (i = 0; i < len; i++)
6192 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
6193
6194 i = I915_READ(G4X_AUD_CNTL_ST);
6195 i |= eldv;
6196 I915_WRITE(G4X_AUD_CNTL_ST, i);
6197 }
6198
6199 static void ironlake_write_eld(struct drm_connector *connector,
6200 struct drm_crtc *crtc)
6201 {
6202 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6203 uint8_t *eld = connector->eld;
6204 uint32_t eldv;
6205 uint32_t i;
6206 int len;
6207 int hdmiw_hdmiedid;
6208 int aud_config;
6209 int aud_cntl_st;
6210 int aud_cntrl_st2;
6211
6212 if (HAS_PCH_IBX(connector->dev)) {
6213 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
6214 aud_config = IBX_AUD_CONFIG_A;
6215 aud_cntl_st = IBX_AUD_CNTL_ST_A;
6216 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
6217 } else {
6218 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
6219 aud_config = CPT_AUD_CONFIG_A;
6220 aud_cntl_st = CPT_AUD_CNTL_ST_A;
6221 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
6222 }
6223
6224 i = to_intel_crtc(crtc)->pipe;
6225 hdmiw_hdmiedid += i * 0x100;
6226 aud_cntl_st += i * 0x100;
6227 aud_config += i * 0x100;
6228
6229 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
6230
6231 i = I915_READ(aud_cntl_st);
6232 i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
6233 if (!i) {
6234 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
6235 /* operate blindly on all ports */
6236 eldv = IBX_ELD_VALIDB;
6237 eldv |= IBX_ELD_VALIDB << 4;
6238 eldv |= IBX_ELD_VALIDB << 8;
6239 } else {
6240 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
6241 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
6242 }
6243
6244 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6245 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6246 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
6247 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6248 } else
6249 I915_WRITE(aud_config, 0);
6250
6251 if (intel_eld_uptodate(connector,
6252 aud_cntrl_st2, eldv,
6253 aud_cntl_st, IBX_ELD_ADDRESS,
6254 hdmiw_hdmiedid))
6255 return;
6256
6257 i = I915_READ(aud_cntrl_st2);
6258 i &= ~eldv;
6259 I915_WRITE(aud_cntrl_st2, i);
6260
6261 if (!eld[0])
6262 return;
6263
6264 i = I915_READ(aud_cntl_st);
6265 i &= ~IBX_ELD_ADDRESS;
6266 I915_WRITE(aud_cntl_st, i);
6267
6268 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
6269 DRM_DEBUG_DRIVER("ELD size %d\n", len);
6270 for (i = 0; i < len; i++)
6271 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6272
6273 i = I915_READ(aud_cntrl_st2);
6274 i |= eldv;
6275 I915_WRITE(aud_cntrl_st2, i);
6276 }
6277
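/* Look up the connector being driven, stash the A/V sync delay in ELD
 * byte 6 (the divide by two converts drm_av_sync_delay()'s milliseconds
 * into the field's 2ms units), then defer to the platform write_eld
 * hook, if any. */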
6278 void intel_write_eld(struct drm_encoder *encoder,
6279 struct drm_display_mode *mode)
6280 {
6281 struct drm_crtc *crtc = encoder->crtc;
6282 struct drm_connector *connector;
6283 struct drm_device *dev = encoder->dev;
6284 struct drm_i915_private *dev_priv = dev->dev_private;
6285
6286 connector = drm_select_eld(encoder, mode);
6287 if (!connector)
6288 return;
6289
6290 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6291 connector->base.id,
6292 drm_get_connector_name(connector),
6293 connector->encoder->base.id,
6294 drm_get_encoder_name(connector->encoder));
6295
6296 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6297
6298 if (dev_priv->display.write_eld)
6299 dev_priv->display.write_eld(connector, crtc);
6300 }
6301
6302 /** Loads the palette/gamma unit for the CRTC with the prepared values */
6303 void intel_crtc_load_lut(struct drm_crtc *crtc)
6304 {
6305 struct drm_device *dev = crtc->dev;
6306 struct drm_i915_private *dev_priv = dev->dev_private;
6307 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6308 int palreg = PALETTE(intel_crtc->pipe);
6309 int i;
6310
6311 /* The clocks have to be on to load the palette. */
6312 if (!crtc->enabled || !intel_crtc->active)
6313 return;
6314
6315 /* use legacy palette for Ironlake */
6316 if (HAS_PCH_SPLIT(dev))
6317 palreg = LGC_PALETTE(intel_crtc->pipe);
6318
6319 for (i = 0; i < 256; i++) {
6320 I915_WRITE(palreg + 4 * i,
6321 (intel_crtc->lut_r[i] << 16) |
6322 (intel_crtc->lut_g[i] << 8) |
6323 intel_crtc->lut_b[i]);
6324 }
6325 }
6326
6327 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6328 {
6329 struct drm_device *dev = crtc->dev;
6330 struct drm_i915_private *dev_priv = dev->dev_private;
6331 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6332 bool visible = base != 0;
6333 u32 cntl;
6334
6335 if (intel_crtc->cursor_visible == visible)
6336 return;
6337
6338 cntl = I915_READ(_CURACNTR);
6339 if (visible) {
6340 /* On these chipsets we can only modify the base whilst
6341 * the cursor is disabled.
6342 */
6343 I915_WRITE(_CURABASE, base);
6344
6345 cntl &= ~(CURSOR_FORMAT_MASK);
6346 /* XXX width must be 64, stride 256 => 0x00 << 28 */
6347 cntl |= CURSOR_ENABLE |
6348 CURSOR_GAMMA_ENABLE |
6349 CURSOR_FORMAT_ARGB;
6350 } else
6351 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
6352 I915_WRITE(_CURACNTR, cntl);
6353
6354 intel_crtc->cursor_visible = visible;
6355 }
6356
6357 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
6358 {
6359 struct drm_device *dev = crtc->dev;
6360 struct drm_i915_private *dev_priv = dev->dev_private;
6361 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6362 int pipe = intel_crtc->pipe;
6363 bool visible = base != 0;
6364
6365 if (intel_crtc->cursor_visible != visible) {
6366 uint32_t cntl = I915_READ(CURCNTR(pipe));
6367 if (base) {
6368 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
6369 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6370 cntl |= pipe << 28; /* Connect to correct pipe */
6371 } else {
6372 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6373 cntl |= CURSOR_MODE_DISABLE;
6374 }
6375 I915_WRITE(CURCNTR(pipe), cntl);
6376
6377 intel_crtc->cursor_visible = visible;
6378 }
6379 /* and commit changes on next vblank */
6380 I915_WRITE(CURBASE(pipe), base);
6381 }
6382
6383 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6384 {
6385 struct drm_device *dev = crtc->dev;
6386 struct drm_i915_private *dev_priv = dev->dev_private;
6387 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6388 int pipe = intel_crtc->pipe;
6389 bool visible = base != 0;
6390
6391 if (intel_crtc->cursor_visible != visible) {
6392 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
6393 if (base) {
6394 cntl &= ~CURSOR_MODE;
6395 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6396 } else {
6397 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6398 cntl |= CURSOR_MODE_DISABLE;
6399 }
6400 I915_WRITE(CURCNTR_IVB(pipe), cntl);
6401
6402 intel_crtc->cursor_visible = visible;
6403 }
6404 /* and commit changes on next vblank */
6405 I915_WRITE(CURBASE_IVB(pipe), base);
6406 }
6407
6408 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
6409 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6410 bool on)
6411 {
6412 struct drm_device *dev = crtc->dev;
6413 struct drm_i915_private *dev_priv = dev->dev_private;
6414 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6415 int pipe = intel_crtc->pipe;
6416 int x = intel_crtc->cursor_x;
6417 int y = intel_crtc->cursor_y;
6418 u32 base, pos;
6419 bool visible;
6420
6421 pos = 0;
6422
6423 if (on && crtc->enabled && crtc->fb) {
6424 base = intel_crtc->cursor_addr;
6425 if (x > (int) crtc->fb->width)
6426 base = 0;
6427
6428 if (y > (int) crtc->fb->height)
6429 base = 0;
6430 } else
6431 base = 0;
6432
6433 if (x < 0) {
6434 if (x + intel_crtc->cursor_width < 0)
6435 base = 0;
6436
6437 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
6438 x = -x;
6439 }
6440 pos |= x << CURSOR_X_SHIFT;
6441
6442 if (y < 0) {
6443 if (y + intel_crtc->cursor_height < 0)
6444 base = 0;
6445
6446 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
6447 y = -y;
6448 }
6449 pos |= y << CURSOR_Y_SHIFT;
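	/* CURPOS holds sign-magnitude coordinates: one sign bit per axis
	 * plus the absolute value, which is why negative x/y are negated
	 * above instead of being written as two's complement. */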
6450
6451 visible = base != 0;
6452 if (!visible && !intel_crtc->cursor_visible)
6453 return;
6454
6455 if (IS_IVYBRIDGE(dev)) {
6456 I915_WRITE(CURPOS_IVB(pipe), pos);
6457 ivb_update_cursor(crtc, base);
6458 } else {
6459 I915_WRITE(CURPOS(pipe), pos);
6460 if (IS_845G(dev) || IS_I865G(dev))
6461 i845_update_cursor(crtc, base);
6462 else
6463 i9xx_update_cursor(crtc, base);
6464 }
6465
6466 if (visible)
6467 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
6468 }
6469
6470 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6471 struct drm_file *file,
6472 uint32_t handle,
6473 uint32_t width, uint32_t height)
6474 {
6475 struct drm_device *dev = crtc->dev;
6476 struct drm_i915_private *dev_priv = dev->dev_private;
6477 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6478 struct drm_i915_gem_object *obj;
6479 uint32_t addr;
6480 int ret;
6481
6482 DRM_DEBUG_KMS("\n");
6483
6484 /* if we want to turn off the cursor ignore width and height */
6485 if (!handle) {
6486 DRM_DEBUG_KMS("cursor off\n");
6487 addr = 0;
6488 obj = NULL;
6489 mutex_lock(&dev->struct_mutex);
6490 goto finish;
6491 }
6492
6493 /* Currently we only support 64x64 cursors */
6494 if (width != 64 || height != 64) {
6495 DRM_ERROR("we currently only support 64x64 cursors\n");
6496 return -EINVAL;
6497 }
6498
6499 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
6500 if (&obj->base == NULL)
6501 return -ENOENT;
6502
6503 if (obj->base.size < width * height * 4) {
6504 		DRM_ERROR("buffer is too small\n");
6505 ret = -ENOMEM;
6506 goto fail;
6507 }
6508
6509 /* we only need to pin inside GTT if cursor is non-phy */
6510 mutex_lock(&dev->struct_mutex);
6511 if (!dev_priv->info->cursor_needs_physical) {
6512 if (obj->tiling_mode) {
6513 DRM_ERROR("cursor cannot be tiled\n");
6514 ret = -EINVAL;
6515 goto fail_locked;
6516 }
6517
6518 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
6519 if (ret) {
6520 DRM_ERROR("failed to move cursor bo into the GTT\n");
6521 goto fail_locked;
6522 }
6523
6524 ret = i915_gem_object_put_fence(obj);
6525 if (ret) {
6526 DRM_ERROR("failed to release fence for cursor");
6527 goto fail_unpin;
6528 }
6529
6530 addr = obj->gtt_offset;
6531 } else {
6532 int align = IS_I830(dev) ? 16 * 1024 : 256;
6533 ret = i915_gem_attach_phys_object(dev, obj,
6534 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
6535 align);
6536 if (ret) {
6537 DRM_ERROR("failed to attach phys object\n");
6538 goto fail_locked;
6539 }
6540 addr = obj->phys_obj->handle->busaddr;
6541 }
6542
6543 if (IS_GEN2(dev))
6544 I915_WRITE(CURSIZE, (height << 12) | width);
6545
6546 finish:
6547 if (intel_crtc->cursor_bo) {
6548 if (dev_priv->info->cursor_needs_physical) {
6549 if (intel_crtc->cursor_bo != obj)
6550 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6551 } else
6552 i915_gem_object_unpin(intel_crtc->cursor_bo);
6553 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6554 }
6555
6556 mutex_unlock(&dev->struct_mutex);
6557
6558 intel_crtc->cursor_addr = addr;
6559 intel_crtc->cursor_bo = obj;
6560 intel_crtc->cursor_width = width;
6561 intel_crtc->cursor_height = height;
6562
6563 intel_crtc_update_cursor(crtc, true);
6564
6565 return 0;
6566 fail_unpin:
6567 i915_gem_object_unpin(obj);
6568 fail_locked:
6569 mutex_unlock(&dev->struct_mutex);
6570 fail:
6571 drm_gem_object_unreference_unlocked(&obj->base);
6572 return ret;
6573 }
6574
6575 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6576 {
6577 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6578
6579 intel_crtc->cursor_x = x;
6580 intel_crtc->cursor_y = y;
6581
6582 intel_crtc_update_cursor(crtc, true);
6583
6584 return 0;
6585 }
6586
6587 /** Sets the color ramps on behalf of RandR */
6588 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6589 u16 blue, int regno)
6590 {
6591 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6592
6593 intel_crtc->lut_r[regno] = red >> 8;
6594 intel_crtc->lut_g[regno] = green >> 8;
6595 intel_crtc->lut_b[regno] = blue >> 8;
6596 }
6597
6598 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6599 u16 *blue, int regno)
6600 {
6601 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6602
6603 *red = intel_crtc->lut_r[regno] << 8;
6604 *green = intel_crtc->lut_g[regno] << 8;
6605 *blue = intel_crtc->lut_b[regno] << 8;
6606 }
6607
6608 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6609 u16 *blue, uint32_t start, uint32_t size)
6610 {
6611 int end = (start + size > 256) ? 256 : start + size, i;
6612 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6613
6614 for (i = start; i < end; i++) {
6615 intel_crtc->lut_r[i] = red[i] >> 8;
6616 intel_crtc->lut_g[i] = green[i] >> 8;
6617 intel_crtc->lut_b[i] = blue[i] >> 8;
6618 }
6619
6620 intel_crtc_load_lut(crtc);
6621 }
6622
6623 /**
6624 * Get a pipe with a simple mode set on it for doing load-based monitor
6625 * detection.
6626 *
6627 * It will be up to the load-detect code to adjust the pipe as appropriate for
6628 * its requirements. The pipe will be connected to no other encoders.
6629 *
6630 * Currently this code will only succeed if there is a pipe with no encoders
6631 * configured for it. In the future, it could choose to temporarily disable
6632 * some outputs to free up a pipe for its use.
6633 *
6634 * \return crtc, or NULL if no pipes are available.
6635 */
6636
6637 /* VESA 640x480x72Hz mode to set on the pipe */
6638 static struct drm_display_mode load_detect_mode = {
6639 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6640 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6641 };
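/* For reference, the DRM_MODE() arguments above are: name, type,
 * clock (kHz), hdisplay, hsync_start, hsync_end, htotal, hskew,
 * vdisplay, vsync_start, vsync_end, vtotal, vscan, flags. */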
6642
6643 static struct drm_framebuffer *
6644 intel_framebuffer_create(struct drm_device *dev,
6645 struct drm_mode_fb_cmd2 *mode_cmd,
6646 struct drm_i915_gem_object *obj)
6647 {
6648 struct intel_framebuffer *intel_fb;
6649 int ret;
6650
6651 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6652 if (!intel_fb) {
6653 drm_gem_object_unreference_unlocked(&obj->base);
6654 return ERR_PTR(-ENOMEM);
6655 }
6656
6657 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6658 if (ret) {
6659 drm_gem_object_unreference_unlocked(&obj->base);
6660 kfree(intel_fb);
6661 return ERR_PTR(ret);
6662 }
6663
6664 return &intel_fb->base;
6665 }
6666
6667 static u32
6668 intel_framebuffer_pitch_for_width(int width, int bpp)
6669 {
6670 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6671 return ALIGN(pitch, 64);
6672 }
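/* Example: a 1366-pixel-wide mode at 32 bpp gives
 * DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes, which ALIGN() rounds up to
 * the next multiple of 64: 5504. */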
6673
6674 static u32
6675 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6676 {
6677 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6678 return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6679 }
6680
6681 static struct drm_framebuffer *
6682 intel_framebuffer_create_for_mode(struct drm_device *dev,
6683 struct drm_display_mode *mode,
6684 int depth, int bpp)
6685 {
6686 struct drm_i915_gem_object *obj;
6687 struct drm_mode_fb_cmd2 mode_cmd;
6688
6689 obj = i915_gem_alloc_object(dev,
6690 intel_framebuffer_size_for_mode(mode, bpp));
6691 if (obj == NULL)
6692 return ERR_PTR(-ENOMEM);
6693
6694 mode_cmd.width = mode->hdisplay;
6695 mode_cmd.height = mode->vdisplay;
6696 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6697 bpp);
6698 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
6699
6700 return intel_framebuffer_create(dev, &mode_cmd, obj);
6701 }
6702
6703 static struct drm_framebuffer *
6704 mode_fits_in_fbdev(struct drm_device *dev,
6705 struct drm_display_mode *mode)
6706 {
6707 struct drm_i915_private *dev_priv = dev->dev_private;
6708 struct drm_i915_gem_object *obj;
6709 struct drm_framebuffer *fb;
6710
6711 if (dev_priv->fbdev == NULL)
6712 return NULL;
6713
6714 obj = dev_priv->fbdev->ifb.obj;
6715 if (obj == NULL)
6716 return NULL;
6717
6718 fb = &dev_priv->fbdev->ifb.base;
6719 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6720 fb->bits_per_pixel))
6721 return NULL;
6722
6723 if (obj->base.size < mode->vdisplay * fb->pitches[0])
6724 return NULL;
6725
6726 return fb;
6727 }
6728
6729 bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
6730 struct drm_connector *connector,
6731 struct drm_display_mode *mode,
6732 struct intel_load_detect_pipe *old)
6733 {
6734 struct intel_crtc *intel_crtc;
6735 struct drm_crtc *possible_crtc;
6736 struct drm_encoder *encoder = &intel_encoder->base;
6737 struct drm_crtc *crtc = NULL;
6738 struct drm_device *dev = encoder->dev;
6739 struct drm_framebuffer *old_fb;
6740 int i = -1;
6741
6742 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6743 connector->base.id, drm_get_connector_name(connector),
6744 encoder->base.id, drm_get_encoder_name(encoder));
6745
6746 /*
6747 * Algorithm gets a little messy:
6748 *
6749 * - if the connector already has an assigned crtc, use it (but make
6750 * sure it's on first)
6751 *
6752 * - try to find the first unused crtc that can drive this connector,
6753 * and use that if we find one
6754 */
6755
6756 /* See if we already have a CRTC for this connector */
6757 if (encoder->crtc) {
6758 crtc = encoder->crtc;
6759
6760 intel_crtc = to_intel_crtc(crtc);
6761 old->dpms_mode = intel_crtc->dpms_mode;
6762 old->load_detect_temp = false;
6763
6764 /* Make sure the crtc and connector are running */
6765 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
6766 struct drm_encoder_helper_funcs *encoder_funcs;
6767 struct drm_crtc_helper_funcs *crtc_funcs;
6768
6769 crtc_funcs = crtc->helper_private;
6770 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
6771
6772 encoder_funcs = encoder->helper_private;
6773 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
6774 }
6775
6776 return true;
6777 }
6778
6779 /* Find an unused one (if possible) */
6780 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
6781 i++;
6782 if (!(encoder->possible_crtcs & (1 << i)))
6783 continue;
6784 if (!possible_crtc->enabled) {
6785 crtc = possible_crtc;
6786 break;
6787 }
6788 }
6789
6790 /*
6791 * If we didn't find an unused CRTC, don't use any.
6792 */
6793 if (!crtc) {
6794 DRM_DEBUG_KMS("no pipe available for load-detect\n");
6795 return false;
6796 }
6797
6798 encoder->crtc = crtc;
6799 connector->encoder = encoder;
6800
6801 intel_crtc = to_intel_crtc(crtc);
6802 old->dpms_mode = intel_crtc->dpms_mode;
6803 old->load_detect_temp = true;
6804 old->release_fb = NULL;
6805
6806 if (!mode)
6807 mode = &load_detect_mode;
6808
6809 old_fb = crtc->fb;
6810
6811 /* We need a framebuffer large enough to accommodate all accesses
6812 * that the plane may generate whilst we perform load detection.
6813 	 * We cannot rely on the fbcon either being present (we get called
6814 	 * during its initialisation to detect all boot displays, or it may
6815 	 * not even exist) or on it being large enough to satisfy the
6816 	 * requested mode.
6817 */
6818 crtc->fb = mode_fits_in_fbdev(dev, mode);
6819 if (crtc->fb == NULL) {
6820 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
6821 crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
6822 old->release_fb = crtc->fb;
6823 } else
6824 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
6825 if (IS_ERR(crtc->fb)) {
6826 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
6827 crtc->fb = old_fb;
6828 return false;
6829 }
6830
6831 if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
6832 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
6833 if (old->release_fb)
6834 old->release_fb->funcs->destroy(old->release_fb);
6835 crtc->fb = old_fb;
6836 return false;
6837 }
6838
6839 /* let the connector get through one full cycle before testing */
6840 intel_wait_for_vblank(dev, intel_crtc->pipe);
6841
6842 return true;
6843 }
6844
6845 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
6846 struct drm_connector *connector,
6847 struct intel_load_detect_pipe *old)
6848 {
6849 struct drm_encoder *encoder = &intel_encoder->base;
6850 struct drm_device *dev = encoder->dev;
6851 struct drm_crtc *crtc = encoder->crtc;
6852 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
6853 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
6854
6855 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6856 connector->base.id, drm_get_connector_name(connector),
6857 encoder->base.id, drm_get_encoder_name(encoder));
6858
6859 if (old->load_detect_temp) {
6860 connector->encoder = NULL;
6861 drm_helper_disable_unused_functions(dev);
6862
6863 if (old->release_fb)
6864 old->release_fb->funcs->destroy(old->release_fb);
6865
6866 return;
6867 }
6868
6869 /* Switch crtc and encoder back off if necessary */
6870 if (old->dpms_mode != DRM_MODE_DPMS_ON) {
6871 encoder_funcs->dpms(encoder, old->dpms_mode);
6872 crtc_funcs->dpms(crtc, old->dpms_mode);
6873 }
6874 }
6875
6876 /* Returns the clock of the currently programmed mode of the given pipe. */
6877 static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6878 {
6879 struct drm_i915_private *dev_priv = dev->dev_private;
6880 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6881 int pipe = intel_crtc->pipe;
6882 u32 dpll = I915_READ(DPLL(pipe));
6883 u32 fp;
6884 intel_clock_t clock;
6885
6886 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6887 fp = I915_READ(FP0(pipe));
6888 else
6889 fp = I915_READ(FP1(pipe));
6890
6891 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6892 if (IS_PINEVIEW(dev)) {
6893 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6894 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6895 } else {
6896 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6897 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6898 }
6899
6900 if (!IS_GEN2(dev)) {
6901 if (IS_PINEVIEW(dev))
6902 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6903 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6904 else
6905 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6906 DPLL_FPA01_P1_POST_DIV_SHIFT);
6907
6908 switch (dpll & DPLL_MODE_MASK) {
6909 case DPLLB_MODE_DAC_SERIAL:
6910 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6911 5 : 10;
6912 break;
6913 case DPLLB_MODE_LVDS:
6914 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6915 7 : 14;
6916 break;
6917 default:
6918 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
6919 "mode\n", (int)(dpll & DPLL_MODE_MASK));
6920 return 0;
6921 }
6922
6923 		/* XXX: Handle the 100MHz refclk */
6924 intel_clock(dev, 96000, &clock);
6925 } else {
6926 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
6927
6928 if (is_lvds) {
6929 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6930 DPLL_FPA01_P1_POST_DIV_SHIFT);
6931 clock.p2 = 14;
6932
6933 if ((dpll & PLL_REF_INPUT_MASK) ==
6934 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6935 /* XXX: might not be 66MHz */
6936 intel_clock(dev, 66000, &clock);
6937 } else
6938 intel_clock(dev, 48000, &clock);
6939 } else {
6940 if (dpll & PLL_P1_DIVIDE_BY_TWO)
6941 clock.p1 = 2;
6942 else {
6943 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6944 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6945 }
6946 if (dpll & PLL_P2_DIVIDE_BY_4)
6947 clock.p2 = 4;
6948 else
6949 clock.p2 = 2;
6950
6951 intel_clock(dev, 48000, &clock);
6952 }
6953 }
6954
6955 /* XXX: It would be nice to validate the clocks, but we can't reuse
6956 * i830PllIsValid() because it relies on the xf86_config connector
6957 * configuration being accurate, which it isn't necessarily.
6958 */
6959
6960 return clock.dot;
6961 }
6962
6963 /** Returns the currently programmed mode of the given pipe. */
6964 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6965 struct drm_crtc *crtc)
6966 {
6967 struct drm_i915_private *dev_priv = dev->dev_private;
6968 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6969 int pipe = intel_crtc->pipe;
6970 struct drm_display_mode *mode;
6971 int htot = I915_READ(HTOTAL(pipe));
6972 int hsync = I915_READ(HSYNC(pipe));
6973 int vtot = I915_READ(VTOTAL(pipe));
6974 int vsync = I915_READ(VSYNC(pipe));
6975
6976 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6977 if (!mode)
6978 return NULL;
6979
6980 mode->clock = intel_crtc_clock_get(dev, crtc);
6981 mode->hdisplay = (htot & 0xffff) + 1;
6982 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
6983 mode->hsync_start = (hsync & 0xffff) + 1;
6984 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
6985 mode->vdisplay = (vtot & 0xffff) + 1;
6986 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
6987 mode->vsync_start = (vsync & 0xffff) + 1;
6988 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
6989
6990 drm_mode_set_name(mode);
6991 drm_mode_set_crtcinfo(mode, 0);
6992
6993 return mode;
6994 }
6995
6996 #define GPU_IDLE_TIMEOUT 500 /* ms */
6997
6998 /* When this timer fires, we've been idle for a while */
6999 static void intel_gpu_idle_timer(unsigned long arg)
7000 {
7001 struct drm_device *dev = (struct drm_device *)arg;
7002 drm_i915_private_t *dev_priv = dev->dev_private;
7003
7004 if (!list_empty(&dev_priv->mm.active_list)) {
7005 /* Still processing requests, so just re-arm the timer. */
7006 mod_timer(&dev_priv->idle_timer, jiffies +
7007 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
7008 return;
7009 }
7010
7011 dev_priv->busy = false;
7012 queue_work(dev_priv->wq, &dev_priv->idle_work);
7013 }
7014
7015 #define CRTC_IDLE_TIMEOUT 1000 /* ms */
7016
7017 static void intel_crtc_idle_timer(unsigned long arg)
7018 {
7019 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
7020 struct drm_crtc *crtc = &intel_crtc->base;
7021 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
7022 struct intel_framebuffer *intel_fb;
7023
7024 intel_fb = to_intel_framebuffer(crtc->fb);
7025 if (intel_fb && intel_fb->obj->active) {
7026 /* The framebuffer is still being accessed by the GPU. */
7027 mod_timer(&intel_crtc->idle_timer, jiffies +
7028 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
7029 return;
7030 }
7031
7032 intel_crtc->busy = false;
7033 queue_work(dev_priv->wq, &dev_priv->idle_work);
7034 }
7035
7036 static void intel_increase_pllclock(struct drm_crtc *crtc)
7037 {
7038 struct drm_device *dev = crtc->dev;
7039 drm_i915_private_t *dev_priv = dev->dev_private;
7040 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7041 int pipe = intel_crtc->pipe;
7042 int dpll_reg = DPLL(pipe);
7043 int dpll;
7044
7045 if (HAS_PCH_SPLIT(dev))
7046 return;
7047
7048 if (!dev_priv->lvds_downclock_avail)
7049 return;
7050
7051 dpll = I915_READ(dpll_reg);
7052 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
7053 DRM_DEBUG_DRIVER("upclocking LVDS\n");
7054
7055 assert_panel_unlocked(dev_priv, pipe);
7056
7057 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
7058 I915_WRITE(dpll_reg, dpll);
7059 intel_wait_for_vblank(dev, pipe);
7060
7061 dpll = I915_READ(dpll_reg);
7062 if (dpll & DISPLAY_RATE_SELECT_FPA1)
7063 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
7064 }
7065
7066 /* Schedule downclock */
7067 mod_timer(&intel_crtc->idle_timer, jiffies +
7068 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
7069 }
7070
7071 static void intel_decrease_pllclock(struct drm_crtc *crtc)
7072 {
7073 struct drm_device *dev = crtc->dev;
7074 drm_i915_private_t *dev_priv = dev->dev_private;
7075 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7076
7077 if (HAS_PCH_SPLIT(dev))
7078 return;
7079
7080 if (!dev_priv->lvds_downclock_avail)
7081 return;
7082
7083 /*
7084 * Since this is called by a timer, we should never get here in
7085 * the manual case.
7086 */
7087 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
7088 int pipe = intel_crtc->pipe;
7089 int dpll_reg = DPLL(pipe);
7090 u32 dpll;
7091
7092 DRM_DEBUG_DRIVER("downclocking LVDS\n");
7093
7094 assert_panel_unlocked(dev_priv, pipe);
7095
7096 dpll = I915_READ(dpll_reg);
7097 dpll |= DISPLAY_RATE_SELECT_FPA1;
7098 I915_WRITE(dpll_reg, dpll);
7099 intel_wait_for_vblank(dev, pipe);
7100 dpll = I915_READ(dpll_reg);
7101 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
7102 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
7103 }
7104 }
7105
7106 /**
7107 * intel_idle_update - adjust clocks for idleness
7108 * @work: work struct
7109 *
7110 * Either the GPU or display (or both) went idle. Check the busy status
7111 * here and adjust the CRTC and GPU clocks as necessary.
7112 */
7113 static void intel_idle_update(struct work_struct *work)
7114 {
7115 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
7116 idle_work);
7117 struct drm_device *dev = dev_priv->dev;
7118 struct drm_crtc *crtc;
7119 struct intel_crtc *intel_crtc;
7120
7121 if (!i915_powersave)
7122 return;
7123
7124 mutex_lock(&dev->struct_mutex);
7125
7126 i915_update_gfx_val(dev_priv);
7127
7128 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7129 /* Skip inactive CRTCs */
7130 if (!crtc->fb)
7131 continue;
7132
7133 intel_crtc = to_intel_crtc(crtc);
7134 if (!intel_crtc->busy)
7135 intel_decrease_pllclock(crtc);
7136 }
7137
7139 mutex_unlock(&dev->struct_mutex);
7140 }
7141
7142 /**
7143 * intel_mark_busy - mark the GPU and possibly the display busy
7144 * @dev: drm device
7145 * @obj: object we're operating on
7146 *
7147 * Callers can use this function to indicate that the GPU is busy processing
7148 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
7149 * buffer), we'll also mark the display as busy, so we know to increase its
7150 * clock frequency.
7151 */
7152 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
7153 {
7154 drm_i915_private_t *dev_priv = dev->dev_private;
7155 struct drm_crtc *crtc = NULL;
7156 struct intel_framebuffer *intel_fb;
7157 struct intel_crtc *intel_crtc;
7158
7159 if (!drm_core_check_feature(dev, DRIVER_MODESET))
7160 return;
7161
7162 if (!dev_priv->busy)
7163 dev_priv->busy = true;
7164 else
7165 mod_timer(&dev_priv->idle_timer, jiffies +
7166 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
7167
7168 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7169 if (!crtc->fb)
7170 continue;
7171
7172 intel_crtc = to_intel_crtc(crtc);
7173 intel_fb = to_intel_framebuffer(crtc->fb);
7174 if (intel_fb->obj == obj) {
7175 if (!intel_crtc->busy) {
7176 /* Non-busy -> busy, upclock */
7177 intel_increase_pllclock(crtc);
7178 intel_crtc->busy = true;
7179 } else {
7180 /* Busy -> busy, put off timer */
7181 mod_timer(&intel_crtc->idle_timer, jiffies +
7182 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
7183 }
7184 }
7185 }
7186 }
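/*
 * Busy/idle tracking, tying the functions above together:
 * intel_mark_busy() marks the GPU (and any CRTC scanning out @obj) busy
 * and re-arms the idle timers; when a timer expires with no activity
 * left it queues idle_work, and intel_idle_update() then downclocks any
 * CRTC that is no longer busy via intel_decrease_pllclock().
 */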
7187
7188 static void intel_crtc_destroy(struct drm_crtc *crtc)
7189 {
7190 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7191 struct drm_device *dev = crtc->dev;
7192 struct intel_unpin_work *work;
7193 unsigned long flags;
7194
7195 spin_lock_irqsave(&dev->event_lock, flags);
7196 work = intel_crtc->unpin_work;
7197 intel_crtc->unpin_work = NULL;
7198 spin_unlock_irqrestore(&dev->event_lock, flags);
7199
7200 if (work) {
7201 cancel_work_sync(&work->work);
7202 kfree(work);
7203 }
7204
7205 drm_crtc_cleanup(crtc);
7206
7207 kfree(intel_crtc);
7208 }
7209
7210 static void intel_unpin_work_fn(struct work_struct *__work)
7211 {
7212 struct intel_unpin_work *work =
7213 container_of(__work, struct intel_unpin_work, work);
7214
7215 mutex_lock(&work->dev->struct_mutex);
7216 intel_unpin_fb_obj(work->old_fb_obj);
7217 drm_gem_object_unreference(&work->pending_flip_obj->base);
7218 drm_gem_object_unreference(&work->old_fb_obj->base);
7219
7220 intel_update_fbc(work->dev);
7221 mutex_unlock(&work->dev->struct_mutex);
7222 kfree(work);
7223 }
7224
7225 static void do_intel_finish_page_flip(struct drm_device *dev,
7226 struct drm_crtc *crtc)
7227 {
7228 drm_i915_private_t *dev_priv = dev->dev_private;
7229 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7230 struct intel_unpin_work *work;
7231 struct drm_i915_gem_object *obj;
7232 struct drm_pending_vblank_event *e;
7233 struct timeval tnow, tvbl;
7234 unsigned long flags;
7235
7236 /* Ignore early vblank irqs */
7237 if (intel_crtc == NULL)
7238 return;
7239
7240 do_gettimeofday(&tnow);
7241
7242 spin_lock_irqsave(&dev->event_lock, flags);
7243 work = intel_crtc->unpin_work;
7244 if (work == NULL || !work->pending) {
7245 spin_unlock_irqrestore(&dev->event_lock, flags);
7246 return;
7247 }
7248
7249 intel_crtc->unpin_work = NULL;
7250
7251 if (work->event) {
7252 e = work->event;
7253 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
7254
7255 /* Called before vblank count and timestamps have
7256 * been updated for the vblank interval of flip
7257 * completion? Need to increment vblank count and
7258 * add one videorefresh duration to returned timestamp
7259 * to account for this. We assume this happened if we
7260 * get called over 0.9 frame durations after the last
7261 * timestamped vblank.
7262 *
7263 		 * This calculation cannot be used with vrefresh rates
7264 		 * below 5Hz (10Hz to be on the safe side) without
7265 		 * promoting to 64-bit integers.
7266 */
7267 if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
7268 9 * crtc->framedur_ns) {
7269 e->event.sequence++;
7270 tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
7271 crtc->framedur_ns);
7272 }
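		/* For example, at 60Hz framedur_ns is roughly
		 * 16666667ns, so the check above fires when flip
		 * completion runs more than ~15ms (0.9 frame
		 * durations) after the last timestamped vblank.
		 */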
7273
7274 e->event.tv_sec = tvbl.tv_sec;
7275 e->event.tv_usec = tvbl.tv_usec;
7276
7277 list_add_tail(&e->base.link,
7278 &e->base.file_priv->event_list);
7279 wake_up_interruptible(&e->base.file_priv->event_wait);
7280 }
7281
7282 drm_vblank_put(dev, intel_crtc->pipe);
7283
7284 spin_unlock_irqrestore(&dev->event_lock, flags);
7285
7286 obj = work->old_fb_obj;
7287
7288 atomic_clear_mask(1 << intel_crtc->plane,
7289 &obj->pending_flip.counter);
7290
7291 wake_up(&dev_priv->pending_flip_queue);
7292 schedule_work(&work->work);
7293
7294 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
7295 }
7296
7297 void intel_finish_page_flip(struct drm_device *dev, int pipe)
7298 {
7299 drm_i915_private_t *dev_priv = dev->dev_private;
7300 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
7301
7302 do_intel_finish_page_flip(dev, crtc);
7303 }
7304
7305 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7306 {
7307 drm_i915_private_t *dev_priv = dev->dev_private;
7308 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
7309
7310 do_intel_finish_page_flip(dev, crtc);
7311 }
7312
7313 void intel_prepare_page_flip(struct drm_device *dev, int plane)
7314 {
7315 drm_i915_private_t *dev_priv = dev->dev_private;
7316 struct intel_crtc *intel_crtc =
7317 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
7318 unsigned long flags;
7319
7320 spin_lock_irqsave(&dev->event_lock, flags);
7321 if (intel_crtc->unpin_work) {
7322 if ((++intel_crtc->unpin_work->pending) > 1)
7323 DRM_ERROR("Prepared flip multiple times\n");
7324 } else {
7325 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
7326 }
7327 spin_unlock_irqrestore(&dev->event_lock, flags);
7328 }
7329
7330 static int intel_gen2_queue_flip(struct drm_device *dev,
7331 struct drm_crtc *crtc,
7332 struct drm_framebuffer *fb,
7333 struct drm_i915_gem_object *obj)
7334 {
7335 struct drm_i915_private *dev_priv = dev->dev_private;
7336 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7337 unsigned long offset;
7338 u32 flip_mask;
7339 int ret;
7340
7341 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7342 if (ret)
7343 goto err;
7344
7345 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7346 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
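	/* e.g. (hypothetical values) x = 8, y = 16 on a 32bpp fb with a
	 * 4096-byte pitch: offset = 16 * 4096 + 8 * 4 = 65568 bytes.
	 */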
7347
7348 ret = BEGIN_LP_RING(6);
7349 if (ret)
7350 goto err_unpin;
7351
7352 /* Can't queue multiple flips, so wait for the previous
7353 * one to finish before executing the next.
7354 */
7355 if (intel_crtc->plane)
7356 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7357 else
7358 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7359 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7360 OUT_RING(MI_NOOP);
7361 OUT_RING(MI_DISPLAY_FLIP |
7362 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7363 OUT_RING(fb->pitches[0]);
7364 OUT_RING(obj->gtt_offset + offset);
7365 OUT_RING(0); /* aux display base address, unused */
7366 ADVANCE_LP_RING();
7367 return 0;
7368
7369 err_unpin:
7370 intel_unpin_fb_obj(obj);
7371 err:
7372 return ret;
7373 }
7374
7375 static int intel_gen3_queue_flip(struct drm_device *dev,
7376 struct drm_crtc *crtc,
7377 struct drm_framebuffer *fb,
7378 struct drm_i915_gem_object *obj)
7379 {
7380 struct drm_i915_private *dev_priv = dev->dev_private;
7381 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7382 unsigned long offset;
7383 u32 flip_mask;
7384 int ret;
7385
7386 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7387 if (ret)
7388 goto err;
7389
7390 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7391 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7392
7393 ret = BEGIN_LP_RING(6);
7394 if (ret)
7395 goto err_unpin;
7396
7397 if (intel_crtc->plane)
7398 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7399 else
7400 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7401 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7402 OUT_RING(MI_NOOP);
7403 OUT_RING(MI_DISPLAY_FLIP_I915 |
7404 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7405 OUT_RING(fb->pitches[0]);
7406 OUT_RING(obj->gtt_offset + offset);
7407 OUT_RING(MI_NOOP);
7408
7409 ADVANCE_LP_RING();
7410 return 0;
7411
7412 err_unpin:
7413 intel_unpin_fb_obj(obj);
7414 err:
7415 return ret;
7416 }
7417
7418 static int intel_gen4_queue_flip(struct drm_device *dev,
7419 struct drm_crtc *crtc,
7420 struct drm_framebuffer *fb,
7421 struct drm_i915_gem_object *obj)
7422 {
7423 struct drm_i915_private *dev_priv = dev->dev_private;
7424 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7425 uint32_t pf, pipesrc;
7426 int ret;
7427
7428 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7429 if (ret)
7430 goto err;
7431
7432 ret = BEGIN_LP_RING(4);
7433 if (ret)
7434 goto err_unpin;
7435
7436 /* i965+ uses the linear or tiled offsets from the
7437 * Display Registers (which do not change across a page-flip)
7438 * so we need only reprogram the base address.
7439 */
7440 OUT_RING(MI_DISPLAY_FLIP |
7441 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7442 OUT_RING(fb->pitches[0]);
7443 OUT_RING(obj->gtt_offset | obj->tiling_mode);
7444
7445 /* XXX Enabling the panel-fitter across page-flip is so far
7446 * untested on non-native modes, so ignore it for now.
7447 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
7448 */
7449 pf = 0;
7450 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7451 OUT_RING(pf | pipesrc);
7452 ADVANCE_LP_RING();
7453 return 0;
7454
7455 err_unpin:
7456 intel_unpin_fb_obj(obj);
7457 err:
7458 return ret;
7459 }
7460
7461 static int intel_gen6_queue_flip(struct drm_device *dev,
7462 struct drm_crtc *crtc,
7463 struct drm_framebuffer *fb,
7464 struct drm_i915_gem_object *obj)
7465 {
7466 struct drm_i915_private *dev_priv = dev->dev_private;
7467 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7468 uint32_t pf, pipesrc;
7469 int ret;
7470
7471 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7472 if (ret)
7473 goto err;
7474
7475 ret = BEGIN_LP_RING(4);
7476 if (ret)
7477 goto err_unpin;
7478
7479 OUT_RING(MI_DISPLAY_FLIP |
7480 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7481 OUT_RING(fb->pitches[0] | obj->tiling_mode);
7482 OUT_RING(obj->gtt_offset);
7483
7484 /* Contrary to the suggestions in the documentation,
7485 * "Enable Panel Fitter" does not seem to be required when page
7486 * flipping with a non-native mode, and worse causes a normal
7487 * modeset to fail.
7488 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7489 */
7490 pf = 0;
7491 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7492 OUT_RING(pf | pipesrc);
7493 ADVANCE_LP_RING();
7494 return 0;
7495
7496 err_unpin:
7497 intel_unpin_fb_obj(obj);
7498 err:
7499 return ret;
7500 }
7501
7502 /*
7503 * On gen7 we currently use the blit ring because (in early silicon at least)
7504 * the render ring doesn't give us interrupts for page flip completion, which
7505 * means clients will hang after the first flip is queued. Fortunately the
7506 * blit ring generates interrupts properly, so use it instead.
7507 */
7508 static int intel_gen7_queue_flip(struct drm_device *dev,
7509 struct drm_crtc *crtc,
7510 struct drm_framebuffer *fb,
7511 struct drm_i915_gem_object *obj)
7512 {
7513 struct drm_i915_private *dev_priv = dev->dev_private;
7514 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7515 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
7516 uint32_t plane_bit = 0;
7517 int ret;
7518
7519 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7520 if (ret)
7521 goto err;
7522
7523 	switch (intel_crtc->plane) {
7524 case PLANE_A:
7525 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
7526 break;
7527 case PLANE_B:
7528 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
7529 break;
7530 case PLANE_C:
7531 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
7532 break;
7533 default:
7534 WARN_ONCE(1, "unknown plane in flip command\n");
7535 ret = -ENODEV;
7536 goto err_unpin;
7537 }
7538
7539 ret = intel_ring_begin(ring, 4);
7540 if (ret)
7541 goto err_unpin;
7542
7543 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
7544 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7545 intel_ring_emit(ring, (obj->gtt_offset));
7546 intel_ring_emit(ring, (MI_NOOP));
7547 intel_ring_advance(ring);
7548 return 0;
7549
7550 err_unpin:
7551 intel_unpin_fb_obj(obj);
7552 err:
7553 return ret;
7554 }
7555
7556 static int intel_default_queue_flip(struct drm_device *dev,
7557 struct drm_crtc *crtc,
7558 struct drm_framebuffer *fb,
7559 struct drm_i915_gem_object *obj)
7560 {
7561 return -ENODEV;
7562 }
7563
7564 static int intel_crtc_page_flip(struct drm_crtc *crtc,
7565 struct drm_framebuffer *fb,
7566 struct drm_pending_vblank_event *event)
7567 {
7568 struct drm_device *dev = crtc->dev;
7569 struct drm_i915_private *dev_priv = dev->dev_private;
7570 struct drm_framebuffer *old_fb = crtc->fb;
7571 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
7572 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7573 struct intel_unpin_work *work;
7574 unsigned long flags;
7575 int ret;
7576
7577 work = kzalloc(sizeof *work, GFP_KERNEL);
7578 if (work == NULL)
7579 return -ENOMEM;
7580
7581 work->event = event;
7582 work->dev = crtc->dev;
7583 work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
7584 INIT_WORK(&work->work, intel_unpin_work_fn);
7585
7586 ret = drm_vblank_get(dev, intel_crtc->pipe);
7587 if (ret)
7588 goto free_work;
7589
7590 /* We borrow the event spin lock for protecting unpin_work */
7591 spin_lock_irqsave(&dev->event_lock, flags);
7592 if (intel_crtc->unpin_work) {
7593 spin_unlock_irqrestore(&dev->event_lock, flags);
7594 kfree(work);
7595 drm_vblank_put(dev, intel_crtc->pipe);
7596
7597 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
7598 return -EBUSY;
7599 }
7600 intel_crtc->unpin_work = work;
7601 spin_unlock_irqrestore(&dev->event_lock, flags);
7602
7603 mutex_lock(&dev->struct_mutex);
7604
7605 /* Reference the objects for the scheduled work. */
7606 drm_gem_object_reference(&work->old_fb_obj->base);
7607 drm_gem_object_reference(&obj->base);
7608
7609 crtc->fb = fb;
7610
7611 work->pending_flip_obj = obj;
7612
7613 work->enable_stall_check = true;
7614
7615 /* Block clients from rendering to the new back buffer until
7616 * the flip occurs and the object is no longer visible.
7617 */
7618 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
7619
7620 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
7621 if (ret)
7622 goto cleanup_pending;
7623
7624 intel_disable_fbc(dev);
7625 mutex_unlock(&dev->struct_mutex);
7626
7627 trace_i915_flip_request(intel_crtc->plane, obj);
7628
7629 return 0;
7630
7631 cleanup_pending:
7632 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
7633 crtc->fb = old_fb;
7634 drm_gem_object_unreference(&work->old_fb_obj->base);
7635 drm_gem_object_unreference(&obj->base);
7636 mutex_unlock(&dev->struct_mutex);
7637
7638 spin_lock_irqsave(&dev->event_lock, flags);
7639 intel_crtc->unpin_work = NULL;
7640 spin_unlock_irqrestore(&dev->event_lock, flags);
7641
7642 drm_vblank_put(dev, intel_crtc->pipe);
7643 free_work:
7644 kfree(work);
7645
7646 return ret;
7647 }
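/*
 * Flip lifecycle, as implemented above: intel_crtc_page_flip() builds
 * the unpin_work, takes a vblank reference and queues the flip on the
 * ring; the flip-pending interrupt lands in intel_prepare_page_flip();
 * the vblank interrupt after completion reaches
 * do_intel_finish_page_flip(), which delivers the userspace event and
 * schedules intel_unpin_work_fn() to drop the old framebuffer's pin and
 * references.
 */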
7648
7649 static void intel_sanitize_modesetting(struct drm_device *dev,
7650 int pipe, int plane)
7651 {
7652 struct drm_i915_private *dev_priv = dev->dev_private;
7653 u32 reg, val;
7654 int i;
7655
7656 /* Clear any frame start delays used for debugging left by the BIOS */
7657 for_each_pipe(i) {
7658 reg = PIPECONF(i);
7659 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
7660 }
7661
7662 if (HAS_PCH_SPLIT(dev))
7663 return;
7664
7665 /* Who knows what state these registers were left in by the BIOS or
7666 * grub?
7667 *
7668 * If we leave the registers in a conflicting state (e.g. with the
7669 * display plane reading from the other pipe than the one we intend
7670 * to use) then when we attempt to teardown the active mode, we will
7671 * not disable the pipes and planes in the correct order -- leaving
7672 * a plane reading from a disabled pipe and possibly leading to
7673 * undefined behaviour.
7674 */
7675
7676 reg = DSPCNTR(plane);
7677 val = I915_READ(reg);
7678
7679 if ((val & DISPLAY_PLANE_ENABLE) == 0)
7680 return;
7681 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
7682 return;
7683
7684 /* This display plane is active and attached to the other CPU pipe. */
7685 pipe = !pipe;
7686
7687 /* Disable the plane and wait for it to stop reading from the pipe. */
7688 intel_disable_plane(dev_priv, plane, pipe);
7689 intel_disable_pipe(dev_priv, pipe);
7690 }
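/*
 * Concretely: if the BIOS left this plane enabled but scanning out from
 * the other pipe than the one we intend to use, the code above disables
 * the plane first and only then the other pipe, so the plane never ends
 * up reading from a disabled pipe.
 */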
7691
7692 static void intel_crtc_reset(struct drm_crtc *crtc)
7693 {
7694 struct drm_device *dev = crtc->dev;
7695 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7696
7697 /* Reset flags back to the 'unknown' status so that they
7698 * will be correctly set on the initial modeset.
7699 */
7700 intel_crtc->dpms_mode = -1;
7701
7702 /* We need to fix up any BIOS configuration that conflicts with
7703 * our expectations.
7704 */
7705 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
7706 }
7707
7708 static struct drm_crtc_helper_funcs intel_helper_funcs = {
7709 .dpms = intel_crtc_dpms,
7710 .mode_fixup = intel_crtc_mode_fixup,
7711 .mode_set = intel_crtc_mode_set,
7712 .mode_set_base = intel_pipe_set_base,
7713 .mode_set_base_atomic = intel_pipe_set_base_atomic,
7714 .load_lut = intel_crtc_load_lut,
7715 .disable = intel_crtc_disable,
7716 };
7717
7718 static const struct drm_crtc_funcs intel_crtc_funcs = {
7719 .reset = intel_crtc_reset,
7720 .cursor_set = intel_crtc_cursor_set,
7721 .cursor_move = intel_crtc_cursor_move,
7722 .gamma_set = intel_crtc_gamma_set,
7723 .set_config = drm_crtc_helper_set_config,
7724 .destroy = intel_crtc_destroy,
7725 .page_flip = intel_crtc_page_flip,
7726 };
7727
7728 static void intel_crtc_init(struct drm_device *dev, int pipe)
7729 {
7730 drm_i915_private_t *dev_priv = dev->dev_private;
7731 struct intel_crtc *intel_crtc;
7732 int i;
7733
7734 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
7735 if (intel_crtc == NULL)
7736 return;
7737
7738 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
7739
7740 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
7741 for (i = 0; i < 256; i++) {
7742 intel_crtc->lut_r[i] = i;
7743 intel_crtc->lut_g[i] = i;
7744 intel_crtc->lut_b[i] = i;
7745 }
7746
7747 /* Swap pipes & planes for FBC on pre-965 */
7748 intel_crtc->pipe = pipe;
7749 intel_crtc->plane = pipe;
7750 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
7751 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
7752 intel_crtc->plane = !pipe;
7753 }
7754
7755 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
7756 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
7757 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
7758 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
7759
7760 intel_crtc_reset(&intel_crtc->base);
7761 intel_crtc->active = true; /* force the pipe off on setup_init_config */
7762 intel_crtc->bpp = 24; /* default for pre-Ironlake */
7763
7764 if (HAS_PCH_SPLIT(dev)) {
7765 if (pipe == 2 && IS_IVYBRIDGE(dev))
7766 intel_crtc->no_pll = true;
7767 intel_helper_funcs.prepare = ironlake_crtc_prepare;
7768 intel_helper_funcs.commit = ironlake_crtc_commit;
7769 } else {
7770 intel_helper_funcs.prepare = i9xx_crtc_prepare;
7771 intel_helper_funcs.commit = i9xx_crtc_commit;
7772 }
7773
7774 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
7775
7776 intel_crtc->busy = false;
7777
7778 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
7779 (unsigned long)intel_crtc);
7780 }
7781
7782 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7783 struct drm_file *file)
7784 {
7785 drm_i915_private_t *dev_priv = dev->dev_private;
7786 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7787 struct drm_mode_object *drmmode_obj;
7788 struct intel_crtc *crtc;
7789
7790 if (!dev_priv) {
7791 DRM_ERROR("called with no initialization\n");
7792 return -EINVAL;
7793 }
7794
7795 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7796 DRM_MODE_OBJECT_CRTC);
7797
7798 if (!drmmode_obj) {
7799 DRM_ERROR("no such CRTC id\n");
7800 return -EINVAL;
7801 }
7802
7803 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
7804 pipe_from_crtc_id->pipe = crtc->pipe;
7805
7806 return 0;
7807 }
7808
7809 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
7810 {
7811 struct intel_encoder *encoder;
7812 int index_mask = 0;
7813 int entry = 0;
7814
7815 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7816 if (type_mask & encoder->clone_mask)
7817 index_mask |= (1 << entry);
7818 entry++;
7819 }
7820
7821 return index_mask;
7822 }
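/*
 * For example (hypothetical encoder list): with encoders [CRT, SDVO,
 * LVDS] and a type_mask that appears in the CRT and SDVO clone_masks
 * but not in the LVDS one, the returned index_mask is 0b011, i.e. the
 * first two encoders are valid clones for the given type.
 */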
7823
7824 static bool has_edp_a(struct drm_device *dev)
7825 {
7826 struct drm_i915_private *dev_priv = dev->dev_private;
7827
7828 if (!IS_MOBILE(dev))
7829 return false;
7830
7831 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
7832 return false;
7833
7834 if (IS_GEN5(dev) &&
7835 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
7836 return false;
7837
7838 return true;
7839 }
7840
7841 static void intel_setup_outputs(struct drm_device *dev)
7842 {
7843 struct drm_i915_private *dev_priv = dev->dev_private;
7844 struct intel_encoder *encoder;
7845 bool dpd_is_edp = false;
7846 bool has_lvds;
7847
7848 has_lvds = intel_lvds_init(dev);
7849 if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
7850 /* disable the panel fitter on everything but LVDS */
7851 I915_WRITE(PFIT_CONTROL, 0);
7852 }
7853
7854 if (HAS_PCH_SPLIT(dev)) {
7855 dpd_is_edp = intel_dpd_is_edp(dev);
7856
7857 if (has_edp_a(dev))
7858 intel_dp_init(dev, DP_A);
7859
7860 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7861 intel_dp_init(dev, PCH_DP_D);
7862 }
7863
7864 intel_crt_init(dev);
7865
7866 if (HAS_PCH_SPLIT(dev)) {
7867 int found;
7868
7869 if (I915_READ(HDMIB) & PORT_DETECTED) {
7870 /* PCH SDVOB multiplex with HDMIB */
7871 found = intel_sdvo_init(dev, PCH_SDVOB);
7872 if (!found)
7873 intel_hdmi_init(dev, HDMIB);
7874 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
7875 intel_dp_init(dev, PCH_DP_B);
7876 }
7877
7878 if (I915_READ(HDMIC) & PORT_DETECTED)
7879 intel_hdmi_init(dev, HDMIC);
7880
7881 if (I915_READ(HDMID) & PORT_DETECTED)
7882 intel_hdmi_init(dev, HDMID);
7883
7884 if (I915_READ(PCH_DP_C) & DP_DETECTED)
7885 intel_dp_init(dev, PCH_DP_C);
7886
7887 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7888 intel_dp_init(dev, PCH_DP_D);
7889
7890 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
7891 bool found = false;
7892
7893 if (I915_READ(SDVOB) & SDVO_DETECTED) {
7894 DRM_DEBUG_KMS("probing SDVOB\n");
7895 found = intel_sdvo_init(dev, SDVOB);
7896 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
7897 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
7898 intel_hdmi_init(dev, SDVOB);
7899 }
7900
7901 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
7902 DRM_DEBUG_KMS("probing DP_B\n");
7903 intel_dp_init(dev, DP_B);
7904 }
7905 }
7906
7907 	/* Before G4X, SDVOC doesn't have its own detect register */
7908
7909 if (I915_READ(SDVOB) & SDVO_DETECTED) {
7910 DRM_DEBUG_KMS("probing SDVOC\n");
7911 found = intel_sdvo_init(dev, SDVOC);
7912 }
7913
7914 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
7915
7916 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
7917 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
7918 intel_hdmi_init(dev, SDVOC);
7919 }
7920 if (SUPPORTS_INTEGRATED_DP(dev)) {
7921 DRM_DEBUG_KMS("probing DP_C\n");
7922 intel_dp_init(dev, DP_C);
7923 }
7924 }
7925
7926 if (SUPPORTS_INTEGRATED_DP(dev) &&
7927 (I915_READ(DP_D) & DP_DETECTED)) {
7928 DRM_DEBUG_KMS("probing DP_D\n");
7929 intel_dp_init(dev, DP_D);
7930 }
7931 } else if (IS_GEN2(dev))
7932 intel_dvo_init(dev);
7933
7934 if (SUPPORTS_TV(dev))
7935 intel_tv_init(dev);
7936
7937 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7938 encoder->base.possible_crtcs = encoder->crtc_mask;
7939 encoder->base.possible_clones =
7940 intel_encoder_clones(dev, encoder->clone_mask);
7941 }
7942
7943 /* disable all the possible outputs/crtcs before entering KMS mode */
7944 drm_helper_disable_unused_functions(dev);
7945
7946 if (HAS_PCH_SPLIT(dev))
7947 ironlake_init_pch_refclk(dev);
7948 }
7949
7950 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
7951 {
7952 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7953
7954 drm_framebuffer_cleanup(fb);
7955 drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
7956
7957 kfree(intel_fb);
7958 }
7959
7960 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7961 struct drm_file *file,
7962 unsigned int *handle)
7963 {
7964 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7965 struct drm_i915_gem_object *obj = intel_fb->obj;
7966
7967 return drm_gem_handle_create(file, &obj->base, handle);
7968 }
7969
7970 static const struct drm_framebuffer_funcs intel_fb_funcs = {
7971 .destroy = intel_user_framebuffer_destroy,
7972 .create_handle = intel_user_framebuffer_create_handle,
7973 };
7974
7975 int intel_framebuffer_init(struct drm_device *dev,
7976 struct intel_framebuffer *intel_fb,
7977 struct drm_mode_fb_cmd2 *mode_cmd,
7978 struct drm_i915_gem_object *obj)
7979 {
7980 int ret;
7981
7982 if (obj->tiling_mode == I915_TILING_Y)
7983 return -EINVAL;
7984
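	/* Display hardware wants a 64-byte aligned stride. */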
7985 if (mode_cmd->pitches[0] & 63)
7986 return -EINVAL;
7987
7988 switch (mode_cmd->pixel_format) {
7989 case DRM_FORMAT_RGB332:
7990 case DRM_FORMAT_RGB565:
7991 case DRM_FORMAT_XRGB8888:
7992 case DRM_FORMAT_XBGR8888:
7993 case DRM_FORMAT_ARGB8888:
7994 case DRM_FORMAT_XRGB2101010:
7995 case DRM_FORMAT_ARGB2101010:
7996 /* RGB formats are common across chipsets */
7997 break;
7998 case DRM_FORMAT_YUYV:
7999 case DRM_FORMAT_UYVY:
8000 case DRM_FORMAT_YVYU:
8001 case DRM_FORMAT_VYUY:
8002 break;
8003 default:
8004 DRM_DEBUG_KMS("unsupported pixel format %u\n",
8005 mode_cmd->pixel_format);
8006 return -EINVAL;
8007 }
8008
8009 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8010 if (ret) {
8011 DRM_ERROR("framebuffer init failed %d\n", ret);
8012 return ret;
8013 }
8014
8015 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
8016 intel_fb->obj = obj;
8017 return 0;
8018 }
8019
8020 static struct drm_framebuffer *
8021 intel_user_framebuffer_create(struct drm_device *dev,
8022 struct drm_file *filp,
8023 struct drm_mode_fb_cmd2 *mode_cmd)
8024 {
8025 struct drm_i915_gem_object *obj;
8026
8027 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
8028 mode_cmd->handles[0]));
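	/* drm_gem_object_lookup() may have returned NULL; base is the
	 * object's first member, so &obj->base is then NULL as well.
	 */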
8029 if (&obj->base == NULL)
8030 return ERR_PTR(-ENOENT);
8031
8032 return intel_framebuffer_create(dev, mode_cmd, obj);
8033 }
8034
8035 static const struct drm_mode_config_funcs intel_mode_funcs = {
8036 .fb_create = intel_user_framebuffer_create,
8037 .output_poll_changed = intel_fb_output_poll_changed,
8038 };
8039
8040 static struct drm_i915_gem_object *
8041 intel_alloc_context_page(struct drm_device *dev)
8042 {
8043 struct drm_i915_gem_object *ctx;
8044 int ret;
8045
8046 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8047
8048 ctx = i915_gem_alloc_object(dev, 4096);
8049 if (!ctx) {
8050 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8051 return NULL;
8052 }
8053
8054 ret = i915_gem_object_pin(ctx, 4096, true);
8055 if (ret) {
8056 DRM_ERROR("failed to pin power context: %d\n", ret);
8057 goto err_unref;
8058 }
8059
8060 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8061 if (ret) {
8062 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8063 goto err_unpin;
8064 }
8065
8066 return ctx;
8067
8068 err_unpin:
8069 i915_gem_object_unpin(ctx);
8070 err_unref:
8071 drm_gem_object_unreference(&ctx->base);
8072 	/* struct_mutex belongs to the caller (see the WARN_ON above); do not unlock it here */
8073 return NULL;
8074 }
8075
8076 bool ironlake_set_drps(struct drm_device *dev, u8 val)
8077 {
8078 struct drm_i915_private *dev_priv = dev->dev_private;
8079 u16 rgvswctl;
8080
8081 rgvswctl = I915_READ16(MEMSWCTL);
8082 if (rgvswctl & MEMCTL_CMD_STS) {
8083 DRM_DEBUG("gpu busy, RCS change rejected\n");
8084 return false; /* still busy with another command */
8085 }
8086
8087 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
8088 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
8089 I915_WRITE16(MEMSWCTL, rgvswctl);
8090 POSTING_READ16(MEMSWCTL);
8091
8092 rgvswctl |= MEMCTL_CMD_STS;
8093 I915_WRITE16(MEMSWCTL, rgvswctl);
8094
8095 return true;
8096 }
8097
8098 void ironlake_enable_drps(struct drm_device *dev)
8099 {
8100 struct drm_i915_private *dev_priv = dev->dev_private;
8101 u32 rgvmodectl = I915_READ(MEMMODECTL);
8102 u8 fmax, fmin, fstart, vstart;
8103
8104 /* Enable temp reporting */
8105 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
8106 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
8107
8108 /* 100ms RC evaluation intervals */
8109 I915_WRITE(RCUPEI, 100000);
8110 I915_WRITE(RCDNEI, 100000);
8111
8112 /* Set max/min thresholds to 90ms and 80ms respectively */
8113 I915_WRITE(RCBMAXAVG, 90000);
8114 I915_WRITE(RCBMINAVG, 80000);
8115
8116 I915_WRITE(MEMIHYST, 1);
8117
8118 /* Set up min, max, and cur for interrupt handling */
8119 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
8120 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
8121 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
8122 MEMMODE_FSTART_SHIFT;
8123
8124 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
8125 PXVFREQ_PX_SHIFT;
8126
8127 dev_priv->fmax = fmax; /* IPS callback will increase this */
8128 dev_priv->fstart = fstart;
8129
8130 dev_priv->max_delay = fstart;
8131 dev_priv->min_delay = fmin;
8132 dev_priv->cur_delay = fstart;
8133
8134 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
8135 fmax, fmin, fstart);
8136
8137 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
8138
8139 /*
8140 * Interrupts will be enabled in ironlake_irq_postinstall
8141 */
8142
8143 I915_WRITE(VIDSTART, vstart);
8144 POSTING_READ(VIDSTART);
8145
8146 rgvmodectl |= MEMMODE_SWMODE_EN;
8147 I915_WRITE(MEMMODECTL, rgvmodectl);
8148
8149 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
8150 DRM_ERROR("stuck trying to change perf mode\n");
8151 msleep(1);
8152
8153 ironlake_set_drps(dev, fstart);
8154
8155 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
8156 I915_READ(0x112e0);
8157 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
8158 dev_priv->last_count2 = I915_READ(0x112f4);
8159 getrawmonotonic(&dev_priv->last_time2);
8160 }
8161
8162 void ironlake_disable_drps(struct drm_device *dev)
8163 {
8164 struct drm_i915_private *dev_priv = dev->dev_private;
8165 u16 rgvswctl = I915_READ16(MEMSWCTL);
8166
8167 /* Ack interrupts, disable EFC interrupt */
8168 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
8169 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
8170 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
8171 I915_WRITE(DEIIR, DE_PCU_EVENT);
8172 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
8173
8174 /* Go back to the starting frequency */
8175 ironlake_set_drps(dev, dev_priv->fstart);
8176 msleep(1);
8177 rgvswctl |= MEMCTL_CMD_STS;
8178 I915_WRITE(MEMSWCTL, rgvswctl);
8179 msleep(1);
8181 }
8182
8183 void gen6_set_rps(struct drm_device *dev, u8 val)
8184 {
8185 struct drm_i915_private *dev_priv = dev->dev_private;
8186 u32 swreq;
8187
8188 swreq = (val & 0x3ff) << 25;
8189 I915_WRITE(GEN6_RPNSWREQ, swreq);
8190 }
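/*
 * Encoding example (hypothetical value): val = 20 yields
 * swreq = 20 << 25 = 0x28000000, i.e. the frequency request sits in the
 * top bits of GEN6_RPNSWREQ; the granularity of val is the RP unit's
 * platform-specific frequency step.
 */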
8191
8192 void gen6_disable_rps(struct drm_device *dev)
8193 {
8194 struct drm_i915_private *dev_priv = dev->dev_private;
8195
8196 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
8197 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
8198 I915_WRITE(GEN6_PMIER, 0);
8199 	/* Completely masking the PM interrupts here does not race with the rps
8200 	 * work item re-enabling them, because the work item masks via a different
8201 	 * register (PMIMR). The only risk is leaving stale bits in PMIIR and
8202 	 * PMIMR, which gen6_enable_rps will clean up. */
8203
8204 spin_lock_irq(&dev_priv->rps_lock);
8205 dev_priv->pm_iir = 0;
8206 spin_unlock_irq(&dev_priv->rps_lock);
8207
8208 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
8209 }
8210
8211 static unsigned long intel_pxfreq(u32 vidfreq)
8212 {
8213 unsigned long freq;
8214 int div = (vidfreq & 0x3f0000) >> 16;
8215 int post = (vidfreq & 0x3000) >> 12;
8216 int pre = (vidfreq & 0x7);
8217
8218 if (!pre)
8219 return 0;
8220
8221 freq = ((div * 133333) / ((1<<post) * pre));
8222
8223 return freq;
8224 }
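/*
 * Worked example for the function above (hypothetical register value):
 * vidfreq = 0x101002 gives div = 16, post = 1, pre = 2, so
 * freq = (16 * 133333) / ((1 << 1) * 2) = 533332, roughly 533MHz with
 * the 133333 constant read as kHz.
 */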
8225
8226 void intel_init_emon(struct drm_device *dev)
8227 {
8228 struct drm_i915_private *dev_priv = dev->dev_private;
8229 u32 lcfuse;
8230 u8 pxw[16];
8231 int i;
8232
8233 	/* Disable PMON while we program the event weights */
8234 I915_WRITE(ECR, 0);
8235 POSTING_READ(ECR);
8236
8237 /* Program energy weights for various events */
8238 I915_WRITE(SDEW, 0x15040d00);
8239 I915_WRITE(CSIEW0, 0x007f0000);
8240 I915_WRITE(CSIEW1, 0x1e220004);
8241 I915_WRITE(CSIEW2, 0x04000004);
8242
8243 for (i = 0; i < 5; i++)
8244 I915_WRITE(PEW + (i * 4), 0);
8245 for (i = 0; i < 3; i++)
8246 I915_WRITE(DEW + (i * 4), 0);
8247
8248 /* Program P-state weights to account for frequency power adjustment */
8249 for (i = 0; i < 16; i++) {
8250 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
8251 unsigned long freq = intel_pxfreq(pxvidfreq);
8252 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
8253 PXVFREQ_PX_SHIFT;
8254 unsigned long val;
8255
8256 val = vid * vid;
8257 val *= (freq / 1000);
8258 val *= 255;
8259 val /= (127*127*900);
8260 if (val > 0xff)
8261 DRM_ERROR("bad pxval: %ld\n", val);
8262 pxw[i] = val;
8263 }
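	/*
	 * Worked example (hypothetical readings): vid = 100 and
	 * freq = 533332 give val = 100*100 * 533 * 255 / (127*127*900)
	 * = 93, comfortably inside the byte-sized weight the hardware
	 * expects.
	 */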
8264 /* Render standby states get 0 weight */
8265 pxw[14] = 0;
8266 pxw[15] = 0;
8267
8268 for (i = 0; i < 4; i++) {
8269 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
8270 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
8271 I915_WRITE(PXW + (i * 4), val);
8272 }
8273
8274 /* Adjust magic regs to magic values (more experimental results) */
8275 I915_WRITE(OGW0, 0);
8276 I915_WRITE(OGW1, 0);
8277 I915_WRITE(EG0, 0x00007f00);
8278 I915_WRITE(EG1, 0x0000000e);
8279 I915_WRITE(EG2, 0x000e0000);
8280 I915_WRITE(EG3, 0x68000300);
8281 I915_WRITE(EG4, 0x42000000);
8282 I915_WRITE(EG5, 0x00140031);
8283 I915_WRITE(EG6, 0);
8284 I915_WRITE(EG7, 0);
8285
8286 for (i = 0; i < 8; i++)
8287 I915_WRITE(PXWL + (i * 4), 0);
8288
8289 /* Enable PMON + select events */
8290 I915_WRITE(ECR, 0x80000019);
8291
8292 lcfuse = I915_READ(LCFUSE02);
8293
8294 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
8295 }
8296
8297 static int intel_enable_rc6(struct drm_device *dev)
8298 {
8299 /*
8300 * Respect the kernel parameter if it is set
8301 */
8302 if (i915_enable_rc6 >= 0)
8303 return i915_enable_rc6;
8304
8305 /*
8306 * Disable RC6 on Ironlake
8307 */
8308 if (INTEL_INFO(dev)->gen == 5)
8309 return 0;
8310
8311 	/*
8312 	 * Disable deep RC6 states on Sandybridge (plain RC6 stays enabled)
8313 	 */
8314 if (INTEL_INFO(dev)->gen == 6) {
8315 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
8316 return INTEL_RC6_ENABLE;
8317 }
8318 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
8319 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8320 }
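/*
 * In other words: a non-negative i915_enable_rc6 module parameter is
 * used verbatim as the RC6 mode mask, while the default (-1) leaves
 * Ironlake with RC6 off and Sandybridge with plain RC6 only (deep RC6
 * stays disabled).
 */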
8321
8322 void gen6_enable_rps(struct drm_i915_private *dev_priv)
8323 {
8324 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
8325 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
8326 u32 pcu_mbox, rc6_mask = 0;
8327 u32 gtfifodbg;
8328 int cur_freq, min_freq, max_freq;
8329 int rc6_mode;
8330 int i;
8331
8332 /* Here begins a magic sequence of register writes to enable
8333 * auto-downclocking.
8334 *
8335 	 * There might be some value in exposing these to
8336 	 * userspace...
8337 */
8338 I915_WRITE(GEN6_RC_STATE, 0);
8339 mutex_lock(&dev_priv->dev->struct_mutex);
8340
8341 	/* Clear any stale GT fifo errors now so they aren't confused with new ones */
8342 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
8343 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
8344 I915_WRITE(GTFIFODBG, gtfifodbg);
8345 }
8346
8347 gen6_gt_force_wake_get(dev_priv);
8348
8349 /* disable the counters and set deterministic thresholds */
8350 I915_WRITE(GEN6_RC_CONTROL, 0);
8351
8352 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
8353 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
8354 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
8355 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
8356 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
8357
8358 for (i = 0; i < I915_NUM_RINGS; i++)
8359 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
8360
8361 I915_WRITE(GEN6_RC_SLEEP, 0);
8362 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
8363 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
8364 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
8365 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
8366
8367 rc6_mode = intel_enable_rc6(dev_priv->dev);
8368 if (rc6_mode & INTEL_RC6_ENABLE)
8369 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
8370
8371 if (rc6_mode & INTEL_RC6p_ENABLE)
8372 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
8373
8374 if (rc6_mode & INTEL_RC6pp_ENABLE)
8375 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
8376
8377 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
8378 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
8379 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
8380 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
8381
8382 I915_WRITE(GEN6_RC_CONTROL,
8383 rc6_mask |
8384 GEN6_RC_CTL_EI_MODE(1) |
8385 GEN6_RC_CTL_HW_ENABLE);
8386
8387 I915_WRITE(GEN6_RPNSWREQ,
8388 GEN6_FREQUENCY(10) |
8389 GEN6_OFFSET(0) |
8390 GEN6_AGGRESSIVE_TURBO);
8391 I915_WRITE(GEN6_RC_VIDEO_FREQ,
8392 GEN6_FREQUENCY(12));
8393
8394 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
8395 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
8396 18 << 24 |
8397 6 << 16);
8398 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
8399 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
8400 I915_WRITE(GEN6_RP_UP_EI, 100000);
8401 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
8402 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
8403 I915_WRITE(GEN6_RP_CONTROL,
8404 GEN6_RP_MEDIA_TURBO |
8405 GEN6_RP_MEDIA_HW_NORMAL_MODE |
8406 GEN6_RP_MEDIA_IS_GFX |
8407 GEN6_RP_ENABLE |
8408 GEN6_RP_UP_BUSY_AVG |
8409 GEN6_RP_DOWN_IDLE_CONT);
8410
8411 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8412 500))
8413 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8414
8415 I915_WRITE(GEN6_PCODE_DATA, 0);
8416 I915_WRITE(GEN6_PCODE_MAILBOX,
8417 GEN6_PCODE_READY |
8418 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8419 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8420 500))
8421 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8422
8423 min_freq = (rp_state_cap & 0xff0000) >> 16;
8424 max_freq = rp_state_cap & 0xff;
8425 cur_freq = (gt_perf_status & 0xff00) >> 8;
8426
8427 /* Check for overclock support */
8428 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8429 500))
8430 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8431 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
8432 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
8433 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8434 500))
8435 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8436 if (pcu_mbox & (1<<31)) { /* OC supported */
8437 max_freq = pcu_mbox & 0xff;
8438 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
8439 }
8440
8441 /* In units of 100MHz */
8442 dev_priv->max_delay = max_freq;
8443 dev_priv->min_delay = min_freq;
8444 dev_priv->cur_delay = cur_freq;
8445
8446 /* requires MSI enabled */
8447 I915_WRITE(GEN6_PMIER,
8448 GEN6_PM_MBOX_EVENT |
8449 GEN6_PM_THERMAL_EVENT |
8450 GEN6_PM_RP_DOWN_TIMEOUT |
8451 GEN6_PM_RP_UP_THRESHOLD |
8452 GEN6_PM_RP_DOWN_THRESHOLD |
8453 GEN6_PM_RP_UP_EI_EXPIRED |
8454 GEN6_PM_RP_DOWN_EI_EXPIRED);
8455 spin_lock_irq(&dev_priv->rps_lock);
8456 WARN_ON(dev_priv->pm_iir != 0);
8457 I915_WRITE(GEN6_PMIMR, 0);
8458 spin_unlock_irq(&dev_priv->rps_lock);
8459 /* enable all PM interrupts */
8460 I915_WRITE(GEN6_PMINTRMSK, 0);
8461
8462 gen6_gt_force_wake_put(dev_priv);
8463 mutex_unlock(&dev_priv->dev->struct_mutex);
8464 }
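/*
 * The GEN6_PCODE_MAILBOX handshake above repeats three times: wait for
 * READY to clear, write GEN6_PCODE_DATA, write the command with
 * GEN6_PCODE_READY set, then wait for the hardware to clear READY
 * again. A minimal sketch of that pattern as a helper (hypothetical,
 * not part of this driver; it only uses the registers and wait_for()
 * seen above):
 */
static int gen6_pcode_write_sketch(struct drm_i915_private *dev_priv,
				   u32 mbox, u32 val)
{
	/* The mailbox must be idle before a command may be submitted. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		return -ETIMEDOUT;

	/* Payload first, then the command with the READY bit set. */
	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* The hardware clears READY once it has consumed the command. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		return -ETIMEDOUT;

	return 0;
}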
8465
8466 void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
8467 {
8468 int min_freq = 15;
8469 int gpu_freq, ia_freq, max_ia_freq;
8470 int scaling_factor = 180;
8471
8472 max_ia_freq = cpufreq_quick_get_max(0);
8473 /*
8474 * Default to measured freq if none found, PCU will ensure we don't go
8475 * over
8476 */
8477 if (!max_ia_freq)
8478 max_ia_freq = tsc_khz;
8479
8480 /* Convert from kHz to MHz */
8481 max_ia_freq /= 1000;
8482
8483 mutex_lock(&dev_priv->dev->struct_mutex);
8484
8485 /*
8486 * For each potential GPU frequency, load a ring frequency we'd like
8487 * to use for memory access. We do this by specifying the IA frequency
8488 * the PCU should use as a reference to determine the ring frequency.
8489 */
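	/*
	 * Scaling example (hypothetical CPU): with max_ia_freq = 3400
	 * (MHz) and a GPU step 4 below max_delay,
	 * ia_freq = 3400 - (4 * 180) / 2 = 3040, which
	 * DIV_ROUND_CLOSEST() turns into 30, the 100MHz-granularity
	 * ratio written to the mailbox below.
	 */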
8490 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
8491 gpu_freq--) {
8492 int diff = dev_priv->max_delay - gpu_freq;
8493
8494 /*
8495 * For GPU frequencies less than 750MHz, just use the lowest
8496 * ring freq.
8497 */
8498 if (gpu_freq < min_freq)
8499 ia_freq = 800;
8500 else
8501 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
8502 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
8503
8504 I915_WRITE(GEN6_PCODE_DATA,
8505 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
8506 gpu_freq);
8507 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
8508 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8509 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
8510 GEN6_PCODE_READY) == 0, 10)) {
8511 DRM_ERROR("pcode write of freq table timed out\n");
8512 continue;
8513 }
8514 }
8515
8516 mutex_unlock(&dev_priv->dev->struct_mutex);
8517 }
8518
8519 static void ironlake_init_clock_gating(struct drm_device *dev)
8520 {
8521 struct drm_i915_private *dev_priv = dev->dev_private;
8522 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8523
8524 /* Required for FBC */
8525 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
8526 DPFCRUNIT_CLOCK_GATE_DISABLE |
8527 DPFDUNIT_CLOCK_GATE_DISABLE;
8528 /* Required for CxSR */
8529 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
8530
8531 I915_WRITE(PCH_3DCGDIS0,
8532 MARIUNIT_CLOCK_GATE_DISABLE |
8533 SVSMUNIT_CLOCK_GATE_DISABLE);
8534 I915_WRITE(PCH_3DCGDIS1,
8535 VFMUNIT_CLOCK_GATE_DISABLE);
8536
8537 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8538
8539 /*
8540 * According to the spec the following bits should be set in
8541 * order to enable memory self-refresh
8542 * The bit 22/21 of 0x42004
8543 * The bit 5 of 0x42020
8544 * The bit 15 of 0x45000
8545 */
8546 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8547 (I915_READ(ILK_DISPLAY_CHICKEN2) |
8548 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
8549 I915_WRITE(ILK_DSPCLK_GATE,
8550 (I915_READ(ILK_DSPCLK_GATE) |
8551 ILK_DPARB_CLK_GATE));
8552 I915_WRITE(DISP_ARB_CTL,
8553 (I915_READ(DISP_ARB_CTL) |
8554 DISP_FBC_WM_DIS));
8555 I915_WRITE(WM3_LP_ILK, 0);
8556 I915_WRITE(WM2_LP_ILK, 0);
8557 I915_WRITE(WM1_LP_ILK, 0);
8558
8559 /*
8560 	 * Based on documentation from the hardware team, the following bits
8561 * should be set unconditionally in order to enable FBC.
8562 * The bit 22 of 0x42000
8563 * The bit 22 of 0x42004
8564 * The bit 7,8,9 of 0x42020.
8565 */
8566 if (IS_IRONLAKE_M(dev)) {
8567 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8568 I915_READ(ILK_DISPLAY_CHICKEN1) |
8569 ILK_FBCQ_DIS);
8570 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8571 I915_READ(ILK_DISPLAY_CHICKEN2) |
8572 ILK_DPARB_GATE);
8573 I915_WRITE(ILK_DSPCLK_GATE,
8574 I915_READ(ILK_DSPCLK_GATE) |
8575 ILK_DPFC_DIS1 |
8576 ILK_DPFC_DIS2 |
8577 ILK_CLK_FBC);
8578 }
8579
8580 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8581 I915_READ(ILK_DISPLAY_CHICKEN2) |
8582 ILK_ELPIN_409_SELECT);
8583 I915_WRITE(_3D_CHICKEN2,
8584 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
8585 _3D_CHICKEN2_WM_READ_PIPELINED);
8586 }
8587
8588 static void gen6_init_clock_gating(struct drm_device *dev)
8589 {
8590 struct drm_i915_private *dev_priv = dev->dev_private;
8591 int pipe;
8592 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8593
8594 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8595
8596 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8597 I915_READ(ILK_DISPLAY_CHICKEN2) |
8598 ILK_ELPIN_409_SELECT);
8599
8600 /* WaDisableHiZPlanesWhenMSAAEnabled */
8601 I915_WRITE(_3D_CHICKEN,
8602 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
8603
8604 I915_WRITE(WM3_LP_ILK, 0);
8605 I915_WRITE(WM2_LP_ILK, 0);
8606 I915_WRITE(WM1_LP_ILK, 0);
8607
8608 I915_WRITE(GEN6_UCGCTL1,
8609 I915_READ(GEN6_UCGCTL1) |
8610 GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
8611
8612 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8613 * gating disable must be set. Failure to set it results in
8614 * flickering pixels due to Z write ordering failures after
8615 * some amount of runtime in the Mesa "fire" demo, and Unigine
8616 * Sanctuary and Tropics, and apparently anything else with
8617 * alpha test or pixel discard.
8618 *
8619 * According to the spec, bit 11 (RCCUNIT) must also be set,
8620 * but we didn't debug actual testcases to find it out.
8621 */
8622 I915_WRITE(GEN6_UCGCTL2,
8623 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8624 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8625
8626 /*
8627 * According to the spec the following bits should be
8628 * set in order to enable memory self-refresh and fbc:
8629 * The bit21 and bit22 of 0x42000
8630 * The bit21 and bit22 of 0x42004
8631 * The bit5 and bit7 of 0x42020
8632 * The bit14 of 0x70180
8633 * The bit14 of 0x71180
8634 */
8635 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8636 I915_READ(ILK_DISPLAY_CHICKEN1) |
8637 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
8638 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8639 I915_READ(ILK_DISPLAY_CHICKEN2) |
8640 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
8641 I915_WRITE(ILK_DSPCLK_GATE,
8642 I915_READ(ILK_DSPCLK_GATE) |
8643 ILK_DPARB_CLK_GATE |
8644 ILK_DPFD_CLK_GATE);
8645
8646 for_each_pipe(pipe) {
8647 I915_WRITE(DSPCNTR(pipe),
8648 I915_READ(DSPCNTR(pipe)) |
8649 DISPPLANE_TRICKLE_FEED_DISABLE);
8650 intel_flush_display_plane(dev_priv, pipe);
8651 }
8652
8653 /* The default value should be 0x200 according to docs, but the two
8654 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
8655 I915_WRITE(GEN6_GT_MODE, 0xffff << 16);
8656 I915_WRITE(GEN6_GT_MODE, GEN6_GT_MODE_HI << 16 | GEN6_GT_MODE_HI);
8657 }
8658
8659 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
8660 {
8661 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
8662
8663 reg &= ~GEN7_FF_SCHED_MASK;
8664 reg |= GEN7_FF_TS_SCHED_HW;
8665 reg |= GEN7_FF_VS_SCHED_HW;
8666 reg |= GEN7_FF_DS_SCHED_HW;
8667
8668 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
8669 }
8670
8671 static void ivybridge_init_clock_gating(struct drm_device *dev)
8672 {
8673 struct drm_i915_private *dev_priv = dev->dev_private;
8674 int pipe;
8675 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8676
8677 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8678
8679 I915_WRITE(WM3_LP_ILK, 0);
8680 I915_WRITE(WM2_LP_ILK, 0);
8681 I915_WRITE(WM1_LP_ILK, 0);
8682
8683 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8684 * This implements the WaDisableRCZUnitClockGating workaround.
8685 */
8686 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8687
8688 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
8689
8690 I915_WRITE(IVB_CHICKEN3,
8691 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8692 CHICKEN3_DGMG_DONE_FIX_DISABLE);
8693
8694 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
8695 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
8696 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
8697
8698 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
8699 I915_WRITE(GEN7_L3CNTLREG1,
8700 GEN7_WA_FOR_GEN7_L3_CONTROL);
8701 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
8702 GEN7_WA_L3_CHICKEN_MODE);
8703
8704 /* This is required by WaCatErrorRejectionIssue */
8705 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8706 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8707 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8708
8709 for_each_pipe(pipe) {
8710 I915_WRITE(DSPCNTR(pipe),
8711 I915_READ(DSPCNTR(pipe)) |
8712 DISPPLANE_TRICKLE_FEED_DISABLE);
8713 intel_flush_display_plane(dev_priv, pipe);
8714 }
8715
8716 gen7_setup_fixed_func_scheduler(dev_priv);
8717 }
8718
8719 static void g4x_init_clock_gating(struct drm_device *dev)
8720 {
8721 struct drm_i915_private *dev_priv = dev->dev_private;
8722 uint32_t dspclk_gate;
8723
8724 I915_WRITE(RENCLK_GATE_D1, 0);
8725 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
8726 GS_UNIT_CLOCK_GATE_DISABLE |
8727 CL_UNIT_CLOCK_GATE_DISABLE);
8728 I915_WRITE(RAMCLK_GATE_D, 0);
8729 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
8730 OVRUNIT_CLOCK_GATE_DISABLE |
8731 OVCUNIT_CLOCK_GATE_DISABLE;
8732 if (IS_GM45(dev))
8733 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
8734 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
8735 }
8736
8737 static void crestline_init_clock_gating(struct drm_device *dev)
8738 {
8739 struct drm_i915_private *dev_priv = dev->dev_private;
8740
8741 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
8742 I915_WRITE(RENCLK_GATE_D2, 0);
8743 I915_WRITE(DSPCLK_GATE_D, 0);
8744 I915_WRITE(RAMCLK_GATE_D, 0);
8745 I915_WRITE16(DEUC, 0);
8746 }
8747
8748 static void broadwater_init_clock_gating(struct drm_device *dev)
8749 {
8750 struct drm_i915_private *dev_priv = dev->dev_private;
8751
8752 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
8753 I965_RCC_CLOCK_GATE_DISABLE |
8754 I965_RCPB_CLOCK_GATE_DISABLE |
8755 I965_ISC_CLOCK_GATE_DISABLE |
8756 I965_FBC_CLOCK_GATE_DISABLE);
8757 I915_WRITE(RENCLK_GATE_D2, 0);
8758 }
8759
8760 static void gen3_init_clock_gating(struct drm_device *dev)
8761 {
8762 struct drm_i915_private *dev_priv = dev->dev_private;
8763 u32 dstate = I915_READ(D_STATE);
8764
8765 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8766 DSTATE_DOT_CLOCK_GATING;
8767 I915_WRITE(D_STATE, dstate);
8768 }
8769
8770 static void i85x_init_clock_gating(struct drm_device *dev)
8771 {
8772 struct drm_i915_private *dev_priv = dev->dev_private;
8773
8774 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
8775 }
8776
8777 static void i830_init_clock_gating(struct drm_device *dev)
8778 {
8779 struct drm_i915_private *dev_priv = dev->dev_private;
8780
8781 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
8782 }
8783
8784 static void ibx_init_clock_gating(struct drm_device *dev)
8785 {
8786 struct drm_i915_private *dev_priv = dev->dev_private;
8787
8788 /*
8789 * On Ibex Peak and Cougar Point, we need to disable clock
8790 * gating for the panel power sequencer or it will fail to
8791 * start up when no ports are active.
8792 */
8793 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8794 }
8795
8796 static void cpt_init_clock_gating(struct drm_device *dev)
8797 {
8798 struct drm_i915_private *dev_priv = dev->dev_private;
8799 int pipe;
8800
8801 /*
8802 * On Ibex Peak and Cougar Point, we need to disable clock
8803 * gating for the panel power sequencer or it will fail to
8804 * start up when no ports are active.
8805 */
8806 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8807 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
8808 DPLS_EDP_PPS_FIX_DIS);
8809 /* Without this, mode sets may fail silently on FDI */
8810 for_each_pipe(pipe)
8811 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
8812 }
8813
8814 static void ironlake_teardown_rc6(struct drm_device *dev)
8815 {
8816 struct drm_i915_private *dev_priv = dev->dev_private;
8817
8818 if (dev_priv->renderctx) {
8819 i915_gem_object_unpin(dev_priv->renderctx);
8820 drm_gem_object_unreference(&dev_priv->renderctx->base);
8821 dev_priv->renderctx = NULL;
8822 }
8823
8824 if (dev_priv->pwrctx) {
8825 i915_gem_object_unpin(dev_priv->pwrctx);
8826 drm_gem_object_unreference(&dev_priv->pwrctx->base);
8827 dev_priv->pwrctx = NULL;
8828 }
8829 }
8830
8831 static void ironlake_disable_rc6(struct drm_device *dev)
8832 {
8833 struct drm_i915_private *dev_priv = dev->dev_private;
8834
8835 if (I915_READ(PWRCTXA)) {
8836 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
8837 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
8838 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
8839 50);
8840
8841 I915_WRITE(PWRCTXA, 0);
8842 POSTING_READ(PWRCTXA);
8843
8844 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8845 POSTING_READ(RSTDBYCTL);
8846 }
8847
8848 ironlake_teardown_rc6(dev);
8849 }
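/*
 * Note (describing a helper defined elsewhere in the driver):
 * wait_for() above is the i915 polling macro; it re-evaluates its
 * condition until it becomes true or the timeout in milliseconds
 * expires, so the RSX status poll above gives up after 50 ms.
 */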
8850
8851 static int ironlake_setup_rc6(struct drm_device *dev)
8852 {
8853 struct drm_i915_private *dev_priv = dev->dev_private;
8854
8855 if (dev_priv->renderctx == NULL)
8856 dev_priv->renderctx = intel_alloc_context_page(dev);
8857 if (!dev_priv->renderctx)
8858 return -ENOMEM;
8859
8860 if (dev_priv->pwrctx == NULL)
8861 dev_priv->pwrctx = intel_alloc_context_page(dev);
8862 if (!dev_priv->pwrctx) {
8863 ironlake_teardown_rc6(dev);
8864 return -ENOMEM;
8865 }
8866
8867 return 0;
8868 }
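/*
 * Note on the error handling above: if the renderctx allocation fails,
 * nothing has been allocated yet and the function can return directly;
 * if only the pwrctx allocation fails, ironlake_teardown_rc6() unpins
 * and releases the renderctx as well, so neither failure path leaks a
 * pinned context page.
 */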
8869
8870 void ironlake_enable_rc6(struct drm_device *dev)
8871 {
8872 struct drm_i915_private *dev_priv = dev->dev_private;
8873 int ret;
8874
8875 /* rc6 disabled by default due to repeated reports of hanging during
8876 * boot and resume.
8877 */
8878 if (!intel_enable_rc6(dev))
8879 return;
8880
8881 mutex_lock(&dev->struct_mutex);
8882 ret = ironlake_setup_rc6(dev);
8883 if (ret) {
8884 mutex_unlock(&dev->struct_mutex);
8885 return;
8886 }
8887
8888 /*
8889 * GPU can automatically power down the render unit if given a page
8890 * to save state.
8891 */
8892 ret = BEGIN_LP_RING(6);
8893 if (ret) {
8894 ironlake_teardown_rc6(dev);
8895 mutex_unlock(&dev->struct_mutex);
8896 return;
8897 }
8898
8899 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8900 OUT_RING(MI_SET_CONTEXT);
8901 OUT_RING(dev_priv->renderctx->gtt_offset |
8902 MI_MM_SPACE_GTT |
8903 MI_SAVE_EXT_STATE_EN |
8904 MI_RESTORE_EXT_STATE_EN |
8905 MI_RESTORE_INHIBIT);
8906 OUT_RING(MI_SUSPEND_FLUSH);
8907 OUT_RING(MI_NOOP);
8908 OUT_RING(MI_FLUSH);
8909 ADVANCE_LP_RING();
8910
8911 /*
8912 	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8913 	 * does an implicit flush there; combined with the MI_FLUSH above, it
8914 	 * should be safe to assume that renderctx is valid.
8915 */
8916 ret = intel_wait_ring_idle(LP_RING(dev_priv));
8917 if (ret) {
8918 DRM_ERROR("failed to enable ironlake power power savings\n");
8919 ironlake_teardown_rc6(dev);
8920 mutex_unlock(&dev->struct_mutex);
8921 return;
8922 }
8923
8924 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8925 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8926 mutex_unlock(&dev->struct_mutex);
8927 }
8928
8929 void intel_init_clock_gating(struct drm_device *dev)
8930 {
8931 struct drm_i915_private *dev_priv = dev->dev_private;
8932
8933 dev_priv->display.init_clock_gating(dev);
8934
8935 if (dev_priv->display.init_pch_clock_gating)
8936 dev_priv->display.init_pch_clock_gating(dev);
8937 }
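/*
 * Note: intel_init_display() below installs an init_clock_gating hook
 * for every platform it handles, while init_pch_clock_gating is only
 * set on PCH-split systems (Ibex Peak or Cougar Point), which is why
 * only the latter is NULL-checked here.
 */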
8938
8939 /* Set up chip specific display functions */
8940 static void intel_init_display(struct drm_device *dev)
8941 {
8942 struct drm_i915_private *dev_priv = dev->dev_private;
8943
8944 /* We always want a DPMS function */
8945 if (HAS_PCH_SPLIT(dev)) {
8946 dev_priv->display.dpms = ironlake_crtc_dpms;
8947 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
8948 dev_priv->display.update_plane = ironlake_update_plane;
8949 } else {
8950 dev_priv->display.dpms = i9xx_crtc_dpms;
8951 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
8952 dev_priv->display.update_plane = i9xx_update_plane;
8953 }
8954
8955 if (I915_HAS_FBC(dev)) {
8956 if (HAS_PCH_SPLIT(dev)) {
8957 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
8958 dev_priv->display.enable_fbc = ironlake_enable_fbc;
8959 dev_priv->display.disable_fbc = ironlake_disable_fbc;
8960 } else if (IS_GM45(dev)) {
8961 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
8962 dev_priv->display.enable_fbc = g4x_enable_fbc;
8963 dev_priv->display.disable_fbc = g4x_disable_fbc;
8964 } else if (IS_CRESTLINE(dev)) {
8965 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
8966 dev_priv->display.enable_fbc = i8xx_enable_fbc;
8967 dev_priv->display.disable_fbc = i8xx_disable_fbc;
8968 }
8969 /* 855GM needs testing */
8970 }
8971
8972 /* Returns the core display clock speed */
8973 if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
8974 dev_priv->display.get_display_clock_speed =
8975 i945_get_display_clock_speed;
8976 else if (IS_I915G(dev))
8977 dev_priv->display.get_display_clock_speed =
8978 i915_get_display_clock_speed;
8979 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
8980 dev_priv->display.get_display_clock_speed =
8981 i9xx_misc_get_display_clock_speed;
8982 else if (IS_I915GM(dev))
8983 dev_priv->display.get_display_clock_speed =
8984 i915gm_get_display_clock_speed;
8985 else if (IS_I865G(dev))
8986 dev_priv->display.get_display_clock_speed =
8987 i865_get_display_clock_speed;
8988 else if (IS_I85X(dev))
8989 dev_priv->display.get_display_clock_speed =
8990 i855_get_display_clock_speed;
8991 else /* 852, 830 */
8992 dev_priv->display.get_display_clock_speed =
8993 i830_get_display_clock_speed;
8994
8995 /* For FIFO watermark updates */
8996 if (HAS_PCH_SPLIT(dev)) {
8997 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
8998 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
8999
9000 /* IVB configs may use multi-threaded forcewake */
9001 if (IS_IVYBRIDGE(dev)) {
9002 u32 ecobus;
9003
9004 		/* A small trick here - if the BIOS hasn't configured MT forcewake,
9005 		 * and if the device is in RC6, then force_wake_mt_get will not wake
9006 		 * the device and the ECOBUS read will return zero, which will be
9007 		 * (correctly) interpreted by the test below as MT forcewake being
9008 		 * disabled.
9009 */
9010 mutex_lock(&dev->struct_mutex);
9011 __gen6_gt_force_wake_mt_get(dev_priv);
9012 ecobus = I915_READ_NOTRACE(ECOBUS);
9013 __gen6_gt_force_wake_mt_put(dev_priv);
9014 mutex_unlock(&dev->struct_mutex);
9015
9016 if (ecobus & FORCEWAKE_MT_ENABLE) {
9017 DRM_DEBUG_KMS("Using MT version of forcewake\n");
9018 dev_priv->display.force_wake_get =
9019 __gen6_gt_force_wake_mt_get;
9020 dev_priv->display.force_wake_put =
9021 __gen6_gt_force_wake_mt_put;
9022 }
9023 }
9024
9025 if (HAS_PCH_IBX(dev))
9026 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
9027 else if (HAS_PCH_CPT(dev))
9028 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
9029
9030 if (IS_GEN5(dev)) {
9031 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
9032 dev_priv->display.update_wm = ironlake_update_wm;
9033 else {
9034 DRM_DEBUG_KMS("Failed to get proper latency. "
9035 "Disable CxSR\n");
9036 dev_priv->display.update_wm = NULL;
9037 }
9038 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
9039 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
9040 dev_priv->display.write_eld = ironlake_write_eld;
9041 } else if (IS_GEN6(dev)) {
9042 if (SNB_READ_WM0_LATENCY()) {
9043 dev_priv->display.update_wm = sandybridge_update_wm;
9044 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
9045 } else {
9046 DRM_DEBUG_KMS("Failed to read display plane latency. "
9047 "Disable CxSR\n");
9048 dev_priv->display.update_wm = NULL;
9049 }
9050 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
9051 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
9052 dev_priv->display.write_eld = ironlake_write_eld;
9053 } else if (IS_IVYBRIDGE(dev)) {
9054 /* FIXME: detect B0+ stepping and use auto training */
9055 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
9056 if (SNB_READ_WM0_LATENCY()) {
9057 dev_priv->display.update_wm = sandybridge_update_wm;
9058 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
9059 } else {
9060 DRM_DEBUG_KMS("Failed to read display plane latency. "
9061 "Disable CxSR\n");
9062 dev_priv->display.update_wm = NULL;
9063 }
9064 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
9065 dev_priv->display.write_eld = ironlake_write_eld;
9066 } else
9067 dev_priv->display.update_wm = NULL;
9068 } else if (IS_PINEVIEW(dev)) {
9069 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
9070 dev_priv->is_ddr3,
9071 dev_priv->fsb_freq,
9072 dev_priv->mem_freq)) {
9073 DRM_INFO("failed to find known CxSR latency "
9074 "(found ddr%s fsb freq %d, mem freq %d), "
9075 "disabling CxSR\n",
9076 (dev_priv->is_ddr3 == 1) ? "3" : "2",
9077 dev_priv->fsb_freq, dev_priv->mem_freq);
9078 /* Disable CxSR and never update its watermark again */
9079 pineview_disable_cxsr(dev);
9080 dev_priv->display.update_wm = NULL;
9081 } else
9082 dev_priv->display.update_wm = pineview_update_wm;
9083 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
9084 } else if (IS_G4X(dev)) {
9085 dev_priv->display.write_eld = g4x_write_eld;
9086 dev_priv->display.update_wm = g4x_update_wm;
9087 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
9088 } else if (IS_GEN4(dev)) {
9089 dev_priv->display.update_wm = i965_update_wm;
9090 if (IS_CRESTLINE(dev))
9091 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
9092 else if (IS_BROADWATER(dev))
9093 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
9094 } else if (IS_GEN3(dev)) {
9095 dev_priv->display.update_wm = i9xx_update_wm;
9096 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
9097 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
9098 } else if (IS_I865G(dev)) {
9099 dev_priv->display.update_wm = i830_update_wm;
9100 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9101 dev_priv->display.get_fifo_size = i830_get_fifo_size;
9102 } else if (IS_I85X(dev)) {
9103 dev_priv->display.update_wm = i9xx_update_wm;
9104 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
9105 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9106 } else {
9107 dev_priv->display.update_wm = i830_update_wm;
9108 dev_priv->display.init_clock_gating = i830_init_clock_gating;
9109 if (IS_845G(dev))
9110 dev_priv->display.get_fifo_size = i845_get_fifo_size;
9111 else
9112 dev_priv->display.get_fifo_size = i830_get_fifo_size;
9113 }
9114
9115 /* Default just returns -ENODEV to indicate unsupported */
9116 dev_priv->display.queue_flip = intel_default_queue_flip;
9117
9118 switch (INTEL_INFO(dev)->gen) {
9119 case 2:
9120 dev_priv->display.queue_flip = intel_gen2_queue_flip;
9121 break;
9122
9123 case 3:
9124 dev_priv->display.queue_flip = intel_gen3_queue_flip;
9125 break;
9126
9127 case 4:
9128 case 5:
9129 dev_priv->display.queue_flip = intel_gen4_queue_flip;
9130 break;
9131
9132 case 6:
9133 dev_priv->display.queue_flip = intel_gen6_queue_flip;
9134 break;
9135 case 7:
9136 dev_priv->display.queue_flip = intel_gen7_queue_flip;
9137 break;
9138 }
9139 }
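/*
 * Usage sketch (illustrative only; example_update_wm is a made-up name):
 * the rest of the driver goes through the vtable filled in above rather
 * than calling chip-specific functions directly, and the optional hooks
 * must be NULL-checked, since several branches above deliberately clear
 * them (e.g. update_wm when the watermark latencies cannot be read):
 *
 *	static void example_update_wm(struct drm_device *dev)
 *	{
 *		struct drm_i915_private *dev_priv = dev->dev_private;
 *
 *		if (dev_priv->display.update_wm)
 *			dev_priv->display.update_wm(dev);
 *	}
 */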
9140
9141 /*
9142 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
9143 * resume, or other times. This quirk makes sure that's the case for
9144 * affected systems.
9145 */
9146 static void quirk_pipea_force(struct drm_device *dev)
9147 {
9148 struct drm_i915_private *dev_priv = dev->dev_private;
9149
9150 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9151 DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
9152 }
9153
9154 /*
9155 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
9156 */
9157 static void quirk_ssc_force_disable(struct drm_device *dev)
9158 {
9159 struct drm_i915_private *dev_priv = dev->dev_private;
9160 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
9161 }
9162
9163 /*
9164 * Some machines (Dell XPS13) suffer broken backlight controls if
9165 * BLM_PCH_PWM_ENABLE is set.
9166 */
9167 static void quirk_no_pch_pwm_enable(struct drm_device *dev)
9168 {
9169 struct drm_i915_private *dev_priv = dev->dev_private;
9170 dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
9171 DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
9172 }
9173
9174 struct intel_quirk {
9175 int device;
9176 int subsystem_vendor;
9177 int subsystem_device;
9178 void (*hook)(struct drm_device *dev);
9179 };
9180
9181 struct intel_quirk intel_quirks[] = {
9182 /* HP Mini needs pipe A force quirk (LP: #322104) */
9183 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
9184
9185 /* Thinkpad R31 needs pipe A force quirk */
9186 { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
9187 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
9188 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
9189
9190 /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
9191 { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
9192 /* ThinkPad X40 needs pipe A force quirk */
9193
9194 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
9195 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
9196
9197 /* 855 & before need to leave pipe A & dpll A up */
9198 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
9199 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
9200
9201 /* Lenovo U160 cannot use SSC on LVDS */
9202 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
9203
9204 /* Sony Vaio Y cannot use SSC on LVDS */
9205 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
9206
9207 /* Dell XPS13 HD Sandy Bridge */
9208 	{ 0x0116, 0x1028, 0x052e, quirk_no_pch_pwm_enable },
9209 /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
9210 	{ 0x0166, 0x1028, 0x058b, quirk_no_pch_pwm_enable },
9211 };
9212
9213 static void intel_init_quirks(struct drm_device *dev)
9214 {
9215 struct pci_dev *d = dev->pdev;
9216 int i;
9217
9218 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
9219 struct intel_quirk *q = &intel_quirks[i];
9220
9221 if (d->device == q->device &&
9222 (d->subsystem_vendor == q->subsystem_vendor ||
9223 q->subsystem_vendor == PCI_ANY_ID) &&
9224 (d->subsystem_device == q->subsystem_device ||
9225 q->subsystem_device == PCI_ANY_ID))
9226 q->hook(dev);
9227 }
9228 }
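/*
 * Sketch of how a new quirk would be added (the IDs below are made up):
 * append an entry to intel_quirks[] above with the PCI device ID, the
 * subsystem vendor/device IDs and the hook to run.  PCI_ANY_ID in
 * either subsystem field acts as a wildcard, as the 855/830 pipe A
 * entries demonstrate:
 *
 *	{ 0x1234, 0x5678, 0x9abc, quirk_pipea_force },
 */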
9229
9230 /* Disable the VGA plane that we never use */
9231 static void i915_disable_vga(struct drm_device *dev)
9232 {
9233 struct drm_i915_private *dev_priv = dev->dev_private;
9234 u8 sr1;
9235 u32 vga_reg;
9236
9237 if (HAS_PCH_SPLIT(dev))
9238 vga_reg = CPU_VGACNTRL;
9239 else
9240 vga_reg = VGACNTRL;
9241
9242 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
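	/* VGA sequencer register SR01; setting bit 5 turns the screen off */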
9243 outb(1, VGA_SR_INDEX);
9244 sr1 = inb(VGA_SR_DATA);
9245 outb(sr1 | 1<<5, VGA_SR_DATA);
9246 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
9247 udelay(300);
9248
9249 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
9250 POSTING_READ(vga_reg);
9251 }
9252
9253 void intel_modeset_init(struct drm_device *dev)
9254 {
9255 struct drm_i915_private *dev_priv = dev->dev_private;
9256 int i, ret;
9257
9258 drm_mode_config_init(dev);
9259
9260 dev->mode_config.min_width = 0;
9261 dev->mode_config.min_height = 0;
9262
9263 dev->mode_config.preferred_depth = 24;
9264 dev->mode_config.prefer_shadow = 1;
9265
9266 dev->mode_config.funcs = (void *)&intel_mode_funcs;
9267
9268 intel_init_quirks(dev);
9269
9270 intel_init_display(dev);
9271
9272 if (IS_GEN2(dev)) {
9273 dev->mode_config.max_width = 2048;
9274 dev->mode_config.max_height = 2048;
9275 } else if (IS_GEN3(dev)) {
9276 dev->mode_config.max_width = 4096;
9277 dev->mode_config.max_height = 4096;
9278 } else {
9279 dev->mode_config.max_width = 8192;
9280 dev->mode_config.max_height = 8192;
9281 }
9282 dev->mode_config.fb_base = dev->agp->base;
9283
9284 DRM_DEBUG_KMS("%d display pipe%s available.\n",
9285 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
9286
9287 for (i = 0; i < dev_priv->num_pipe; i++) {
9288 intel_crtc_init(dev, i);
9289 ret = intel_plane_init(dev, i);
9290 if (ret)
9291 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
9292 }
9293
9294 /* Just disable it once at startup */
9295 i915_disable_vga(dev);
9296 intel_setup_outputs(dev);
9297
9298 intel_init_clock_gating(dev);
9299
9300 if (IS_IRONLAKE_M(dev)) {
9301 ironlake_enable_drps(dev);
9302 intel_init_emon(dev);
9303 }
9304
9305 if (IS_GEN6(dev) || IS_GEN7(dev)) {
9306 gen6_enable_rps(dev_priv);
9307 gen6_update_ring_freq(dev_priv);
9308 }
9309
9310 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
9311 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
9312 (unsigned long)dev);
9313 }
9314
9315 void intel_modeset_gem_init(struct drm_device *dev)
9316 {
9317 if (IS_IRONLAKE_M(dev))
9318 ironlake_enable_rc6(dev);
9319
9320 intel_setup_overlay(dev);
9321 }
9322
9323 void intel_modeset_cleanup(struct drm_device *dev)
9324 {
9325 struct drm_i915_private *dev_priv = dev->dev_private;
9326 struct drm_crtc *crtc;
9327 struct intel_crtc *intel_crtc;
9328
9329 drm_kms_helper_poll_fini(dev);
9330 mutex_lock(&dev->struct_mutex);
9331
9332 intel_unregister_dsm_handler();
9333
9334
9335 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9336 /* Skip inactive CRTCs */
9337 if (!crtc->fb)
9338 continue;
9339
9340 intel_crtc = to_intel_crtc(crtc);
9341 intel_increase_pllclock(crtc);
9342 }
9343
9344 intel_disable_fbc(dev);
9345
9346 if (IS_IRONLAKE_M(dev))
9347 ironlake_disable_drps(dev);
9348 if (IS_GEN6(dev) || IS_GEN7(dev))
9349 gen6_disable_rps(dev);
9350
9351 if (IS_IRONLAKE_M(dev))
9352 ironlake_disable_rc6(dev);
9353
9354 mutex_unlock(&dev->struct_mutex);
9355
9356 	/* Disable the irq before mode object teardown, since the irq handler
9357 	 * might enqueue unpin/hotplug work. */
9358 drm_irq_uninstall(dev);
9359 cancel_work_sync(&dev_priv->hotplug_work);
9360 cancel_work_sync(&dev_priv->rps_work);
9361
9362 /* flush any delayed tasks or pending work */
9363 flush_scheduled_work();
9364
9365 /* Shut off idle work before the crtcs get freed. */
9366 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9367 intel_crtc = to_intel_crtc(crtc);
9368 del_timer_sync(&intel_crtc->idle_timer);
9369 }
9370 del_timer_sync(&dev_priv->idle_timer);
9371 cancel_work_sync(&dev_priv->idle_work);
9372
9373 drm_mode_config_cleanup(dev);
9374 }
9375
9376 /*
9377 * Return which encoder is currently attached to this connector.
9378 */
9379 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
9380 {
9381 return &intel_attached_encoder(connector)->base;
9382 }
9383
9384 void intel_connector_attach_encoder(struct intel_connector *connector,
9385 struct intel_encoder *encoder)
9386 {
9387 connector->encoder = encoder;
9388 drm_mode_connector_attach_encoder(&connector->base,
9389 &encoder->base);
9390 }
9391
9392 /*
9393 * set vga decode state - true == enable VGA decode
9394 */
9395 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
9396 {
9397 struct drm_i915_private *dev_priv = dev->dev_private;
9398 u16 gmch_ctrl;
9399
9400 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
9401 if (state)
9402 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
9403 else
9404 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
9405 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
9406 return 0;
9407 }
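/*
 * Note (assumption about wiring elsewhere in the driver): this helper
 * is meant to be used from the VGA arbiter's set_vga_decode callback,
 * registered with vga_client_register(), so legacy VGA decode on the
 * GMCH can be toggled on demand.
 */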
9408
9409 #ifdef CONFIG_DEBUG_FS
9410 #include <linux/seq_file.h>
9411
9412 struct intel_display_error_state {
9413 struct intel_cursor_error_state {
9414 u32 control;
9415 u32 position;
9416 u32 base;
9417 u32 size;
9418 } cursor[2];
9419
9420 struct intel_pipe_error_state {
9421 u32 conf;
9422 u32 source;
9423
9424 u32 htotal;
9425 u32 hblank;
9426 u32 hsync;
9427 u32 vtotal;
9428 u32 vblank;
9429 u32 vsync;
9430 } pipe[2];
9431
9432 struct intel_plane_error_state {
9433 u32 control;
9434 u32 stride;
9435 u32 size;
9436 u32 pos;
9437 u32 addr;
9438 u32 surface;
9439 u32 tile_offset;
9440 } plane[2];
9441 };
9442
9443 struct intel_display_error_state *
9444 intel_display_capture_error_state(struct drm_device *dev)
9445 {
9446 drm_i915_private_t *dev_priv = dev->dev_private;
9447 struct intel_display_error_state *error;
9448 int i;
9449
9450 error = kmalloc(sizeof(*error), GFP_ATOMIC);
9451 if (error == NULL)
9452 return NULL;
9453
9454 for (i = 0; i < 2; i++) {
9455 error->cursor[i].control = I915_READ(CURCNTR(i));
9456 error->cursor[i].position = I915_READ(CURPOS(i));
9457 error->cursor[i].base = I915_READ(CURBASE(i));
9458
9459 error->plane[i].control = I915_READ(DSPCNTR(i));
9460 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
9461 error->plane[i].size = I915_READ(DSPSIZE(i));
9462 error->plane[i].pos = I915_READ(DSPPOS(i));
9463 error->plane[i].addr = I915_READ(DSPADDR(i));
9464 if (INTEL_INFO(dev)->gen >= 4) {
9465 error->plane[i].surface = I915_READ(DSPSURF(i));
9466 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
9467 }
9468
9469 error->pipe[i].conf = I915_READ(PIPECONF(i));
9470 error->pipe[i].source = I915_READ(PIPESRC(i));
9471 error->pipe[i].htotal = I915_READ(HTOTAL(i));
9472 error->pipe[i].hblank = I915_READ(HBLANK(i));
9473 error->pipe[i].hsync = I915_READ(HSYNC(i));
9474 error->pipe[i].vtotal = I915_READ(VTOTAL(i));
9475 error->pipe[i].vblank = I915_READ(VBLANK(i));
9476 error->pipe[i].vsync = I915_READ(VSYNC(i));
9477 }
9478
9479 return error;
9480 }
9481
9482 void
9483 intel_display_print_error_state(struct seq_file *m,
9484 struct drm_device *dev,
9485 struct intel_display_error_state *error)
9486 {
9487 int i;
9488
9489 for (i = 0; i < 2; i++) {
9490 seq_printf(m, "Pipe [%d]:\n", i);
9491 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
9492 seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
9493 seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
9494 seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
9495 seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
9496 seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
9497 seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
9498 seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
9499
9500 seq_printf(m, "Plane [%d]:\n", i);
9501 seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
9502 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
9503 seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
9504 seq_printf(m, " POS: %08x\n", error->plane[i].pos);
9505 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
9506 if (INTEL_INFO(dev)->gen >= 4) {
9507 seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
9508 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
9509 }
9510
9511 seq_printf(m, "Cursor [%d]:\n", i);
9512 seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
9513 seq_printf(m, " POS: %08x\n", error->cursor[i].position);
9514 seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
9515 }
9516 }
9517 #endif
9518