1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 /**
41 * DOC: interrupt handling
42 *
43 * These functions provide the basic support for enabling and disabling the
44 * interrupt handling support. There's a lot more functionality in i915_irq.c
45 * and related files, but that will be described in separate chapters.
46 */
47
48 static const u32 hpd_ilk[HPD_NUM_PINS] = {
49 [HPD_PORT_A] = DE_DP_A_HOTPLUG,
50 };
51
52 static const u32 hpd_ivb[HPD_NUM_PINS] = {
53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54 };
55
56 static const u32 hpd_bdw[HPD_NUM_PINS] = {
57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
58 };
59
60 static const u32 hpd_ibx[HPD_NUM_PINS] = {
61 [HPD_CRT] = SDE_CRT_HOTPLUG,
62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
66 };
67
68 static const u32 hpd_cpt[HPD_NUM_PINS] = {
69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74 };
75
76 static const u32 hpd_spt[HPD_NUM_PINS] = {
77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
82 };
83
84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
85 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91 };
92
93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100 };
101
102 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109 };
110
111 /* BXT hpd list */
112 static const u32 hpd_bxt[HPD_NUM_PINS] = {
113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116 };
117
118 /* IIR can theoretically queue up two events. Be paranoid. */
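/*
 * That is why IIR is cleared twice in the reset macros below: the first
 * write flushes the currently latched bits, and the second catches any
 * event that was queued behind them and latched after the first clear.
 */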
119 #define GEN8_IRQ_RESET_NDX(type, which) do { \
120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
121 POSTING_READ(GEN8_##type##_IMR(which)); \
122 I915_WRITE(GEN8_##type##_IER(which), 0); \
123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
124 POSTING_READ(GEN8_##type##_IIR(which)); \
125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
126 POSTING_READ(GEN8_##type##_IIR(which)); \
127 } while (0)
128
129 #define GEN5_IRQ_RESET(type) do { \
130 I915_WRITE(type##IMR, 0xffffffff); \
131 POSTING_READ(type##IMR); \
132 I915_WRITE(type##IER, 0); \
133 I915_WRITE(type##IIR, 0xffffffff); \
134 POSTING_READ(type##IIR); \
135 I915_WRITE(type##IIR, 0xffffffff); \
136 POSTING_READ(type##IIR); \
137 } while (0)
138
139 /*
140 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
141 */
142 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
143 {
144 u32 val = I915_READ(reg);
145
146 if (val == 0)
147 return;
148
149 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
150 reg, val);
151 I915_WRITE(reg, 0xffffffff);
152 POSTING_READ(reg);
153 I915_WRITE(reg, 0xffffffff);
154 POSTING_READ(reg);
155 }
156
157 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
158 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
159 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
160 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
161 POSTING_READ(GEN8_##type##_IMR(which)); \
162 } while (0)
163
164 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
165 gen5_assert_iir_is_zero(dev_priv, type##IIR); \
166 I915_WRITE(type##IER, (ier_val)); \
167 I915_WRITE(type##IMR, (imr_val)); \
168 POSTING_READ(type##IMR); \
169 } while (0)
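/*
 * Illustrative pairing (not a complete install sequence): an irq_preinstall
 * hook would do e.g. GEN5_IRQ_RESET(GT) and the matching irq_postinstall
 * hook GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs), so that the
 * IIR-is-zero assertion above actually checks the preinstall reset.
 */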
170
171 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
172
173 /* For display hotplug interrupt */
174 static inline void
175 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
176 uint32_t mask,
177 uint32_t bits)
178 {
179 uint32_t val;
180
181 assert_spin_locked(&dev_priv->irq_lock);
182 WARN_ON(bits & ~mask);
183
184 val = I915_READ(PORT_HOTPLUG_EN);
185 val &= ~mask;
186 val |= bits;
187 I915_WRITE(PORT_HOTPLUG_EN, val);
188 }
189
190 /**
191 * i915_hotplug_interrupt_update - update hotplug interrupt enable
192 * @dev_priv: driver private
193 * @mask: bits to update
194 * @bits: bits to enable
195 * NOTE: the HPD enable bits are modified both inside and outside
196 * of an interrupt context. To prevent read-modify-write cycles from
197 * interfering, these bits are protected by a spinlock. Since this
198 * function is usually not called from a context where the lock is
199 * held already, this function acquires the lock itself. A non-locking
200 * version is also available.
201 */
202 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
203 uint32_t mask,
204 uint32_t bits)
205 {
206 spin_lock_irq(&dev_priv->irq_lock);
207 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
208 spin_unlock_irq(&dev_priv->irq_lock);
209 }
210
211 /**
212 * ilk_update_display_irq - update DEIMR
213 * @dev_priv: driver private
214 * @interrupt_mask: mask of interrupt bits to update
215 * @enabled_irq_mask: mask of interrupt bits to enable
216 */
217 static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
218 uint32_t interrupt_mask,
219 uint32_t enabled_irq_mask)
220 {
221 uint32_t new_val;
222
223 assert_spin_locked(&dev_priv->irq_lock);
224
225 WARN_ON(enabled_irq_mask & ~interrupt_mask);
226
227 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
228 return;
229
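/*
 * A set bit in DEIMR masks (disables) that interrupt, so bits being
 * enabled are cleared here and the remaining updated bits are set.
 */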
230 new_val = dev_priv->irq_mask;
231 new_val &= ~interrupt_mask;
232 new_val |= (~enabled_irq_mask & interrupt_mask);
233
234 if (new_val != dev_priv->irq_mask) {
235 dev_priv->irq_mask = new_val;
236 I915_WRITE(DEIMR, dev_priv->irq_mask);
237 POSTING_READ(DEIMR);
238 }
239 }
240
241 void
242 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
243 {
244 ilk_update_display_irq(dev_priv, mask, mask);
245 }
246
247 void
248 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
249 {
250 ilk_update_display_irq(dev_priv, mask, 0);
251 }
252
253 /**
254 * ilk_update_gt_irq - update GTIMR
255 * @dev_priv: driver private
256 * @interrupt_mask: mask of interrupt bits to update
257 * @enabled_irq_mask: mask of interrupt bits to enable
258 */
259 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
260 uint32_t interrupt_mask,
261 uint32_t enabled_irq_mask)
262 {
263 assert_spin_locked(&dev_priv->irq_lock);
264
265 WARN_ON(enabled_irq_mask & ~interrupt_mask);
266
267 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
268 return;
269
270 dev_priv->gt_irq_mask &= ~interrupt_mask;
271 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
272 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
273 POSTING_READ(GTIMR);
274 }
275
276 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
277 {
278 ilk_update_gt_irq(dev_priv, mask, mask);
279 }
280
281 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
282 {
283 ilk_update_gt_irq(dev_priv, mask, 0);
284 }
285
286 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
287 {
288 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
289 }
290
291 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
292 {
293 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
294 }
295
296 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
297 {
298 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
299 }
300
301 /**
302 * snb_update_pm_irq - update GEN6_PMIMR
303 * @dev_priv: driver private
304 * @interrupt_mask: mask of interrupt bits to update
305 * @enabled_irq_mask: mask of interrupt bits to enable
306 */
307 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
308 uint32_t interrupt_mask,
309 uint32_t enabled_irq_mask)
310 {
311 uint32_t new_val;
312
313 WARN_ON(enabled_irq_mask & ~interrupt_mask);
314
315 assert_spin_locked(&dev_priv->irq_lock);
316
317 new_val = dev_priv->pm_irq_mask;
318 new_val &= ~interrupt_mask;
319 new_val |= (~enabled_irq_mask & interrupt_mask);
320
321 if (new_val != dev_priv->pm_irq_mask) {
322 dev_priv->pm_irq_mask = new_val;
323 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
324 POSTING_READ(gen6_pm_imr(dev_priv));
325 }
326 }
327
328 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
329 {
330 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
331 return;
332
333 snb_update_pm_irq(dev_priv, mask, mask);
334 }
335
336 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
337 uint32_t mask)
338 {
339 snb_update_pm_irq(dev_priv, mask, 0);
340 }
341
342 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
343 {
344 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
345 return;
346
347 __gen6_disable_pm_irq(dev_priv, mask);
348 }
349
350 void gen6_reset_rps_interrupts(struct drm_device *dev)
351 {
352 struct drm_i915_private *dev_priv = dev->dev_private;
353 uint32_t reg = gen6_pm_iir(dev_priv);
354
355 spin_lock_irq(&dev_priv->irq_lock);
356 I915_WRITE(reg, dev_priv->pm_rps_events);
357 I915_WRITE(reg, dev_priv->pm_rps_events);
358 POSTING_READ(reg);
359 dev_priv->rps.pm_iir = 0;
360 spin_unlock_irq(&dev_priv->irq_lock);
361 }
362
363 void gen6_enable_rps_interrupts(struct drm_device *dev)
364 {
365 struct drm_i915_private *dev_priv = dev->dev_private;
366
367 spin_lock_irq(&dev_priv->irq_lock);
368
369 WARN_ON(dev_priv->rps.pm_iir);
370 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
371 dev_priv->rps.interrupts_enabled = true;
372 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
373 dev_priv->pm_rps_events);
374 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
375
376 spin_unlock_irq(&dev_priv->irq_lock);
377 }
378
379 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
380 {
381 /*
382 * SNB,IVB can hard hang, and VLV,CHV may hard hang, on a looping
383 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
384 *
385 * TODO: verify if this can be reproduced on VLV,CHV.
386 */
387 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
388 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
389
390 if (INTEL_INFO(dev_priv)->gen >= 8)
391 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
392
393 return mask;
394 }
395
396 void gen6_disable_rps_interrupts(struct drm_device *dev)
397 {
398 struct drm_i915_private *dev_priv = dev->dev_private;
399
400 spin_lock_irq(&dev_priv->irq_lock);
401 dev_priv->rps.interrupts_enabled = false;
402 spin_unlock_irq(&dev_priv->irq_lock);
403
404 cancel_work_sync(&dev_priv->rps.work);
405
406 spin_lock_irq(&dev_priv->irq_lock);
407
408 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
409
410 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
411 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
412 ~dev_priv->pm_rps_events);
413
414 spin_unlock_irq(&dev_priv->irq_lock);
415
416 synchronize_irq(dev->irq);
417 }
418
419 /**
420 * bdw_update_port_irq - update DE port interrupt
421 * @dev_priv: driver private
422 * @interrupt_mask: mask of interrupt bits to update
423 * @enabled_irq_mask: mask of interrupt bits to enable
424 */
425 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
426 uint32_t interrupt_mask,
427 uint32_t enabled_irq_mask)
428 {
429 uint32_t new_val;
430 uint32_t old_val;
431
432 assert_spin_locked(&dev_priv->irq_lock);
433
434 WARN_ON(enabled_irq_mask & ~interrupt_mask);
435
436 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
437 return;
438
439 old_val = I915_READ(GEN8_DE_PORT_IMR);
440
441 new_val = old_val;
442 new_val &= ~interrupt_mask;
443 new_val |= (~enabled_irq_mask & interrupt_mask);
444
445 if (new_val != old_val) {
446 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
447 POSTING_READ(GEN8_DE_PORT_IMR);
448 }
449 }
450
451 /**
452 * ibx_display_interrupt_update - update SDEIMR
453 * @dev_priv: driver private
454 * @interrupt_mask: mask of interrupt bits to update
455 * @enabled_irq_mask: mask of interrupt bits to enable
456 */
457 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
458 uint32_t interrupt_mask,
459 uint32_t enabled_irq_mask)
460 {
461 uint32_t sdeimr = I915_READ(SDEIMR);
462 sdeimr &= ~interrupt_mask;
463 sdeimr |= (~enabled_irq_mask & interrupt_mask);
464
465 WARN_ON(enabled_irq_mask & ~interrupt_mask);
466
467 assert_spin_locked(&dev_priv->irq_lock);
468
469 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
470 return;
471
472 I915_WRITE(SDEIMR, sdeimr);
473 POSTING_READ(SDEIMR);
474 }
475
476 static void
477 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
478 u32 enable_mask, u32 status_mask)
479 {
480 u32 reg = PIPESTAT(pipe);
481 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
482
483 assert_spin_locked(&dev_priv->irq_lock);
484 WARN_ON(!intel_irqs_enabled(dev_priv));
485
486 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
487 status_mask & ~PIPESTAT_INT_STATUS_MASK,
488 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
489 pipe_name(pipe), enable_mask, status_mask))
490 return;
491
492 if ((pipestat & enable_mask) == enable_mask)
493 return;
494
495 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
496
497 /* Enable the interrupt, clear any pending status */
498 pipestat |= enable_mask | status_mask;
499 I915_WRITE(reg, pipestat);
500 POSTING_READ(reg);
501 }
502
503 static void
504 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
505 u32 enable_mask, u32 status_mask)
506 {
507 u32 reg = PIPESTAT(pipe);
508 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
509
510 assert_spin_locked(&dev_priv->irq_lock);
511 WARN_ON(!intel_irqs_enabled(dev_priv));
512
513 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
514 status_mask & ~PIPESTAT_INT_STATUS_MASK,
515 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
516 pipe_name(pipe), enable_mask, status_mask))
517 return;
518
519 if ((pipestat & enable_mask) == 0)
520 return;
521
522 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
523
524 pipestat &= ~enable_mask;
525 I915_WRITE(reg, pipestat);
526 POSTING_READ(reg);
527 }
528
529 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
530 {
531 u32 enable_mask = status_mask << 16;
532
533 /*
534 * On pipe A we don't support the PSR interrupt yet,
535 * on pipe B and C the same bit MBZ.
536 */
537 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
538 return 0;
539 /*
540 * On pipe B and C we don't support the PSR interrupt yet, on pipe
541 * A the same bit is for perf counters which we don't use either.
542 */
543 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
544 return 0;
545
546 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
547 SPRITE0_FLIP_DONE_INT_EN_VLV |
548 SPRITE1_FLIP_DONE_INT_EN_VLV);
549 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
550 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
551 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
552 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
553
554 return enable_mask;
555 }
556
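/*
 * In the PIPESTAT register the enable bits occupy the upper 16 bits and the
 * corresponding status bits the lower 16, hence the status_mask << 16 used
 * to derive the enable mask on non-VLV platforms below.
 */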
557 void
558 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
559 u32 status_mask)
560 {
561 u32 enable_mask;
562
563 if (IS_VALLEYVIEW(dev_priv->dev))
564 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
565 status_mask);
566 else
567 enable_mask = status_mask << 16;
568 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
569 }
570
571 void
572 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
573 u32 status_mask)
574 {
575 u32 enable_mask;
576
577 if (IS_VALLEYVIEW(dev_priv->dev))
578 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
579 status_mask);
580 else
581 enable_mask = status_mask << 16;
582 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
583 }
584
585 /**
586 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
587 * @dev: drm device
588 */
589 static void i915_enable_asle_pipestat(struct drm_device *dev)
590 {
591 struct drm_i915_private *dev_priv = dev->dev_private;
592
593 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
594 return;
595
596 spin_lock_irq(&dev_priv->irq_lock);
597
598 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
599 if (INTEL_INFO(dev)->gen >= 4)
600 i915_enable_pipestat(dev_priv, PIPE_A,
601 PIPE_LEGACY_BLC_EVENT_STATUS);
602
603 spin_unlock_irq(&dev_priv->irq_lock);
604 }
605
606 /*
607 * This timing diagram depicts the video signal in and
608 * around the vertical blanking period.
609 *
610 * Assumptions about the fictitious mode used in this example:
611 * vblank_start >= 3
612 * vsync_start = vblank_start + 1
613 * vsync_end = vblank_start + 2
614 * vtotal = vblank_start + 3
615 *
616 * start of vblank:
617 * latch double buffered registers
618 * increment frame counter (ctg+)
619 * generate start of vblank interrupt (gen4+)
620 * |
621 * | frame start:
622 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
623 * | may be shifted forward 1-3 extra lines via PIPECONF
624 * | |
625 * | | start of vsync:
626 * | | generate vsync interrupt
627 * | | |
628 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
629 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
630 * ----va---> <-----------------vb--------------------> <--------va-------------
631 * | | <----vs-----> |
632 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
633 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
634 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
635 * | | |
636 * last visible pixel first visible pixel
637 * | increment frame counter (gen3/4)
638 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
639 *
640 * x = horizontal active
641 * _ = horizontal blanking
642 * hs = horizontal sync
643 * va = vertical active
644 * vb = vertical blanking
645 * vs = vertical sync
646 * vbs = vblank_start (number)
647 *
648 * Summary:
649 * - most events happen at the start of horizontal sync
650 * - frame start happens at the start of horizontal blank, 1-4 lines
651 * (depending on PIPECONF settings) after the start of vblank
652 * - gen3/4 pixel and frame counter are synchronized with the start
653 * of horizontal active on the first line of vertical active
654 */
655
656 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
657 {
658 /* Gen2 doesn't have a hardware frame counter */
659 return 0;
660 }
661
662 /* Called from drm generic code, passed a 'crtc', which
663 * we use as a pipe index
664 */
665 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
666 {
667 struct drm_i915_private *dev_priv = dev->dev_private;
668 unsigned long high_frame;
669 unsigned long low_frame;
670 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
671 struct intel_crtc *intel_crtc =
672 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
673 const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
674
675 htotal = mode->crtc_htotal;
676 hsync_start = mode->crtc_hsync_start;
677 vbl_start = mode->crtc_vblank_start;
678 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
679 vbl_start = DIV_ROUND_UP(vbl_start, 2);
680
681 /* Convert to pixel count */
682 vbl_start *= htotal;
683
684 /* Start of vblank event occurs at start of hsync */
685 vbl_start -= htotal - hsync_start;
686
687 high_frame = PIPEFRAME(pipe);
688 low_frame = PIPEFRAMEPIXEL(pipe);
689
690 /*
691 * High & low register fields aren't synchronized, so make sure
692 * we get a low value that's stable across two reads of the high
693 * register.
694 */
695 do {
696 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
697 low = I915_READ(low_frame);
698 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
699 } while (high1 != high2);
700
701 high1 >>= PIPE_FRAME_HIGH_SHIFT;
702 pixel = low & PIPE_PIXEL_MASK;
703 low >>= PIPE_FRAME_LOW_SHIFT;
704
705 /*
706 * The frame counter increments at beginning of active.
707 * Cook up a vblank counter by also checking the pixel
708 * counter against vblank start.
709 */
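/*
 * Illustrative values only: high1 = 0x12, low = 0x34 with the pixel
 * counter already at/past vbl_start gives ((0x12 << 8) | 0x34) + 1
 * = 0x1235, truncated to 24 bits.
 */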
710 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
711 }
712
713 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
714 {
715 struct drm_i915_private *dev_priv = dev->dev_private;
716
717 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
718 }
719
720 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
721 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
722
723 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
724 {
725 struct drm_device *dev = crtc->base.dev;
726 struct drm_i915_private *dev_priv = dev->dev_private;
727 const struct drm_display_mode *mode = &crtc->base.hwmode;
728 enum pipe pipe = crtc->pipe;
729 int position, vtotal;
730
731 vtotal = mode->crtc_vtotal;
732 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
733 vtotal /= 2;
734
735 if (IS_GEN2(dev))
736 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
737 else
738 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
739
740 /*
741 * On HSW, the DSL reg (0x70000) appears to return 0 if we
742 * read it just before the start of vblank. So try it again
743 * so we don't accidentally end up spanning a vblank frame
744 * increment, causing the pipe_update_end() code to squawk at us.
745 *
746 * The nature of this problem means we can't simply check the ISR
747 * bit and return the vblank start value; nor can we use the scanline
748 * debug register in the transcoder as it appears to have the same
749 * problem. We may need to extend this to include other platforms,
750 * but so far testing only shows the problem on HSW.
751 */
752 if (HAS_DDI(dev) && !position) {
753 int i, temp;
754
755 for (i = 0; i < 100; i++) {
756 udelay(1);
757 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
758 DSL_LINEMASK_GEN3;
759 if (temp != position) {
760 position = temp;
761 break;
762 }
763 }
764 }
765
766 /*
767 * See update_scanline_offset() for the details on the
768 * scanline_offset adjustment.
769 */
770 return (position + crtc->scanline_offset) % vtotal;
771 }
772
773 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
774 unsigned int flags, int *vpos, int *hpos,
775 ktime_t *stime, ktime_t *etime,
776 const struct drm_display_mode *mode)
777 {
778 struct drm_i915_private *dev_priv = dev->dev_private;
779 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
780 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
781 int position;
782 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
783 bool in_vbl = true;
784 int ret = 0;
785 unsigned long irqflags;
786
787 if (WARN_ON(!mode->crtc_clock)) {
788 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
789 "pipe %c\n", pipe_name(pipe));
790 return 0;
791 }
792
793 htotal = mode->crtc_htotal;
794 hsync_start = mode->crtc_hsync_start;
795 vtotal = mode->crtc_vtotal;
796 vbl_start = mode->crtc_vblank_start;
797 vbl_end = mode->crtc_vblank_end;
798
799 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
800 vbl_start = DIV_ROUND_UP(vbl_start, 2);
801 vbl_end /= 2;
802 vtotal /= 2;
803 }
804
805 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
806
807 /*
808 * Lock uncore.lock, as we will do multiple timing critical raw
809 * register reads, potentially with preemption disabled, so the
810 * following code must not block on uncore.lock.
811 */
812 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
813
814 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
815
816 /* Get optional system timestamp before query. */
817 if (stime)
818 *stime = ktime_get();
819
820 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
821 /* No obvious pixelcount register. Only query vertical
822 * scanout position from Display scan line register.
823 */
824 position = __intel_get_crtc_scanline(intel_crtc);
825 } else {
826 /* Have access to pixelcount since start of frame.
827 * We can split this into vertical and horizontal
828 * scanout position.
829 */
830 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
831
832 /* convert to pixel counts */
833 vbl_start *= htotal;
834 vbl_end *= htotal;
835 vtotal *= htotal;
836
837 /*
838 * In interlaced modes, the pixel counter counts all pixels,
839 * so one field will have htotal more pixels. In order to avoid
840 * the reported position from jumping backwards when the pixel
841 * counter is beyond the length of the shorter field, just
842 * clamp the position the length of the shorter field. This
843 * matches how the scanline counter based position works since
844 * the scanline counter doesn't count the two half lines.
845 */
846 if (position >= vtotal)
847 position = vtotal - 1;
848
849 /*
850 * Start of vblank interrupt is triggered at start of hsync,
851 * just prior to the first active line of vblank. However we
852 * consider lines to start at the leading edge of horizontal
853 * active. So, should we get here before we've crossed into
854 * the horizontal active of the first line in vblank, we would
855 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
856 * always add htotal-hsync_start to the current pixel position.
857 */
858 position = (position + htotal - hsync_start) % vtotal;
859 }
860
861 /* Get optional system timestamp after query. */
862 if (etime)
863 *etime = ktime_get();
864
865 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
866
867 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
868
869 in_vbl = position >= vbl_start && position < vbl_end;
870
871 /*
872 * While in vblank, position will be negative
873 * counting up towards 0 at vbl_end. And outside
874 * vblank, position will be positive counting
875 * up since vbl_end.
876 */
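/*
 * Example with made-up timings (vbl_start = 1080, vbl_end = 1084,
 * vtotal = 1089): scanline 1082 (inside vblank) becomes -2, while
 * scanline 10 (outside vblank) becomes 10 + (1089 - 1084) = 15.
 */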
877 if (position >= vbl_start)
878 position -= vbl_end;
879 else
880 position += vtotal - vbl_end;
881
882 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
883 *vpos = position;
884 *hpos = 0;
885 } else {
886 *vpos = position / htotal;
887 *hpos = position - (*vpos * htotal);
888 }
889
890 /* In vblank? */
891 if (in_vbl)
892 ret |= DRM_SCANOUTPOS_IN_VBLANK;
893
894 return ret;
895 }
896
897 int intel_get_crtc_scanline(struct intel_crtc *crtc)
898 {
899 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
900 unsigned long irqflags;
901 int position;
902
903 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
904 position = __intel_get_crtc_scanline(crtc);
905 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
906
907 return position;
908 }
909
910 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
911 int *max_error,
912 struct timeval *vblank_time,
913 unsigned flags)
914 {
915 struct drm_crtc *crtc;
916
917 if (pipe >= INTEL_INFO(dev)->num_pipes) {
918 DRM_ERROR("Invalid crtc %u\n", pipe);
919 return -EINVAL;
920 }
921
922 /* Get drm_crtc to timestamp: */
923 crtc = intel_get_crtc_for_pipe(dev, pipe);
924 if (crtc == NULL) {
925 DRM_ERROR("Invalid crtc %u\n", pipe);
926 return -EINVAL;
927 }
928
929 if (!crtc->hwmode.crtc_clock) {
930 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
931 return -EBUSY;
932 }
933
934 /* Helper routine in DRM core does all the work: */
935 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
936 vblank_time, flags,
937 &crtc->hwmode);
938 }
939
940 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
941 {
942 struct drm_i915_private *dev_priv = dev->dev_private;
943 u32 busy_up, busy_down, max_avg, min_avg;
944 u8 new_delay;
945
946 spin_lock(&mchdev_lock);
947
948 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
949
950 new_delay = dev_priv->ips.cur_delay;
951
952 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
953 busy_up = I915_READ(RCPREVBSYTUPAVG);
954 busy_down = I915_READ(RCPREVBSYTDNAVG);
955 max_avg = I915_READ(RCBMAXAVG);
956 min_avg = I915_READ(RCBMINAVG);
957
958 /* Handle RCS change request from hw */
959 if (busy_up > max_avg) {
960 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
961 new_delay = dev_priv->ips.cur_delay - 1;
962 if (new_delay < dev_priv->ips.max_delay)
963 new_delay = dev_priv->ips.max_delay;
964 } else if (busy_down < min_avg) {
965 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
966 new_delay = dev_priv->ips.cur_delay + 1;
967 if (new_delay > dev_priv->ips.min_delay)
968 new_delay = dev_priv->ips.min_delay;
969 }
970
971 if (ironlake_set_drps(dev, new_delay))
972 dev_priv->ips.cur_delay = new_delay;
973
974 spin_unlock(&mchdev_lock);
975
976 return;
977 }
978
979 static void notify_ring(struct intel_engine_cs *ring)
980 {
981 if (!intel_ring_initialized(ring))
982 return;
983
984 trace_i915_gem_request_notify(ring);
985
986 wake_up_all(&ring->irq_queue);
987 }
988
989 static void vlv_c0_read(struct drm_i915_private *dev_priv,
990 struct intel_rps_ei *ei)
991 {
992 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
993 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
994 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
995 }
996
997 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
998 {
999 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
1000 }
1001
1002 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1003 {
1004 const struct intel_rps_ei *prev = &dev_priv->rps.ei;
1005 struct intel_rps_ei now;
1006 u32 events = 0;
1007
1008 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1009 return 0;
1010
1011 vlv_c0_read(dev_priv, &now);
1012 if (now.cz_clock == 0)
1013 return 0;
1014
1015 if (prev->cz_clock) {
1016 u64 time, c0;
1017 unsigned int mul;
1018
1019 mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
1020 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1021 mul <<= 8;
1022
1023 time = now.cz_clock - prev->cz_clock;
1024 time *= dev_priv->czclk_freq;
1025
1026 /* Workload can be split between render + media,
1027 * e.g. SwapBuffers being blitted in X after being rendered in
1028 * mesa. To account for this we need to combine both engines
1029 * into our activity counter.
1030 */
1031 c0 = now.render_c0 - prev->render_c0;
1032 c0 += now.media_c0 - prev->media_c0;
1033 c0 *= mul;
1034
1035 if (c0 > time * dev_priv->rps.up_threshold)
1036 events = GEN6_PM_RP_UP_THRESHOLD;
1037 else if (c0 < time * dev_priv->rps.down_threshold)
1038 events = GEN6_PM_RP_DOWN_THRESHOLD;
1039 }
1040
1041 dev_priv->rps.ei = now;
1042 return events;
1043 }
1044
1045 static bool any_waiters(struct drm_i915_private *dev_priv)
1046 {
1047 struct intel_engine_cs *ring;
1048 int i;
1049
1050 for_each_ring(ring, dev_priv, i)
1051 if (ring->irq_refcount)
1052 return true;
1053
1054 return false;
1055 }
1056
1057 static void gen6_pm_rps_work(struct work_struct *work)
1058 {
1059 struct drm_i915_private *dev_priv =
1060 container_of(work, struct drm_i915_private, rps.work);
1061 bool client_boost;
1062 int new_delay, adj, min, max;
1063 u32 pm_iir;
1064
1065 spin_lock_irq(&dev_priv->irq_lock);
1066 /* Speed up work cancellation while disabling rps interrupts. */
1067 if (!dev_priv->rps.interrupts_enabled) {
1068 spin_unlock_irq(&dev_priv->irq_lock);
1069 return;
1070 }
1071 pm_iir = dev_priv->rps.pm_iir;
1072 dev_priv->rps.pm_iir = 0;
1073 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1074 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1075 client_boost = dev_priv->rps.client_boost;
1076 dev_priv->rps.client_boost = false;
1077 spin_unlock_irq(&dev_priv->irq_lock);
1078
1079 /* Make sure we didn't queue anything we're not going to process. */
1080 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1081
1082 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1083 return;
1084
1085 mutex_lock(&dev_priv->rps.hw_lock);
1086
1087 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1088
1089 adj = dev_priv->rps.last_adj;
1090 new_delay = dev_priv->rps.cur_freq;
1091 min = dev_priv->rps.min_freq_softlimit;
1092 max = dev_priv->rps.max_freq_softlimit;
1093
1094 if (client_boost) {
1095 new_delay = dev_priv->rps.max_freq_softlimit;
1096 adj = 0;
1097 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1098 if (adj > 0)
1099 adj *= 2;
1100 else /* CHV needs even encode values */
1101 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1102 /*
1103 * For better performance, jump directly
1104 * to RPe if we're below it.
1105 */
1106 if (new_delay < dev_priv->rps.efficient_freq - adj) {
1107 new_delay = dev_priv->rps.efficient_freq;
1108 adj = 0;
1109 }
1110 } else if (any_waiters(dev_priv)) {
1111 adj = 0;
1112 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1113 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1114 new_delay = dev_priv->rps.efficient_freq;
1115 else
1116 new_delay = dev_priv->rps.min_freq_softlimit;
1117 adj = 0;
1118 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1119 if (adj < 0)
1120 adj *= 2;
1121 else /* CHV needs even encode values */
1122 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1123 } else { /* unknown event */
1124 adj = 0;
1125 }
1126
1127 dev_priv->rps.last_adj = adj;
1128
1129 /* sysfs frequency interfaces may have snuck in while servicing the
1130 * interrupt
1131 */
1132 new_delay += adj;
1133 new_delay = clamp_t(int, new_delay, min, max);
1134
1135 intel_set_rps(dev_priv->dev, new_delay);
1136
1137 mutex_unlock(&dev_priv->rps.hw_lock);
1138 }
1139
1140
1141 /**
1142 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1143 * occurred.
1144 * @work: workqueue struct
1145 *
1146 * Doesn't actually do anything except notify userspace. As a consequence of
1147 * this event, userspace should try to remap the bad rows, since
1148 * statistically the same row is more likely to go bad again.
1149 */
1150 static void ivybridge_parity_work(struct work_struct *work)
1151 {
1152 struct drm_i915_private *dev_priv =
1153 container_of(work, struct drm_i915_private, l3_parity.error_work);
1154 u32 error_status, row, bank, subbank;
1155 char *parity_event[6];
1156 uint32_t misccpctl;
1157 uint8_t slice = 0;
1158
1159 /* We must turn off DOP level clock gating to access the L3 registers.
1160 * In order to prevent a get/put style interface, acquire struct mutex
1161 * any time we access those registers.
1162 */
1163 mutex_lock(&dev_priv->dev->struct_mutex);
1164
1165 /* If we've screwed up tracking, just let the interrupt fire again */
1166 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1167 goto out;
1168
1169 misccpctl = I915_READ(GEN7_MISCCPCTL);
1170 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1171 POSTING_READ(GEN7_MISCCPCTL);
1172
1173 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1174 u32 reg;
1175
1176 slice--;
1177 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1178 break;
1179
1180 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1181
1182 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1183
1184 error_status = I915_READ(reg);
1185 row = GEN7_PARITY_ERROR_ROW(error_status);
1186 bank = GEN7_PARITY_ERROR_BANK(error_status);
1187 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1188
1189 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1190 POSTING_READ(reg);
1191
1192 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1193 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1194 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1195 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1196 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1197 parity_event[5] = NULL;
1198
1199 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1200 KOBJ_CHANGE, parity_event);
1201
1202 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1203 slice, row, bank, subbank);
1204
1205 kfree(parity_event[4]);
1206 kfree(parity_event[3]);
1207 kfree(parity_event[2]);
1208 kfree(parity_event[1]);
1209 }
1210
1211 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1212
1213 out:
1214 WARN_ON(dev_priv->l3_parity.which_slice);
1215 spin_lock_irq(&dev_priv->irq_lock);
1216 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1217 spin_unlock_irq(&dev_priv->irq_lock);
1218
1219 mutex_unlock(&dev_priv->dev->struct_mutex);
1220 }
1221
1222 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1223 {
1224 struct drm_i915_private *dev_priv = dev->dev_private;
1225
1226 if (!HAS_L3_DPF(dev))
1227 return;
1228
1229 spin_lock(&dev_priv->irq_lock);
1230 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1231 spin_unlock(&dev_priv->irq_lock);
1232
1233 iir &= GT_PARITY_ERROR(dev);
1234 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1235 dev_priv->l3_parity.which_slice |= 1 << 1;
1236
1237 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1238 dev_priv->l3_parity.which_slice |= 1 << 0;
1239
1240 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1241 }
1242
1243 static void ilk_gt_irq_handler(struct drm_device *dev,
1244 struct drm_i915_private *dev_priv,
1245 u32 gt_iir)
1246 {
1247 if (gt_iir &
1248 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1249 notify_ring(&dev_priv->ring[RCS]);
1250 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1251 notify_ring(&dev_priv->ring[VCS]);
1252 }
1253
1254 static void snb_gt_irq_handler(struct drm_device *dev,
1255 struct drm_i915_private *dev_priv,
1256 u32 gt_iir)
1257 {
1258
1259 if (gt_iir &
1260 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1261 notify_ring(&dev_priv->ring[RCS]);
1262 if (gt_iir & GT_BSD_USER_INTERRUPT)
1263 notify_ring(&dev_priv->ring[VCS]);
1264 if (gt_iir & GT_BLT_USER_INTERRUPT)
1265 notify_ring(&dev_priv->ring[BCS]);
1266
1267 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1268 GT_BSD_CS_ERROR_INTERRUPT |
1269 GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1270 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1271
1272 if (gt_iir & GT_PARITY_ERROR(dev))
1273 ivybridge_parity_error_irq_handler(dev, gt_iir);
1274 }
1275
1276 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1277 u32 master_ctl)
1278 {
1279 irqreturn_t ret = IRQ_NONE;
1280
1281 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1282 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1283 if (tmp) {
1284 I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1285 ret = IRQ_HANDLED;
1286
1287 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1288 intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1289 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1290 notify_ring(&dev_priv->ring[RCS]);
1291
1292 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1293 intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1294 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1295 notify_ring(&dev_priv->ring[BCS]);
1296 } else
1297 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1298 }
1299
1300 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1301 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1302 if (tmp) {
1303 I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1304 ret = IRQ_HANDLED;
1305
1306 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1307 intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1308 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1309 notify_ring(&dev_priv->ring[VCS]);
1310
1311 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1312 intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1313 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1314 notify_ring(&dev_priv->ring[VCS2]);
1315 } else
1316 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1317 }
1318
1319 if (master_ctl & GEN8_GT_VECS_IRQ) {
1320 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1321 if (tmp) {
1322 I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1323 ret = IRQ_HANDLED;
1324
1325 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1326 intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1327 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1328 notify_ring(&dev_priv->ring[VECS]);
1329 } else
1330 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1331 }
1332
1333 if (master_ctl & GEN8_GT_PM_IRQ) {
1334 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1335 if (tmp & dev_priv->pm_rps_events) {
1336 I915_WRITE_FW(GEN8_GT_IIR(2),
1337 tmp & dev_priv->pm_rps_events);
1338 ret = IRQ_HANDLED;
1339 gen6_rps_irq_handler(dev_priv, tmp);
1340 } else
1341 DRM_ERROR("The master control interrupt lied (PM)!\n");
1342 }
1343
1344 return ret;
1345 }
1346
1347 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1348 {
1349 switch (port) {
1350 case PORT_A:
1351 return val & PORTA_HOTPLUG_LONG_DETECT;
1352 case PORT_B:
1353 return val & PORTB_HOTPLUG_LONG_DETECT;
1354 case PORT_C:
1355 return val & PORTC_HOTPLUG_LONG_DETECT;
1356 default:
1357 return false;
1358 }
1359 }
1360
1361 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1362 {
1363 switch (port) {
1364 case PORT_E:
1365 return val & PORTE_HOTPLUG_LONG_DETECT;
1366 default:
1367 return false;
1368 }
1369 }
1370
1371 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1372 {
1373 switch (port) {
1374 case PORT_A:
1375 return val & PORTA_HOTPLUG_LONG_DETECT;
1376 case PORT_B:
1377 return val & PORTB_HOTPLUG_LONG_DETECT;
1378 case PORT_C:
1379 return val & PORTC_HOTPLUG_LONG_DETECT;
1380 case PORT_D:
1381 return val & PORTD_HOTPLUG_LONG_DETECT;
1382 default:
1383 return false;
1384 }
1385 }
1386
1387 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1388 {
1389 switch (port) {
1390 case PORT_A:
1391 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1392 default:
1393 return false;
1394 }
1395 }
1396
1397 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1398 {
1399 switch (port) {
1400 case PORT_B:
1401 return val & PORTB_HOTPLUG_LONG_DETECT;
1402 case PORT_C:
1403 return val & PORTC_HOTPLUG_LONG_DETECT;
1404 case PORT_D:
1405 return val & PORTD_HOTPLUG_LONG_DETECT;
1406 default:
1407 return false;
1408 }
1409 }
1410
1411 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1412 {
1413 switch (port) {
1414 case PORT_B:
1415 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1416 case PORT_C:
1417 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1418 case PORT_D:
1419 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1420 default:
1421 return false;
1422 }
1423 }
1424
1425 /*
1426 * Get a bit mask of pins that have triggered, and which ones may be long.
1427 * This can be called multiple times with the same masks to accumulate
1428 * hotplug detection results from several registers.
1429 *
1430 * Note that the caller is expected to zero out the masks initially.
1431 */
1432 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1433 u32 hotplug_trigger, u32 dig_hotplug_reg,
1434 const u32 hpd[HPD_NUM_PINS],
1435 bool long_pulse_detect(enum port port, u32 val))
1436 {
1437 enum port port;
1438 int i;
1439
1440 for_each_hpd_pin(i) {
1441 if ((hpd[i] & hotplug_trigger) == 0)
1442 continue;
1443
1444 *pin_mask |= BIT(i);
1445
1446 if (!intel_hpd_pin_to_port(i, &port))
1447 continue;
1448
1449 if (long_pulse_detect(port, dig_hotplug_reg))
1450 *long_mask |= BIT(i);
1451 }
1452
1453 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1454 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1455
1456 }
1457
1458 static void gmbus_irq_handler(struct drm_device *dev)
1459 {
1460 struct drm_i915_private *dev_priv = dev->dev_private;
1461
1462 wake_up_all(&dev_priv->gmbus_wait_queue);
1463 }
1464
1465 static void dp_aux_irq_handler(struct drm_device *dev)
1466 {
1467 struct drm_i915_private *dev_priv = dev->dev_private;
1468
1469 wake_up_all(&dev_priv->gmbus_wait_queue);
1470 }
1471
1472 #if defined(CONFIG_DEBUG_FS)
1473 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1474 uint32_t crc0, uint32_t crc1,
1475 uint32_t crc2, uint32_t crc3,
1476 uint32_t crc4)
1477 {
1478 struct drm_i915_private *dev_priv = dev->dev_private;
1479 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1480 struct intel_pipe_crc_entry *entry;
1481 int head, tail;
1482
1483 spin_lock(&pipe_crc->lock);
1484
1485 if (!pipe_crc->entries) {
1486 spin_unlock(&pipe_crc->lock);
1487 DRM_DEBUG_KMS("spurious interrupt\n");
1488 return;
1489 }
1490
1491 head = pipe_crc->head;
1492 tail = pipe_crc->tail;
1493
1494 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1495 spin_unlock(&pipe_crc->lock);
1496 DRM_ERROR("CRC buffer overflowing\n");
1497 return;
1498 }
1499
1500 entry = &pipe_crc->entries[head];
1501
1502 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1503 entry->crc[0] = crc0;
1504 entry->crc[1] = crc1;
1505 entry->crc[2] = crc2;
1506 entry->crc[3] = crc3;
1507 entry->crc[4] = crc4;
1508
1509 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1510 pipe_crc->head = head;
1511
1512 spin_unlock(&pipe_crc->lock);
1513
1514 wake_up_interruptible(&pipe_crc->wq);
1515 }
1516 #else
1517 static inline void
1518 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1519 uint32_t crc0, uint32_t crc1,
1520 uint32_t crc2, uint32_t crc3,
1521 uint32_t crc4) {}
1522 #endif
1523
1524
1525 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1526 {
1527 struct drm_i915_private *dev_priv = dev->dev_private;
1528
1529 display_pipe_crc_irq_handler(dev, pipe,
1530 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1531 0, 0, 0, 0);
1532 }
1533
1534 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1535 {
1536 struct drm_i915_private *dev_priv = dev->dev_private;
1537
1538 display_pipe_crc_irq_handler(dev, pipe,
1539 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1540 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1541 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1542 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1543 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1544 }
1545
1546 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1547 {
1548 struct drm_i915_private *dev_priv = dev->dev_private;
1549 uint32_t res1, res2;
1550
1551 if (INTEL_INFO(dev)->gen >= 3)
1552 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1553 else
1554 res1 = 0;
1555
1556 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1557 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1558 else
1559 res2 = 0;
1560
1561 display_pipe_crc_irq_handler(dev, pipe,
1562 I915_READ(PIPE_CRC_RES_RED(pipe)),
1563 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1564 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1565 res1, res2);
1566 }
1567
1568 /* The RPS events need forcewake, so we add them to a work queue and mask their
1569 * IMR bits until the work is done. Other interrupts can be processed without
1570 * the work queue. */
1571 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1572 {
1573 if (pm_iir & dev_priv->pm_rps_events) {
1574 spin_lock(&dev_priv->irq_lock);
1575 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1576 if (dev_priv->rps.interrupts_enabled) {
1577 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1578 queue_work(dev_priv->wq, &dev_priv->rps.work);
1579 }
1580 spin_unlock(&dev_priv->irq_lock);
1581 }
1582
1583 if (INTEL_INFO(dev_priv)->gen >= 8)
1584 return;
1585
1586 if (HAS_VEBOX(dev_priv->dev)) {
1587 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1588 notify_ring(&dev_priv->ring[VECS]);
1589
1590 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1591 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1592 }
1593 }
1594
1595 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1596 {
1597 if (!drm_handle_vblank(dev, pipe))
1598 return false;
1599
1600 return true;
1601 }
1602
1603 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1604 {
1605 struct drm_i915_private *dev_priv = dev->dev_private;
1606 u32 pipe_stats[I915_MAX_PIPES] = { };
1607 int pipe;
1608
1609 spin_lock(&dev_priv->irq_lock);
1610 for_each_pipe(dev_priv, pipe) {
1611 int reg;
1612 u32 mask, iir_bit = 0;
1613
1614 /*
1615 * PIPESTAT bits get signalled even when the interrupt is
1616 * disabled with the mask bits, and some of the status bits do
1617 * not generate interrupts at all (like the underrun bit). Hence
1618 * we need to be careful that we only handle what we want to
1619 * handle.
1620 */
1621
1622 /* FIFO underruns are filtered in the underrun handler. */
1623 mask = PIPE_FIFO_UNDERRUN_STATUS;
1624
1625 switch (pipe) {
1626 case PIPE_A:
1627 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1628 break;
1629 case PIPE_B:
1630 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1631 break;
1632 case PIPE_C:
1633 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1634 break;
1635 }
1636 if (iir & iir_bit)
1637 mask |= dev_priv->pipestat_irq_mask[pipe];
1638
1639 if (!mask)
1640 continue;
1641
1642 reg = PIPESTAT(pipe);
1643 mask |= PIPESTAT_INT_ENABLE_MASK;
1644 pipe_stats[pipe] = I915_READ(reg) & mask;
1645
1646 /*
1647 * Clear the PIPE*STAT regs before the IIR
1648 */
1649 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1650 PIPESTAT_INT_STATUS_MASK))
1651 I915_WRITE(reg, pipe_stats[pipe]);
1652 }
1653 spin_unlock(&dev_priv->irq_lock);
1654
1655 for_each_pipe(dev_priv, pipe) {
1656 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1657 intel_pipe_handle_vblank(dev, pipe))
1658 intel_check_page_flip(dev, pipe);
1659
1660 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1661 intel_prepare_page_flip(dev, pipe);
1662 intel_finish_page_flip(dev, pipe);
1663 }
1664
1665 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1666 i9xx_pipe_crc_irq_handler(dev, pipe);
1667
1668 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1669 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1670 }
1671
1672 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1673 gmbus_irq_handler(dev);
1674 }
1675
1676 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1677 {
1678 struct drm_i915_private *dev_priv = dev->dev_private;
1679 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1680 u32 pin_mask = 0, long_mask = 0;
1681
1682 if (!hotplug_status)
1683 return;
1684
1685 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1686 /*
1687 * Make sure hotplug status is cleared before we clear IIR, or else we
1688 * may miss hotplug events.
1689 */
1690 POSTING_READ(PORT_HOTPLUG_STAT);
1691
1692 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1693 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1694
1695 if (hotplug_trigger) {
1696 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1697 hotplug_trigger, hpd_status_g4x,
1698 i9xx_port_hotplug_long_detect);
1699
1700 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1701 }
1702
1703 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1704 dp_aux_irq_handler(dev);
1705 } else {
1706 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1707
1708 if (hotplug_trigger) {
1709 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1710 hotplug_trigger, hpd_status_i915,
1711 i9xx_port_hotplug_long_detect);
1712 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1713 }
1714 }
1715 }
1716
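/*
 * VLV top-level handler: read GTIIR, GEN6_PMIIR and VLV_IIR, ack each by
 * writing the value back, then dispatch to the GT/RPS/pipestat handlers.
 * Loop until all three IIRs read back zero.
 */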
1717 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1718 {
1719 struct drm_device *dev = arg;
1720 struct drm_i915_private *dev_priv = dev->dev_private;
1721 u32 iir, gt_iir, pm_iir;
1722 irqreturn_t ret = IRQ_NONE;
1723
1724 if (!intel_irqs_enabled(dev_priv))
1725 return IRQ_NONE;
1726
1727 while (true) {
1728 /* Find, clear, then process each source of interrupt */
1729
1730 gt_iir = I915_READ(GTIIR);
1731 if (gt_iir)
1732 I915_WRITE(GTIIR, gt_iir);
1733
1734 pm_iir = I915_READ(GEN6_PMIIR);
1735 if (pm_iir)
1736 I915_WRITE(GEN6_PMIIR, pm_iir);
1737
1738 iir = I915_READ(VLV_IIR);
1739 if (iir) {
1740 /* Consume port before clearing IIR or we'll miss events */
1741 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1742 i9xx_hpd_irq_handler(dev);
1743 I915_WRITE(VLV_IIR, iir);
1744 }
1745
1746 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1747 goto out;
1748
1749 ret = IRQ_HANDLED;
1750
1751 if (gt_iir)
1752 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1753 if (pm_iir)
1754 gen6_rps_irq_handler(dev_priv, pm_iir);
1755 /* Call regardless, as some status bits might not be
1756 * signalled in iir */
1757 valleyview_pipestat_irq_handler(dev, iir);
1758 }
1759
1760 out:
1761 return ret;
1762 }
1763
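/*
 * CHV top-level handler: same display/pipestat path as VLV, but the GT side
 * uses the gen8-style GEN8_MASTER_IRQ control, which is disabled while the
 * sources are processed and re-armed at the end of each loop iteration.
 */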
1764 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1765 {
1766 struct drm_device *dev = arg;
1767 struct drm_i915_private *dev_priv = dev->dev_private;
1768 u32 master_ctl, iir;
1769 irqreturn_t ret = IRQ_NONE;
1770
1771 if (!intel_irqs_enabled(dev_priv))
1772 return IRQ_NONE;
1773
1774 for (;;) {
1775 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1776 iir = I915_READ(VLV_IIR);
1777
1778 if (master_ctl == 0 && iir == 0)
1779 break;
1780
1781 ret = IRQ_HANDLED;
1782
1783 I915_WRITE(GEN8_MASTER_IRQ, 0);
1784
1785 /* Find, clear, then process each source of interrupt */
1786
1787 if (iir) {
1788 /* Consume port before clearing IIR or we'll miss events */
1789 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1790 i9xx_hpd_irq_handler(dev);
1791 I915_WRITE(VLV_IIR, iir);
1792 }
1793
1794 gen8_gt_irq_handler(dev_priv, master_ctl);
1795
1796 /* Call regardless, as some status bits might not be
1797 * signalled in iir */
1798 valleyview_pipestat_irq_handler(dev, iir);
1799
1800 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1801 POSTING_READ(GEN8_MASTER_IRQ);
1802 }
1803
1804 return ret;
1805 }
1806
1807 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1808 const u32 hpd[HPD_NUM_PINS])
1809 {
1810 struct drm_i915_private *dev_priv = to_i915(dev);
1811 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1812
1813 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1814 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1815
1816 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1817 dig_hotplug_reg, hpd,
1818 pch_port_hotplug_long_detect);
1819
1820 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1821 }
1822
1823 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1824 {
1825 struct drm_i915_private *dev_priv = dev->dev_private;
1826 int pipe;
1827 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1828
1829 if (hotplug_trigger)
1830 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1831
1832 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1833 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1834 SDE_AUDIO_POWER_SHIFT);
1835 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1836 port_name(port));
1837 }
1838
1839 if (pch_iir & SDE_AUX_MASK)
1840 dp_aux_irq_handler(dev);
1841
1842 if (pch_iir & SDE_GMBUS)
1843 gmbus_irq_handler(dev);
1844
1845 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1846 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1847
1848 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1849 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1850
1851 if (pch_iir & SDE_POISON)
1852 DRM_ERROR("PCH poison interrupt\n");
1853
1854 if (pch_iir & SDE_FDI_MASK)
1855 for_each_pipe(dev_priv, pipe)
1856 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1857 pipe_name(pipe),
1858 I915_READ(FDI_RX_IIR(pipe)));
1859
1860 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1861 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1862
1863 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1864 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1865
1866 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1867 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1868
1869 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1870 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1871 }
1872
1873 static void ivb_err_int_handler(struct drm_device *dev)
1874 {
1875 struct drm_i915_private *dev_priv = dev->dev_private;
1876 u32 err_int = I915_READ(GEN7_ERR_INT);
1877 enum pipe pipe;
1878
1879 if (err_int & ERR_INT_POISON)
1880 DRM_ERROR("Poison interrupt\n");
1881
1882 for_each_pipe(dev_priv, pipe) {
1883 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1884 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1885
1886 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1887 if (IS_IVYBRIDGE(dev))
1888 ivb_pipe_crc_irq_handler(dev, pipe);
1889 else
1890 hsw_pipe_crc_irq_handler(dev, pipe);
1891 }
1892 }
1893
1894 I915_WRITE(GEN7_ERR_INT, err_int);
1895 }
1896
1897 static void cpt_serr_int_handler(struct drm_device *dev)
1898 {
1899 struct drm_i915_private *dev_priv = dev->dev_private;
1900 u32 serr_int = I915_READ(SERR_INT);
1901
1902 if (serr_int & SERR_INT_POISON)
1903 DRM_ERROR("PCH poison interrupt\n");
1904
1905 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1906 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1907
1908 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1909 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1910
1911 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1912 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1913
1914 I915_WRITE(SERR_INT, serr_int);
1915 }
1916
1917 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1918 {
1919 struct drm_i915_private *dev_priv = dev->dev_private;
1920 int pipe;
1921 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1922
1923 if (hotplug_trigger)
1924 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1925
1926 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1927 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1928 SDE_AUDIO_POWER_SHIFT_CPT);
1929 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1930 port_name(port));
1931 }
1932
1933 if (pch_iir & SDE_AUX_MASK_CPT)
1934 dp_aux_irq_handler(dev);
1935
1936 if (pch_iir & SDE_GMBUS_CPT)
1937 gmbus_irq_handler(dev);
1938
1939 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1940 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1941
1942 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1943 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1944
1945 if (pch_iir & SDE_FDI_MASK_CPT)
1946 for_each_pipe(dev_priv, pipe)
1947 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1948 pipe_name(pipe),
1949 I915_READ(FDI_RX_IIR(pipe)));
1950
1951 if (pch_iir & SDE_ERROR_CPT)
1952 cpt_serr_int_handler(dev);
1953 }
1954
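/*
 * On SPT the hotplug triggers are split across two registers: ports A-D are
 * reported via PCH_PORT_HOTPLUG while port E is reported via
 * PCH_PORT_HOTPLUG2, hence the two separate trigger masks below.
 */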
1955 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1956 {
1957 struct drm_i915_private *dev_priv = dev->dev_private;
1958 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1959 ~SDE_PORTE_HOTPLUG_SPT;
1960 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1961 u32 pin_mask = 0, long_mask = 0;
1962
1963 if (hotplug_trigger) {
1964 u32 dig_hotplug_reg;
1965
1966 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1967 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1968
1969 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1970 dig_hotplug_reg, hpd_spt,
1971 spt_port_hotplug_long_detect);
1972 }
1973
1974 if (hotplug2_trigger) {
1975 u32 dig_hotplug_reg;
1976
1977 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1978 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1979
1980 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1981 dig_hotplug_reg, hpd_spt,
1982 spt_port_hotplug2_long_detect);
1983 }
1984
1985 if (pin_mask)
1986 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1987
1988 if (pch_iir & SDE_GMBUS_CPT)
1989 gmbus_irq_handler(dev);
1990 }
1991
1992 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1993 const u32 hpd[HPD_NUM_PINS])
1994 {
1995 struct drm_i915_private *dev_priv = to_i915(dev);
1996 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1997
1998 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1999 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2000
2001 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2002 dig_hotplug_reg, hpd,
2003 ilk_port_hotplug_long_detect);
2004
2005 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2006 }
2007
2008 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2009 {
2010 struct drm_i915_private *dev_priv = dev->dev_private;
2011 enum pipe pipe;
2012 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2013
2014 if (hotplug_trigger)
2015 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2016
2017 if (de_iir & DE_AUX_CHANNEL_A)
2018 dp_aux_irq_handler(dev);
2019
2020 if (de_iir & DE_GSE)
2021 intel_opregion_asle_intr(dev);
2022
2023 if (de_iir & DE_POISON)
2024 DRM_ERROR("Poison interrupt\n");
2025
2026 for_each_pipe(dev_priv, pipe) {
2027 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2028 intel_pipe_handle_vblank(dev, pipe))
2029 intel_check_page_flip(dev, pipe);
2030
2031 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2032 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2033
2034 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2035 i9xx_pipe_crc_irq_handler(dev, pipe);
2036
2037 /* plane/pipes map 1:1 on ilk+ */
2038 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2039 intel_prepare_page_flip(dev, pipe);
2040 intel_finish_page_flip_plane(dev, pipe);
2041 }
2042 }
2043
2044 /* check event from PCH */
2045 if (de_iir & DE_PCH_EVENT) {
2046 u32 pch_iir = I915_READ(SDEIIR);
2047
2048 if (HAS_PCH_CPT(dev))
2049 cpt_irq_handler(dev, pch_iir);
2050 else
2051 ibx_irq_handler(dev, pch_iir);
2052
2053 /* should clear PCH hotplug event before clear CPU irq */
2054 I915_WRITE(SDEIIR, pch_iir);
2055 }
2056
2057 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2058 ironlake_rps_change_irq_handler(dev);
2059 }
2060
2061 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2062 {
2063 struct drm_i915_private *dev_priv = dev->dev_private;
2064 enum pipe pipe;
2065 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2066
2067 if (hotplug_trigger)
2068 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2069
2070 if (de_iir & DE_ERR_INT_IVB)
2071 ivb_err_int_handler(dev);
2072
2073 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2074 dp_aux_irq_handler(dev);
2075
2076 if (de_iir & DE_GSE_IVB)
2077 intel_opregion_asle_intr(dev);
2078
2079 for_each_pipe(dev_priv, pipe) {
2080 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2081 intel_pipe_handle_vblank(dev, pipe))
2082 intel_check_page_flip(dev, pipe);
2083
2084 /* plane/pipes map 1:1 on ilk+ */
2085 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2086 intel_prepare_page_flip(dev, pipe);
2087 intel_finish_page_flip_plane(dev, pipe);
2088 }
2089 }
2090
2091 /* check event from PCH */
2092 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2093 u32 pch_iir = I915_READ(SDEIIR);
2094
2095 cpt_irq_handler(dev, pch_iir);
2096
2097 /* clear PCH hotplug event before clear CPU irq */
2098 I915_WRITE(SDEIIR, pch_iir);
2099 }
2100 }
2101
2102 /*
2103 * To handle irqs with the minimum potential races with fresh interrupts, we:
2104 * 1 - Disable Master Interrupt Control.
2105 * 2 - Find the source(s) of the interrupt.
2106 * 3 - Clear the Interrupt Identity bits (IIR).
2107 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2108 * 5 - Re-enable Master Interrupt Control.
2109 */
2110 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2111 {
2112 struct drm_device *dev = arg;
2113 struct drm_i915_private *dev_priv = dev->dev_private;
2114 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2115 irqreturn_t ret = IRQ_NONE;
2116
2117 if (!intel_irqs_enabled(dev_priv))
2118 return IRQ_NONE;
2119
2120 /* We get interrupts on unclaimed registers, so check for this before we
2121 * do any I915_{READ,WRITE}. */
2122 intel_uncore_check_errors(dev);
2123
2124 /* disable master interrupt before clearing iir */
2125 de_ier = I915_READ(DEIER);
2126 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2127 POSTING_READ(DEIER);
2128
2129 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2130 * interrupts will be stored on its back queue, and then we'll be
2131 * able to process them after we restore SDEIER (as soon as we restore
2132 * it, we'll get an interrupt if SDEIIR still has something to process
2133 * due to its back queue). */
2134 if (!HAS_PCH_NOP(dev)) {
2135 sde_ier = I915_READ(SDEIER);
2136 I915_WRITE(SDEIER, 0);
2137 POSTING_READ(SDEIER);
2138 }
2139
2140 /* Find, clear, then process each source of interrupt */
2141
2142 gt_iir = I915_READ(GTIIR);
2143 if (gt_iir) {
2144 I915_WRITE(GTIIR, gt_iir);
2145 ret = IRQ_HANDLED;
2146 if (INTEL_INFO(dev)->gen >= 6)
2147 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2148 else
2149 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2150 }
2151
2152 de_iir = I915_READ(DEIIR);
2153 if (de_iir) {
2154 I915_WRITE(DEIIR, de_iir);
2155 ret = IRQ_HANDLED;
2156 if (INTEL_INFO(dev)->gen >= 7)
2157 ivb_display_irq_handler(dev, de_iir);
2158 else
2159 ilk_display_irq_handler(dev, de_iir);
2160 }
2161
2162 if (INTEL_INFO(dev)->gen >= 6) {
2163 u32 pm_iir = I915_READ(GEN6_PMIIR);
2164 if (pm_iir) {
2165 I915_WRITE(GEN6_PMIIR, pm_iir);
2166 ret = IRQ_HANDLED;
2167 gen6_rps_irq_handler(dev_priv, pm_iir);
2168 }
2169 }
2170
2171 I915_WRITE(DEIER, de_ier);
2172 POSTING_READ(DEIER);
2173 if (!HAS_PCH_NOP(dev)) {
2174 I915_WRITE(SDEIER, sde_ier);
2175 POSTING_READ(SDEIER);
2176 }
2177
2178 return ret;
2179 }
2180
2181 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2182 const u32 hpd[HPD_NUM_PINS])
2183 {
2184 struct drm_i915_private *dev_priv = to_i915(dev);
2185 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2186
2187 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2188 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2189
2190 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2191 dig_hotplug_reg, hpd,
2192 bxt_port_hotplug_long_detect);
2193
2194 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2195 }
2196
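/*
 * Gen8+ top-level handler. Mirrors the ironlake scheme documented above:
 * disable the master control, then walk the GT, DE misc, DE port, per-pipe
 * and (where present) PCH IIRs, clearing each before processing it, and
 * finally re-enable the master control.
 */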
2197 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2198 {
2199 struct drm_device *dev = arg;
2200 struct drm_i915_private *dev_priv = dev->dev_private;
2201 u32 master_ctl;
2202 irqreturn_t ret = IRQ_NONE;
2203 uint32_t tmp = 0;
2204 enum pipe pipe;
2205 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2206
2207 if (!intel_irqs_enabled(dev_priv))
2208 return IRQ_NONE;
2209
2210 if (INTEL_INFO(dev_priv)->gen >= 9)
2211 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2212 GEN9_AUX_CHANNEL_D;
2213
2214 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2215 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2216 if (!master_ctl)
2217 return IRQ_NONE;
2218
2219 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2220
2221 /* Find, clear, then process each source of interrupt */
2222
2223 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2224
2225 if (master_ctl & GEN8_DE_MISC_IRQ) {
2226 tmp = I915_READ(GEN8_DE_MISC_IIR);
2227 if (tmp) {
2228 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2229 ret = IRQ_HANDLED;
2230 if (tmp & GEN8_DE_MISC_GSE)
2231 intel_opregion_asle_intr(dev);
2232 else
2233 DRM_ERROR("Unexpected DE Misc interrupt\n");
2234 }
2235 else
2236 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2237 }
2238
2239 if (master_ctl & GEN8_DE_PORT_IRQ) {
2240 tmp = I915_READ(GEN8_DE_PORT_IIR);
2241 if (tmp) {
2242 bool found = false;
2243 u32 hotplug_trigger = 0;
2244
2245 if (IS_BROXTON(dev_priv))
2246 hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2247 else if (IS_BROADWELL(dev_priv))
2248 hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2249
2250 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2251 ret = IRQ_HANDLED;
2252
2253 if (tmp & aux_mask) {
2254 dp_aux_irq_handler(dev);
2255 found = true;
2256 }
2257
2258 if (hotplug_trigger) {
2259 if (IS_BROXTON(dev))
2260 bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2261 else
2262 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2263 found = true;
2264 }
2265
2266 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2267 gmbus_irq_handler(dev);
2268 found = true;
2269 }
2270
2271 if (!found)
2272 DRM_ERROR("Unexpected DE Port interrupt\n");
2273 }
2274 else
2275 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2276 }
2277
2278 for_each_pipe(dev_priv, pipe) {
2279 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2280
2281 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2282 continue;
2283
2284 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2285 if (pipe_iir) {
2286 ret = IRQ_HANDLED;
2287 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2288
2289 if (pipe_iir & GEN8_PIPE_VBLANK &&
2290 intel_pipe_handle_vblank(dev, pipe))
2291 intel_check_page_flip(dev, pipe);
2292
2293 if (INTEL_INFO(dev_priv)->gen >= 9)
2294 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2295 else
2296 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2297
2298 if (flip_done) {
2299 intel_prepare_page_flip(dev, pipe);
2300 intel_finish_page_flip_plane(dev, pipe);
2301 }
2302
2303 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2304 hsw_pipe_crc_irq_handler(dev, pipe);
2305
2306 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2307 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2308 pipe);
2309
2310
2311 if (INTEL_INFO(dev_priv)->gen >= 9)
2312 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2313 else
2314 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2315
2316 if (fault_errors)
2317 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2318 pipe_name(pipe),
2319 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2320 } else
2321 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2322 }
2323
2324 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2325 master_ctl & GEN8_DE_PCH_IRQ) {
2326 /*
2327 * FIXME(BDW): Assume for now that the new interrupt handling
2328 * scheme also closed the SDE interrupt handling race we've seen
2329 * on older pch-split platforms. But this needs testing.
2330 */
2331 u32 pch_iir = I915_READ(SDEIIR);
2332 if (pch_iir) {
2333 I915_WRITE(SDEIIR, pch_iir);
2334 ret = IRQ_HANDLED;
2335
2336 if (HAS_PCH_SPT(dev_priv))
2337 spt_irq_handler(dev, pch_iir);
2338 else
2339 cpt_irq_handler(dev, pch_iir);
2340 } else {
2341 /*
2342 * Like on previous PCH there seems to be something
2343 * fishy going on with forwarding PCH interrupts.
2344 */
2345 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2346 }
2347 }
2348
2349 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2350 POSTING_READ_FW(GEN8_MASTER_IRQ);
2351
2352 return ret;
2353 }
2354
2355 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2356 bool reset_completed)
2357 {
2358 struct intel_engine_cs *ring;
2359 int i;
2360
2361 /*
2362 * Notify all waiters for GPU completion events that reset state has
2363 * been changed, and that they need to restart their wait after
2364 * checking for potential errors (and bail out to drop locks if there is
2365 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2366 */
2367
2368 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2369 for_each_ring(ring, dev_priv, i)
2370 wake_up_all(&ring->irq_queue);
2371
2372 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2373 wake_up_all(&dev_priv->pending_flip_queue);
2374
2375 /*
2376 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2377 * reset state is cleared.
2378 */
2379 if (reset_completed)
2380 wake_up_all(&dev_priv->gpu_error.reset_queue);
2381 }
2382
2383 /**
2384 * i915_reset_and_wakeup - do process context error handling work
2385 * @dev: drm device
2386 *
2387 * Fire an error uevent so userspace can see that a hang or error
2388 * was detected.
2389 */
2390 static void i915_reset_and_wakeup(struct drm_device *dev)
2391 {
2392 struct drm_i915_private *dev_priv = to_i915(dev);
2393 struct i915_gpu_error *error = &dev_priv->gpu_error;
2394 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2395 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2396 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2397 int ret;
2398
2399 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2400
2401 /*
2402 * Note that there's only one work item which does gpu resets, so we
2403 * need not worry about concurrent gpu resets potentially incrementing
2404 * error->reset_counter twice. We only need to take care of another
2405 * racing irq/hangcheck declaring the gpu dead for a second time. A
2406 * quick check for that is good enough: schedule_work ensures the
2407 * correct ordering between hang detection and this work item, and since
2408 * the reset in-progress bit is only ever set by code outside of this
2409 * work we don't need to worry about any other races.
2410 */
2411 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2412 DRM_DEBUG_DRIVER("resetting chip\n");
2413 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2414 reset_event);
2415
2416 /*
2417 * In most cases it's guaranteed that we get here with an RPM
2418 * reference held, for example because there is a pending GPU
2419 * request that won't finish until the reset is done. This
2420 * isn't the case at least when we get here by doing a
2421 * simulated reset via debugfs, so get an RPM reference.
2422 */
2423 intel_runtime_pm_get(dev_priv);
2424
2425 intel_prepare_reset(dev);
2426
2427 /*
2428 * All state reset _must_ be completed before we update the
2429 * reset counter, for otherwise waiters might miss the reset
2430 * pending state and not properly drop locks, resulting in
2431 * deadlocks with the reset work.
2432 */
2433 ret = i915_reset(dev);
2434
2435 intel_finish_reset(dev);
2436
2437 intel_runtime_pm_put(dev_priv);
2438
2439 if (ret == 0) {
2440 /*
2441 * After all the gem state is reset, increment the reset
2442 * counter and wake up everyone waiting for the reset to
2443 * complete.
2444 *
2445 * Since unlock operations are a one-sided barrier only,
2446 * we need to insert a barrier here to order any seqno
2447 * updates before
2448 * the counter increment.
2449 */
2450 smp_mb__before_atomic();
2451 atomic_inc(&dev_priv->gpu_error.reset_counter);
2452
2453 kobject_uevent_env(&dev->primary->kdev->kobj,
2454 KOBJ_CHANGE, reset_done_event);
2455 } else {
2456 atomic_or(I915_WEDGED, &error->reset_counter);
2457 }
2458
2459 /*
2460 * Note: The wake_up also serves as a memory barrier so that
2461 * waiters see the updated value of the reset counter atomic_t.
2462 */
2463 i915_error_wake_up(dev_priv, true);
2464 }
2465 }
2466
2467 static void i915_report_and_clear_eir(struct drm_device *dev)
2468 {
2469 struct drm_i915_private *dev_priv = dev->dev_private;
2470 uint32_t instdone[I915_NUM_INSTDONE_REG];
2471 u32 eir = I915_READ(EIR);
2472 int pipe, i;
2473
2474 if (!eir)
2475 return;
2476
2477 pr_err("render error detected, EIR: 0x%08x\n", eir);
2478
2479 i915_get_extra_instdone(dev, instdone);
2480
2481 if (IS_G4X(dev)) {
2482 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2483 u32 ipeir = I915_READ(IPEIR_I965);
2484
2485 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2486 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2487 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2488 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2489 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2490 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2491 I915_WRITE(IPEIR_I965, ipeir);
2492 POSTING_READ(IPEIR_I965);
2493 }
2494 if (eir & GM45_ERROR_PAGE_TABLE) {
2495 u32 pgtbl_err = I915_READ(PGTBL_ER);
2496 pr_err("page table error\n");
2497 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2498 I915_WRITE(PGTBL_ER, pgtbl_err);
2499 POSTING_READ(PGTBL_ER);
2500 }
2501 }
2502
2503 if (!IS_GEN2(dev)) {
2504 if (eir & I915_ERROR_PAGE_TABLE) {
2505 u32 pgtbl_err = I915_READ(PGTBL_ER);
2506 pr_err("page table error\n");
2507 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2508 I915_WRITE(PGTBL_ER, pgtbl_err);
2509 POSTING_READ(PGTBL_ER);
2510 }
2511 }
2512
2513 if (eir & I915_ERROR_MEMORY_REFRESH) {
2514 pr_err("memory refresh error:\n");
2515 for_each_pipe(dev_priv, pipe)
2516 pr_err("pipe %c stat: 0x%08x\n",
2517 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2518 /* pipestat has already been acked */
2519 }
2520 if (eir & I915_ERROR_INSTRUCTION) {
2521 pr_err("instruction error\n");
2522 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2523 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2524 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2525 if (INTEL_INFO(dev)->gen < 4) {
2526 u32 ipeir = I915_READ(IPEIR);
2527
2528 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2529 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2530 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2531 I915_WRITE(IPEIR, ipeir);
2532 POSTING_READ(IPEIR);
2533 } else {
2534 u32 ipeir = I915_READ(IPEIR_I965);
2535
2536 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2537 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2538 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2539 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2540 I915_WRITE(IPEIR_I965, ipeir);
2541 POSTING_READ(IPEIR_I965);
2542 }
2543 }
2544
2545 I915_WRITE(EIR, eir);
2546 POSTING_READ(EIR);
2547 eir = I915_READ(EIR);
2548 if (eir) {
2549 /*
2550 * some errors might have become stuck,
2551 * mask them.
2552 */
2553 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2554 I915_WRITE(EMR, I915_READ(EMR) | eir);
2555 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2556 }
2557 }
2558
2559 /**
2560 * i915_handle_error - handle a gpu error
2561 * @dev: drm device
2562 *
2563 * Do some basic checking of register state at error time and
2564 * dump it to the syslog. Also call i915_capture_error_state() to make
2565 * sure we get a record and make it available in debugfs. Fire a uevent
2566 * so userspace knows something bad happened (should trigger collection
2567 * of a ring dump etc.).
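 *
 * An illustrative call, as used by the hangcheck code further below:
 *	i915_handle_error(dev, true, "Ring hung");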
2568 */
2569 void i915_handle_error(struct drm_device *dev, bool wedged,
2570 const char *fmt, ...)
2571 {
2572 struct drm_i915_private *dev_priv = dev->dev_private;
2573 va_list args;
2574 char error_msg[80];
2575
2576 va_start(args, fmt);
2577 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2578 va_end(args);
2579
2580 i915_capture_error_state(dev, wedged, error_msg);
2581 i915_report_and_clear_eir(dev);
2582
2583 if (wedged) {
2584 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2585 &dev_priv->gpu_error.reset_counter);
2586
2587 /*
2588 * Wakeup waiting processes so that the reset function
2589 * i915_reset_and_wakeup doesn't deadlock trying to grab
2590 * various locks. By bumping the reset counter first, the woken
2591 * processes will see a reset in progress and back off,
2592 * releasing their locks and then wait for the reset completion.
2593 * We must do this for _all_ gpu waiters that might hold locks
2594 * that the reset work needs to acquire.
2595 *
2596 * Note: The wake_up serves as the required memory barrier to
2597 * ensure that the waiters see the updated value of the reset
2598 * counter atomic_t.
2599 */
2600 i915_error_wake_up(dev_priv, false);
2601 }
2602
2603 i915_reset_and_wakeup(dev);
2604 }
2605
2606 /* Called from drm generic code, passed 'crtc' which
2607 * we use as a pipe index
2608 */
2609 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2610 {
2611 struct drm_i915_private *dev_priv = dev->dev_private;
2612 unsigned long irqflags;
2613
2614 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2615 if (INTEL_INFO(dev)->gen >= 4)
2616 i915_enable_pipestat(dev_priv, pipe,
2617 PIPE_START_VBLANK_INTERRUPT_STATUS);
2618 else
2619 i915_enable_pipestat(dev_priv, pipe,
2620 PIPE_VBLANK_INTERRUPT_STATUS);
2621 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2622
2623 return 0;
2624 }
2625
2626 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2627 {
2628 struct drm_i915_private *dev_priv = dev->dev_private;
2629 unsigned long irqflags;
2630 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2631 DE_PIPE_VBLANK(pipe);
2632
2633 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2634 ironlake_enable_display_irq(dev_priv, bit);
2635 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2636
2637 return 0;
2638 }
2639
2640 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2641 {
2642 struct drm_i915_private *dev_priv = dev->dev_private;
2643 unsigned long irqflags;
2644
2645 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2646 i915_enable_pipestat(dev_priv, pipe,
2647 PIPE_START_VBLANK_INTERRUPT_STATUS);
2648 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2649
2650 return 0;
2651 }
2652
2653 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2654 {
2655 struct drm_i915_private *dev_priv = dev->dev_private;
2656 unsigned long irqflags;
2657
2658 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2659 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2660 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2661 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2662 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2663 return 0;
2664 }
2665
2666 /* Called from drm generic code, passed 'crtc' which
2667 * we use as a pipe index
2668 */
2669 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2670 {
2671 struct drm_i915_private *dev_priv = dev->dev_private;
2672 unsigned long irqflags;
2673
2674 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2675 i915_disable_pipestat(dev_priv, pipe,
2676 PIPE_VBLANK_INTERRUPT_STATUS |
2677 PIPE_START_VBLANK_INTERRUPT_STATUS);
2678 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2679 }
2680
2681 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2682 {
2683 struct drm_i915_private *dev_priv = dev->dev_private;
2684 unsigned long irqflags;
2685 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2686 DE_PIPE_VBLANK(pipe);
2687
2688 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2689 ironlake_disable_display_irq(dev_priv, bit);
2690 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2691 }
2692
2693 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2694 {
2695 struct drm_i915_private *dev_priv = dev->dev_private;
2696 unsigned long irqflags;
2697
2698 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2699 i915_disable_pipestat(dev_priv, pipe,
2700 PIPE_START_VBLANK_INTERRUPT_STATUS);
2701 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2702 }
2703
2704 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2705 {
2706 struct drm_i915_private *dev_priv = dev->dev_private;
2707 unsigned long irqflags;
2708
2709 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2710 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2711 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2712 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2713 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2714 }
2715
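/*
 * A ring counts as idle for hangcheck purposes when it either has no
 * outstanding requests or its last submitted seqno has already completed.
 */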
2716 static bool
2717 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2718 {
2719 return (list_empty(&ring->request_list) ||
2720 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2721 }
2722
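/*
 * IPEHR holds the instruction the command streamer is currently blocked on.
 * On gen8+ the opcode field is compared against 0x1c, which appears to be
 * MI_SEMAPHORE_WAIT; earlier gens are matched against the MI_SEMAPHORE_MBOX
 * register-compare form.
 */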
2723 static bool
2724 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2725 {
2726 if (INTEL_INFO(dev)->gen >= 8) {
2727 return (ipehr >> 23) == 0x1c;
2728 } else {
2729 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2730 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2731 MI_SEMAPHORE_REGISTER);
2732 }
2733 }
2734
2735 static struct intel_engine_cs *
2736 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2737 {
2738 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2739 struct intel_engine_cs *signaller;
2740 int i;
2741
2742 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2743 for_each_ring(signaller, dev_priv, i) {
2744 if (ring == signaller)
2745 continue;
2746
2747 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2748 return signaller;
2749 }
2750 } else {
2751 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2752
2753 for_each_ring(signaller, dev_priv, i) {
2754 if (ring == signaller)
2755 continue;
2756
2757 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2758 return signaller;
2759 }
2760 }
2761
2762 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2763 ring->id, ipehr, offset);
2764
2765 return NULL;
2766 }
2767
2768 static struct intel_engine_cs *
2769 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2770 {
2771 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2772 u32 cmd, ipehr, head;
2773 u64 offset = 0;
2774 int i, backwards;
2775
2776 /*
2777 * This function does not support execlist mode - any attempt to
2778 * proceed further into this function will result in a kernel panic
2779 * when dereferencing ring->buffer, which is not set up in execlist
2780 * mode.
2781 *
2782 * The correct way of doing it would be to derive the currently
2783 * executing ring buffer from the current context, which is derived
2784 * from the currently running request. Unfortunately, to get the
2785 * current request we would have to grab the struct_mutex before doing
2786 * anything else, which would be ill-advised since some other thread
2787 * might have grabbed it already and managed to hang itself, causing
2788 * the hang checker to deadlock.
2789 *
2790 * Therefore, this function does not support execlist mode in its
2791 * current form. Just return NULL and move on.
2792 */
2793 if (ring->buffer == NULL)
2794 return NULL;
2795
2796 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2797 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2798 return NULL;
2799
2800 /*
2801 * HEAD is likely pointing to the dword after the actual command,
2802 * so scan backwards until we find the MBOX. But limit it to just 3
2803 * or 4 dwords depending on the semaphore wait command size.
2804 * Note that we don't care about ACTHD here since that might
2805 * point at a batch, and semaphores are always emitted into the
2806 * ringbuffer itself.
2807 */
2808 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2809 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2810
2811 for (i = backwards; i; --i) {
2812 /*
2813 * Be paranoid and presume the hw has gone off into the wild -
2814 * our ring is smaller than what the hardware (and hence
2815 * HEAD_ADDR) allows. Also handles wrap-around.
2816 */
2817 head &= ring->buffer->size - 1;
2818
2819 /* This here seems to blow up */
2820 cmd = ioread32(ring->buffer->virtual_start + head);
2821 if (cmd == ipehr)
2822 break;
2823
2824 head -= 4;
2825 }
2826
2827 if (!i)
2828 return NULL;
2829
2830 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2831 if (INTEL_INFO(ring->dev)->gen >= 8) {
2832 offset = ioread32(ring->buffer->virtual_start + head + 12);
2833 offset <<= 32;
2834 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2835 }
2836 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2837 }
2838
2839 static int semaphore_passed(struct intel_engine_cs *ring)
2840 {
2841 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2842 struct intel_engine_cs *signaller;
2843 u32 seqno;
2844
2845 ring->hangcheck.deadlock++;
2846
2847 signaller = semaphore_waits_for(ring, &seqno);
2848 if (signaller == NULL)
2849 return -1;
2850
2851 /* Prevent pathological recursion due to driver bugs */
2852 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2853 return -1;
2854
2855 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2856 return 1;
2857
2858 /* cursory check for an unkickable deadlock */
2859 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2860 semaphore_passed(signaller) < 0)
2861 return -1;
2862
2863 return 0;
2864 }
2865
2866 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2867 {
2868 struct intel_engine_cs *ring;
2869 int i;
2870
2871 for_each_ring(ring, dev_priv, i)
2872 ring->hangcheck.deadlock = 0;
2873 }
2874
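/*
 * Classify a busy ring whose seqno has not advanced: still making progress
 * (ACTIVE/ACTIVE_LOOP), stuck on a wait or semaphore that can be kicked
 * (WAIT/KICK), or genuinely hung (HUNG).
 */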
2875 static enum intel_ring_hangcheck_action
2876 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2877 {
2878 struct drm_device *dev = ring->dev;
2879 struct drm_i915_private *dev_priv = dev->dev_private;
2880 u32 tmp;
2881
2882 if (acthd != ring->hangcheck.acthd) {
2883 if (acthd > ring->hangcheck.max_acthd) {
2884 ring->hangcheck.max_acthd = acthd;
2885 return HANGCHECK_ACTIVE;
2886 }
2887
2888 return HANGCHECK_ACTIVE_LOOP;
2889 }
2890
2891 if (IS_GEN2(dev))
2892 return HANGCHECK_HUNG;
2893
2894 /* Is the chip hanging on a WAIT_FOR_EVENT?
2895 * If so we can simply poke the RB_WAIT bit
2896 * and break the hang. This should work on
2897 * all but the second generation chipsets.
2898 */
2899 tmp = I915_READ_CTL(ring);
2900 if (tmp & RING_WAIT) {
2901 i915_handle_error(dev, false,
2902 "Kicking stuck wait on %s",
2903 ring->name);
2904 I915_WRITE_CTL(ring, tmp);
2905 return HANGCHECK_KICK;
2906 }
2907
2908 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2909 switch (semaphore_passed(ring)) {
2910 default:
2911 return HANGCHECK_HUNG;
2912 case 1:
2913 i915_handle_error(dev, false,
2914 "Kicking stuck semaphore on %s",
2915 ring->name);
2916 I915_WRITE_CTL(ring, tmp);
2917 return HANGCHECK_KICK;
2918 case 0:
2919 return HANGCHECK_WAIT;
2920 }
2921 }
2922
2923 return HANGCHECK_HUNG;
2924 }
2925
2926 /*
2927 * This is called when the chip hasn't reported back with completed
2928 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2929 * if there is no progress, the hangcheck score for that ring is increased.
2930 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2931 * we kick the ring. If we see no progress on three subsequent calls
2932 * we assume the chip is wedged and try to fix it by resetting the chip.
2933 */
2934 static void i915_hangcheck_elapsed(struct work_struct *work)
2935 {
2936 struct drm_i915_private *dev_priv =
2937 container_of(work, typeof(*dev_priv),
2938 gpu_error.hangcheck_work.work);
2939 struct drm_device *dev = dev_priv->dev;
2940 struct intel_engine_cs *ring;
2941 int i;
2942 int busy_count = 0, rings_hung = 0;
2943 bool stuck[I915_NUM_RINGS] = { 0 };
2944 #define BUSY 1
2945 #define KICK 5
2946 #define HUNG 20
2947
2948 if (!i915.enable_hangcheck)
2949 return;
2950
2951 for_each_ring(ring, dev_priv, i) {
2952 u64 acthd;
2953 u32 seqno;
2954 bool busy = true;
2955
2956 semaphore_clear_deadlocks(dev_priv);
2957
2958 seqno = ring->get_seqno(ring, false);
2959 acthd = intel_ring_get_active_head(ring);
2960
2961 if (ring->hangcheck.seqno == seqno) {
2962 if (ring_idle(ring, seqno)) {
2963 ring->hangcheck.action = HANGCHECK_IDLE;
2964
2965 if (waitqueue_active(&ring->irq_queue)) {
2966 /* Issue a wake-up to catch stuck h/w. */
2967 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2968 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2969 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2970 ring->name);
2971 else
2972 DRM_INFO("Fake missed irq on %s\n",
2973 ring->name);
2974 wake_up_all(&ring->irq_queue);
2975 }
2976 /* Safeguard against driver failure */
2977 ring->hangcheck.score += BUSY;
2978 } else
2979 busy = false;
2980 } else {
2981 /* We always increment the hangcheck score
2982 * if the ring is busy and still processing
2983 * the same request, so that no single request
2984 * can run indefinitely (such as a chain of
2985 * batches). The only time we do not increment
2986 * the hangcheck score on this ring is when this
2987 * ring is in a legitimate wait for another
2988 * ring. In that case the waiting ring is a
2989 * victim and we want to be sure we catch the
2990 * right culprit. Then every time we do kick
2991 * the ring, add a small increment to the
2992 * score so that we can catch a batch that is
2993 * being repeatedly kicked and so responsible
2994 * for stalling the machine.
2995 */
2996 ring->hangcheck.action = ring_stuck(ring,
2997 acthd);
2998
2999 switch (ring->hangcheck.action) {
3000 case HANGCHECK_IDLE:
3001 case HANGCHECK_WAIT:
3002 case HANGCHECK_ACTIVE:
3003 break;
3004 case HANGCHECK_ACTIVE_LOOP:
3005 ring->hangcheck.score += BUSY;
3006 break;
3007 case HANGCHECK_KICK:
3008 ring->hangcheck.score += KICK;
3009 break;
3010 case HANGCHECK_HUNG:
3011 ring->hangcheck.score += HUNG;
3012 stuck[i] = true;
3013 break;
3014 }
3015 }
3016 } else {
3017 ring->hangcheck.action = HANGCHECK_ACTIVE;
3018
3019 /* Gradually reduce the count so that we catch DoS
3020 * attempts across multiple batches.
3021 */
3022 if (ring->hangcheck.score > 0)
3023 ring->hangcheck.score--;
3024
3025 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3026 }
3027
3028 ring->hangcheck.seqno = seqno;
3029 ring->hangcheck.acthd = acthd;
3030 busy_count += busy;
3031 }
3032
3033 for_each_ring(ring, dev_priv, i) {
3034 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3035 DRM_INFO("%s on %s\n",
3036 stuck[i] ? "stuck" : "no progress",
3037 ring->name);
3038 rings_hung++;
3039 }
3040 }
3041
3042 if (rings_hung)
3043 return i915_handle_error(dev, true, "Ring hung");
3044
3045 if (busy_count)
3046 /* Reset the timer in case the chip hangs without another request
3047 * being added */
3048 i915_queue_hangcheck(dev);
3049 }
3050
3051 void i915_queue_hangcheck(struct drm_device *dev)
3052 {
3053 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3054
3055 if (!i915.enable_hangcheck)
3056 return;
3057
3058 /* Don't continually defer the hangcheck so that it is always run at
3059 * least once after work has been scheduled on any ring. Otherwise,
3060 * we will ignore a hung ring if a second ring is kept busy.
3061 */
3062
3063 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3064 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3065 }
3066
3067 static void ibx_irq_reset(struct drm_device *dev)
3068 {
3069 struct drm_i915_private *dev_priv = dev->dev_private;
3070
3071 if (HAS_PCH_NOP(dev))
3072 return;
3073
3074 GEN5_IRQ_RESET(SDE);
3075
3076 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3077 I915_WRITE(SERR_INT, 0xffffffff);
3078 }
3079
3080 /*
3081 * SDEIER is also touched by the interrupt handler to work around missed PCH
3082 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3083 * instead we unconditionally enable all PCH interrupt sources here, but then
3084 * only unmask them as needed with SDEIMR.
3085 *
3086 * This function needs to be called before interrupts are enabled.
3087 */
3088 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3089 {
3090 struct drm_i915_private *dev_priv = dev->dev_private;
3091
3092 if (HAS_PCH_NOP(dev))
3093 return;
3094
3095 WARN_ON(I915_READ(SDEIER) != 0);
3096 I915_WRITE(SDEIER, 0xffffffff);
3097 POSTING_READ(SDEIER);
3098 }
3099
3100 static void gen5_gt_irq_reset(struct drm_device *dev)
3101 {
3102 struct drm_i915_private *dev_priv = dev->dev_private;
3103
3104 GEN5_IRQ_RESET(GT);
3105 if (INTEL_INFO(dev)->gen >= 6)
3106 GEN5_IRQ_RESET(GEN6_PM);
3107 }
3108
3109 /* drm_dma.h hooks
3110 */
3111 static void ironlake_irq_reset(struct drm_device *dev)
3112 {
3113 struct drm_i915_private *dev_priv = dev->dev_private;
3114
3115 I915_WRITE(HWSTAM, 0xffffffff);
3116
3117 GEN5_IRQ_RESET(DE);
3118 if (IS_GEN7(dev))
3119 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3120
3121 gen5_gt_irq_reset(dev);
3122
3123 ibx_irq_reset(dev);
3124 }
3125
3126 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3127 {
3128 enum pipe pipe;
3129
3130 i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3131 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3132
3133 for_each_pipe(dev_priv, pipe)
3134 I915_WRITE(PIPESTAT(pipe), 0xffff);
3135
3136 GEN5_IRQ_RESET(VLV_);
3137 }
3138
3139 static void valleyview_irq_preinstall(struct drm_device *dev)
3140 {
3141 struct drm_i915_private *dev_priv = dev->dev_private;
3142
3143 /* VLV magic */
3144 I915_WRITE(VLV_IMR, 0);
3145 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3146 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3147 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3148
3149 gen5_gt_irq_reset(dev);
3150
3151 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3152
3153 vlv_display_irq_reset(dev_priv);
3154 }
3155
3156 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3157 {
3158 GEN8_IRQ_RESET_NDX(GT, 0);
3159 GEN8_IRQ_RESET_NDX(GT, 1);
3160 GEN8_IRQ_RESET_NDX(GT, 2);
3161 GEN8_IRQ_RESET_NDX(GT, 3);
3162 }
3163
3164 static void gen8_irq_reset(struct drm_device *dev)
3165 {
3166 struct drm_i915_private *dev_priv = dev->dev_private;
3167 int pipe;
3168
3169 I915_WRITE(GEN8_MASTER_IRQ, 0);
3170 POSTING_READ(GEN8_MASTER_IRQ);
3171
3172 gen8_gt_irq_reset(dev_priv);
3173
3174 for_each_pipe(dev_priv, pipe)
3175 if (intel_display_power_is_enabled(dev_priv,
3176 POWER_DOMAIN_PIPE(pipe)))
3177 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3178
3179 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3180 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3181 GEN5_IRQ_RESET(GEN8_PCU_);
3182
3183 if (HAS_PCH_SPLIT(dev))
3184 ibx_irq_reset(dev);
3185 }
3186
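/*
 * Called after a display power well has been enabled: the per-pipe interrupt
 * registers it contains presumably come back with undefined contents, so they
 * are re-initialised here from the cached de_irq_mask[] values.
 */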
3187 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3188 unsigned int pipe_mask)
3189 {
3190 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3191
3192 spin_lock_irq(&dev_priv->irq_lock);
3193 if (pipe_mask & 1 << PIPE_A)
3194 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3195 dev_priv->de_irq_mask[PIPE_A],
3196 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3197 if (pipe_mask & 1 << PIPE_B)
3198 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3199 dev_priv->de_irq_mask[PIPE_B],
3200 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3201 if (pipe_mask & 1 << PIPE_C)
3202 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3203 dev_priv->de_irq_mask[PIPE_C],
3204 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3205 spin_unlock_irq(&dev_priv->irq_lock);
3206 }
3207
3208 static void cherryview_irq_preinstall(struct drm_device *dev)
3209 {
3210 struct drm_i915_private *dev_priv = dev->dev_private;
3211
3212 I915_WRITE(GEN8_MASTER_IRQ, 0);
3213 POSTING_READ(GEN8_MASTER_IRQ);
3214
3215 gen8_gt_irq_reset(dev_priv);
3216
3217 GEN5_IRQ_RESET(GEN8_PCU_);
3218
3219 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3220
3221 vlv_display_irq_reset(dev_priv);
3222 }
3223
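/*
 * Collect the HPD interrupt bits for every encoder whose hotplug pin is
 * currently marked HPD_ENABLED, using the platform's hpd[] mapping table.
 */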
3224 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3225 const u32 hpd[HPD_NUM_PINS])
3226 {
3227 struct drm_i915_private *dev_priv = to_i915(dev);
3228 struct intel_encoder *encoder;
3229 u32 enabled_irqs = 0;
3230
3231 for_each_intel_encoder(dev, encoder)
3232 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3233 enabled_irqs |= hpd[encoder->hpd_pin];
3234
3235 return enabled_irqs;
3236 }
3237
3238 static void ibx_hpd_irq_setup(struct drm_device *dev)
3239 {
3240 struct drm_i915_private *dev_priv = dev->dev_private;
3241 u32 hotplug_irqs, hotplug, enabled_irqs;
3242
3243 if (HAS_PCH_IBX(dev)) {
3244 hotplug_irqs = SDE_HOTPLUG_MASK;
3245 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3246 } else {
3247 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3248 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3249 }
3250
3251 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3252
3253 /*
3254 * Enable digital hotplug on the PCH, and configure the DP short pulse
3255 * duration to 2ms (which is the minimum in the Display Port spec).
3256 * The pulse duration bits are reserved on LPT+.
3257 */
3258 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3259 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3260 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3261 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3262 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3263 /*
3264 * When CPU and PCH are on the same package, port A
3265 * HPD must be enabled in both north and south.
3266 */
3267 if (HAS_PCH_LPT_LP(dev))
3268 hotplug |= PORTA_HOTPLUG_ENABLE;
3269 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3270 }
3271
3272 static void spt_hpd_irq_setup(struct drm_device *dev)
3273 {
3274 struct drm_i915_private *dev_priv = dev->dev_private;
3275 u32 hotplug_irqs, hotplug, enabled_irqs;
3276
3277 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3278 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3279
3280 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3281
3282 /* Enable digital hotplug on the PCH */
3283 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3284 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3285 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3286 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3287
3288 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3289 hotplug |= PORTE_HOTPLUG_ENABLE;
3290 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3291 }
3292
3293 static void ilk_hpd_irq_setup(struct drm_device *dev)
3294 {
3295 struct drm_i915_private *dev_priv = dev->dev_private;
3296 u32 hotplug_irqs, hotplug, enabled_irqs;
3297
3298 if (INTEL_INFO(dev)->gen >= 8) {
3299 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3300 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3301
3302 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3303 } else if (INTEL_INFO(dev)->gen >= 7) {
3304 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3305 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3306
3307 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3308 } else {
3309 hotplug_irqs = DE_DP_A_HOTPLUG;
3310 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3311
3312 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3313 }
3314
3315 /*
3316 * Enable digital hotplug on the CPU, and configure the DP short pulse
3317 * duration to 2ms (which is the minimum in the Display Port spec)
3318 * The pulse duration bits are reserved on HSW+.
3319 */
3320 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3321 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3322 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3323 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3324
3325 ibx_hpd_irq_setup(dev);
3326 }
3327
3328 static void bxt_hpd_irq_setup(struct drm_device *dev)
3329 {
3330 struct drm_i915_private *dev_priv = dev->dev_private;
3331 u32 hotplug_irqs, hotplug, enabled_irqs;
3332
3333 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3334 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3335
3336 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3337
3338 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3339 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3340 PORTA_HOTPLUG_ENABLE;
3341 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3342 }
3343
3344 static void ibx_irq_postinstall(struct drm_device *dev)
3345 {
3346 struct drm_i915_private *dev_priv = dev->dev_private;
3347 u32 mask;
3348
3349 if (HAS_PCH_NOP(dev))
3350 return;
3351
3352 if (HAS_PCH_IBX(dev))
3353 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3354 else
3355 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3356
3357 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3358 I915_WRITE(SDEIMR, ~mask);
3359 }
3360
3361 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3362 {
3363 struct drm_i915_private *dev_priv = dev->dev_private;
3364 u32 pm_irqs, gt_irqs;
3365
3366 pm_irqs = gt_irqs = 0;
3367
3368 dev_priv->gt_irq_mask = ~0;
3369 if (HAS_L3_DPF(dev)) {
3370 /* L3 parity interrupt is always unmasked. */
3371 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3372 gt_irqs |= GT_PARITY_ERROR(dev);
3373 }
3374
3375 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3376 if (IS_GEN5(dev)) {
3377 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3378 ILK_BSD_USER_INTERRUPT;
3379 } else {
3380 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3381 }
3382
3383 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3384
3385 if (INTEL_INFO(dev)->gen >= 6) {
3386 /*
3387 * RPS interrupts will get enabled/disabled on demand when RPS
3388 * itself is enabled/disabled.
3389 */
3390 if (HAS_VEBOX(dev))
3391 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3392
3393 dev_priv->pm_irq_mask = 0xffffffff;
3394 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3395 }
3396 }
3397
3398 static int ironlake_irq_postinstall(struct drm_device *dev)
3399 {
3400 struct drm_i915_private *dev_priv = dev->dev_private;
3401 u32 display_mask, extra_mask;
3402
3403 if (INTEL_INFO(dev)->gen >= 7) {
3404 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3405 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3406 DE_PLANEB_FLIP_DONE_IVB |
3407 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3408 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3409 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3410 DE_DP_A_HOTPLUG_IVB);
3411 } else {
3412 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3413 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3414 DE_AUX_CHANNEL_A |
3415 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3416 DE_POISON);
3417 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3418 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3419 DE_DP_A_HOTPLUG);
3420 }
3421
3422 dev_priv->irq_mask = ~display_mask;
3423
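	/* HWSTAM controls which interrupt status bits are mirrored into the hardware status page. */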
3424 I915_WRITE(HWSTAM, 0xeffe);
3425
3426 ibx_irq_pre_postinstall(dev);
3427
3428 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3429
3430 gen5_gt_irq_postinstall(dev);
3431
3432 ibx_irq_postinstall(dev);
3433
3434 if (IS_IRONLAKE_M(dev)) {
3435 /* Enable PCU event interrupts
3436 *
3437 * spinlocking not required here for correctness since interrupt
3438 * setup is guaranteed to run in single-threaded context. But we
3439 * need it to make the assert_spin_locked happy. */
3440 spin_lock_irq(&dev_priv->irq_lock);
3441 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3442 spin_unlock_irq(&dev_priv->irq_lock);
3443 }
3444
3445 return 0;
3446 }
3447
3448 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3449 {
3450 u32 pipestat_mask;
3451 u32 iir_mask;
3452 enum pipe pipe;
3453
3454 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3455 PIPE_FIFO_UNDERRUN_STATUS;
3456
3457 for_each_pipe(dev_priv, pipe)
3458 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3459 POSTING_READ(PIPESTAT(PIPE_A));
3460
3461 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3462 PIPE_CRC_DONE_INTERRUPT_STATUS;
3463
3464 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3465 for_each_pipe(dev_priv, pipe)
3466 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3467
3468 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3469 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3470 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3471 if (IS_CHERRYVIEW(dev_priv))
3472 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3473 dev_priv->irq_mask &= ~iir_mask;
3474
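	/*
	 * Clear any bits already pending for the sources we are about to
	 * unmask (IIR is written twice, mirroring the uninstall path), then
	 * open them up in IER/IMR.
	 */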
3475 I915_WRITE(VLV_IIR, iir_mask);
3476 I915_WRITE(VLV_IIR, iir_mask);
3477 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3478 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3479 POSTING_READ(VLV_IMR);
3480 }
3481
3482 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3483 {
3484 u32 pipestat_mask;
3485 u32 iir_mask;
3486 enum pipe pipe;
3487
3488 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3489 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3490 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3491 if (IS_CHERRYVIEW(dev_priv))
3492 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3493
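	/* Mask the port/pipe event interrupts first, then flush anything still pending in IIR. */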
3494 dev_priv->irq_mask |= iir_mask;
3495 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3496 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3497 I915_WRITE(VLV_IIR, iir_mask);
3498 I915_WRITE(VLV_IIR, iir_mask);
3499 POSTING_READ(VLV_IIR);
3500
3501 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3502 PIPE_CRC_DONE_INTERRUPT_STATUS;
3503
3504 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3505 for_each_pipe(dev_priv, pipe)
3506 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3507
3508 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3509 PIPE_FIFO_UNDERRUN_STATUS;
3510
3511 for_each_pipe(dev_priv, pipe)
3512 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3513 POSTING_READ(PIPESTAT(PIPE_A));
3514 }
3515
3516 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3517 {
3518 assert_spin_locked(&dev_priv->irq_lock);
3519
3520 if (dev_priv->display_irqs_enabled)
3521 return;
3522
3523 dev_priv->display_irqs_enabled = true;
3524
3525 if (intel_irqs_enabled(dev_priv))
3526 valleyview_display_irqs_install(dev_priv);
3527 }
3528
3529 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3530 {
3531 assert_spin_locked(&dev_priv->irq_lock);
3532
3533 if (!dev_priv->display_irqs_enabled)
3534 return;
3535
3536 dev_priv->display_irqs_enabled = false;
3537
3538 if (intel_irqs_enabled(dev_priv))
3539 valleyview_display_irqs_uninstall(dev_priv);
3540 }
3541
3542 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3543 {
3544 dev_priv->irq_mask = ~0;
3545
3546 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3547 POSTING_READ(PORT_HOTPLUG_EN);
3548
3549 I915_WRITE(VLV_IIR, 0xffffffff);
3550 I915_WRITE(VLV_IIR, 0xffffffff);
3551 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3552 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3553 POSTING_READ(VLV_IMR);
3554
3555 /* Interrupt setup is already guaranteed to be single-threaded, this is
3556 * just to make the assert_spin_locked check happy. */
3557 spin_lock_irq(&dev_priv->irq_lock);
3558 if (dev_priv->display_irqs_enabled)
3559 valleyview_display_irqs_install(dev_priv);
3560 spin_unlock_irq(&dev_priv->irq_lock);
3561 }
3562
3563 static int valleyview_irq_postinstall(struct drm_device *dev)
3564 {
3565 struct drm_i915_private *dev_priv = dev->dev_private;
3566
3567 vlv_display_irq_postinstall(dev_priv);
3568
3569 gen5_gt_irq_postinstall(dev);
3570
3571 /* ack & enable invalid PTE error interrupts */
3572 #if 0 /* FIXME: add support to irq handler for checking these bits */
3573 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3574 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3575 #endif
3576
3577 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3578
3579 return 0;
3580 }
3581
3582 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3583 {
3584 /* These are interrupts we'll toggle with the ring mask register */
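	/* One entry per GT interrupt bank: 0 = RCS/BCS, 1 = VCS1/VCS2, 2 = PM (left clear here), 3 = VECS. */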
3585 uint32_t gt_interrupts[] = {
3586 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3587 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3588 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3589 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3590 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3591 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3592 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3593 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3594 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3595 0,
3596 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3597 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3598 };
3599
3600 dev_priv->pm_irq_mask = 0xffffffff;
3601 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3602 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3603 /*
3604 * RPS interrupts will get enabled/disabled on demand when RPS itself
3605 * is enabled/disabled.
3606 */
3607 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3608 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3609 }
3610
3611 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3612 {
3613 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3614 uint32_t de_pipe_enables;
3615 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3616 u32 de_port_enables;
3617 enum pipe pipe;
3618
3619 if (INTEL_INFO(dev_priv)->gen >= 9) {
3620 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3621 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3622 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3623 GEN9_AUX_CHANNEL_D;
3624 if (IS_BROXTON(dev_priv))
3625 de_port_masked |= BXT_DE_PORT_GMBUS;
3626 } else {
3627 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3628 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3629 }
3630
3631 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3632 GEN8_PIPE_FIFO_UNDERRUN;
3633
3634 de_port_enables = de_port_masked;
3635 if (IS_BROXTON(dev_priv))
3636 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3637 else if (IS_BROADWELL(dev_priv))
3638 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3639
3640 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3641 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3642 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3643
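	/* Only program the pipe interrupt registers for pipes whose power well is currently enabled. */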
3644 for_each_pipe(dev_priv, pipe)
3645 if (intel_display_power_is_enabled(dev_priv,
3646 POWER_DOMAIN_PIPE(pipe)))
3647 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3648 dev_priv->de_irq_mask[pipe],
3649 de_pipe_enables);
3650
3651 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3652 }
3653
3654 static int gen8_irq_postinstall(struct drm_device *dev)
3655 {
3656 struct drm_i915_private *dev_priv = dev->dev_private;
3657
3658 if (HAS_PCH_SPLIT(dev))
3659 ibx_irq_pre_postinstall(dev);
3660
3661 gen8_gt_irq_postinstall(dev_priv);
3662 gen8_de_irq_postinstall(dev_priv);
3663
3664 if (HAS_PCH_SPLIT(dev))
3665 ibx_irq_postinstall(dev);
3666
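	/* With all the second-level IMR/IER registers programmed, unmask the top-level master interrupt last. */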
3667 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3668 POSTING_READ(GEN8_MASTER_IRQ);
3669
3670 return 0;
3671 }
3672
3673 static int cherryview_irq_postinstall(struct drm_device *dev)
3674 {
3675 struct drm_i915_private *dev_priv = dev->dev_private;
3676
3677 vlv_display_irq_postinstall(dev_priv);
3678
3679 gen8_gt_irq_postinstall(dev_priv);
3680
3681 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3682 POSTING_READ(GEN8_MASTER_IRQ);
3683
3684 return 0;
3685 }
3686
3687 static void gen8_irq_uninstall(struct drm_device *dev)
3688 {
3689 struct drm_i915_private *dev_priv = dev->dev_private;
3690
3691 if (!dev_priv)
3692 return;
3693
3694 gen8_irq_reset(dev);
3695 }
3696
3697 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3698 {
3699 /* Interrupt setup is already guaranteed to be single-threaded, this is
3700 * just to make the assert_spin_locked check happy. */
3701 spin_lock_irq(&dev_priv->irq_lock);
3702 if (dev_priv->display_irqs_enabled)
3703 valleyview_display_irqs_uninstall(dev_priv);
3704 spin_unlock_irq(&dev_priv->irq_lock);
3705
3706 vlv_display_irq_reset(dev_priv);
3707
3708 dev_priv->irq_mask = ~0;
3709 }
3710
3711 static void valleyview_irq_uninstall(struct drm_device *dev)
3712 {
3713 struct drm_i915_private *dev_priv = dev->dev_private;
3714
3715 if (!dev_priv)
3716 return;
3717
3718 I915_WRITE(VLV_MASTER_IER, 0);
3719
3720 gen5_gt_irq_reset(dev);
3721
3722 I915_WRITE(HWSTAM, 0xffffffff);
3723
3724 vlv_display_irq_uninstall(dev_priv);
3725 }
3726
3727 static void cherryview_irq_uninstall(struct drm_device *dev)
3728 {
3729 struct drm_i915_private *dev_priv = dev->dev_private;
3730
3731 if (!dev_priv)
3732 return;
3733
3734 I915_WRITE(GEN8_MASTER_IRQ, 0);
3735 POSTING_READ(GEN8_MASTER_IRQ);
3736
3737 gen8_gt_irq_reset(dev_priv);
3738
3739 GEN5_IRQ_RESET(GEN8_PCU_);
3740
3741 vlv_display_irq_uninstall(dev_priv);
3742 }
3743
3744 static void ironlake_irq_uninstall(struct drm_device *dev)
3745 {
3746 struct drm_i915_private *dev_priv = dev->dev_private;
3747
3748 if (!dev_priv)
3749 return;
3750
3751 ironlake_irq_reset(dev);
3752 }
3753
3754 static void i8xx_irq_preinstall(struct drm_device * dev)
3755 {
3756 struct drm_i915_private *dev_priv = dev->dev_private;
3757 int pipe;
3758
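	/* Disable all pipestat sources and mask/disable everything in IMR/IER before the handler is hooked up. */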
3759 for_each_pipe(dev_priv, pipe)
3760 I915_WRITE(PIPESTAT(pipe), 0);
3761 I915_WRITE16(IMR, 0xffff);
3762 I915_WRITE16(IER, 0x0);
3763 POSTING_READ16(IER);
3764 }
3765
3766 static int i8xx_irq_postinstall(struct drm_device *dev)
3767 {
3768 struct drm_i915_private *dev_priv = dev->dev_private;
3769
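	/* Report only page table and memory refresh errors; all other error sources stay masked in EMR. */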
3770 I915_WRITE16(EMR,
3771 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3772
3773 /* Unmask the interrupts that we always want on. */
3774 dev_priv->irq_mask =
3775 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3776 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3777 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3778 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3779 I915_WRITE16(IMR, dev_priv->irq_mask);
3780
3781 I915_WRITE16(IER,
3782 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3783 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3784 I915_USER_INTERRUPT);
3785 POSTING_READ16(IER);
3786
3787 /* Interrupt setup is already guaranteed to be single-threaded, this is
3788 * just to make the assert_spin_locked check happy. */
3789 spin_lock_irq(&dev_priv->irq_lock);
3790 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3791 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3792 spin_unlock_irq(&dev_priv->irq_lock);
3793
3794 return 0;
3795 }
3796
3797 /*
3798 * Returns true when a page flip has completed.
3799 */
3800 static bool i8xx_handle_vblank(struct drm_device *dev,
3801 int plane, int pipe, u32 iir)
3802 {
3803 struct drm_i915_private *dev_priv = dev->dev_private;
3804 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3805
3806 if (!intel_pipe_handle_vblank(dev, pipe))
3807 return false;
3808
3809 if ((iir & flip_pending) == 0)
3810 goto check_page_flip;
3811
3812 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3813 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3814 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3815 * the flip is completed (no longer pending). Since this doesn't raise
3816 * an interrupt per se, we watch for the change at vblank.
3817 */
3818 if (I915_READ16(ISR) & flip_pending)
3819 goto check_page_flip;
3820
3821 intel_prepare_page_flip(dev, plane);
3822 intel_finish_page_flip(dev, pipe);
3823 return true;
3824
3825 check_page_flip:
3826 intel_check_page_flip(dev, pipe);
3827 return false;
3828 }
3829
3830 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3831 {
3832 struct drm_device *dev = arg;
3833 struct drm_i915_private *dev_priv = dev->dev_private;
3834 u16 iir, new_iir;
3835 u32 pipe_stats[2];
3836 int pipe;
3837 u16 flip_mask =
3838 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3839 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3840
3841 if (!intel_irqs_enabled(dev_priv))
3842 return IRQ_NONE;
3843
3844 iir = I915_READ16(IIR);
3845 if (iir == 0)
3846 return IRQ_NONE;
3847
3848 while (iir & ~flip_mask) {
3849 /* Can't rely on pipestat interrupt bit in iir as it might
3850 * have been cleared after the pipestat interrupt was received.
3851 * It doesn't set the bit in iir again, but it still produces
3852 * interrupts (for non-MSI).
3853 */
3854 spin_lock(&dev_priv->irq_lock);
3855 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3856 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3857
3858 for_each_pipe(dev_priv, pipe) {
3859 int reg = PIPESTAT(pipe);
3860 pipe_stats[pipe] = I915_READ(reg);
3861
3862 /*
3863 * Clear the PIPE*STAT regs before the IIR
3864 */
3865 if (pipe_stats[pipe] & 0x8000ffff)
3866 I915_WRITE(reg, pipe_stats[pipe]);
3867 }
3868 spin_unlock(&dev_priv->irq_lock);
3869
3870 I915_WRITE16(IIR, iir & ~flip_mask);
3871 new_iir = I915_READ16(IIR); /* Flush posted writes */
3872
3873 if (iir & I915_USER_INTERRUPT)
3874 notify_ring(&dev_priv->ring[RCS]);
3875
3876 for_each_pipe(dev_priv, pipe) {
3877 int plane = pipe;
3878 if (HAS_FBC(dev))
3879 plane = !plane;
3880
3881 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3882 i8xx_handle_vblank(dev, plane, pipe, iir))
3883 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3884
3885 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3886 i9xx_pipe_crc_irq_handler(dev, pipe);
3887
3888 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3889 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3890 pipe);
3891 }
3892
3893 iir = new_iir;
3894 }
3895
3896 return IRQ_HANDLED;
3897 }
3898
3899 static void i8xx_irq_uninstall(struct drm_device * dev)
3900 {
3901 struct drm_i915_private *dev_priv = dev->dev_private;
3902 int pipe;
3903
3904 for_each_pipe(dev_priv, pipe) {
3905 /* Clear enable bits; then clear status bits */
3906 I915_WRITE(PIPESTAT(pipe), 0);
3907 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3908 }
3909 I915_WRITE16(IMR, 0xffff);
3910 I915_WRITE16(IER, 0x0);
3911 I915_WRITE16(IIR, I915_READ16(IIR));
3912 }
3913
3914 static void i915_irq_preinstall(struct drm_device * dev)
3915 {
3916 struct drm_i915_private *dev_priv = dev->dev_private;
3917 int pipe;
3918
3919 if (I915_HAS_HOTPLUG(dev)) {
3920 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3921 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3922 }
3923
3924 I915_WRITE16(HWSTAM, 0xeffe);
3925 for_each_pipe(dev_priv, pipe)
3926 I915_WRITE(PIPESTAT(pipe), 0);
3927 I915_WRITE(IMR, 0xffffffff);
3928 I915_WRITE(IER, 0x0);
3929 POSTING_READ(IER);
3930 }
3931
3932 static int i915_irq_postinstall(struct drm_device *dev)
3933 {
3934 struct drm_i915_private *dev_priv = dev->dev_private;
3935 u32 enable_mask;
3936
3937 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3938
3939 /* Unmask the interrupts that we always want on. */
3940 dev_priv->irq_mask =
3941 ~(I915_ASLE_INTERRUPT |
3942 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3943 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3944 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3945 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3946
3947 enable_mask =
3948 I915_ASLE_INTERRUPT |
3949 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3950 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3951 I915_USER_INTERRUPT;
3952
3953 if (I915_HAS_HOTPLUG(dev)) {
3954 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3955 POSTING_READ(PORT_HOTPLUG_EN);
3956
3957 /* Enable in IER... */
3958 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3959 /* and unmask in IMR */
3960 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3961 }
3962
3963 I915_WRITE(IMR, dev_priv->irq_mask);
3964 I915_WRITE(IER, enable_mask);
3965 POSTING_READ(IER);
3966
3967 i915_enable_asle_pipestat(dev);
3968
3969 /* Interrupt setup is already guaranteed to be single-threaded, this is
3970 * just to make the assert_spin_locked check happy. */
3971 spin_lock_irq(&dev_priv->irq_lock);
3972 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3973 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3974 spin_unlock_irq(&dev_priv->irq_lock);
3975
3976 return 0;
3977 }
3978
3979 /*
3980 * Returns true when a page flip has completed.
3981 */
3982 static bool i915_handle_vblank(struct drm_device *dev,
3983 int plane, int pipe, u32 iir)
3984 {
3985 struct drm_i915_private *dev_priv = dev->dev_private;
3986 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3987
3988 if (!intel_pipe_handle_vblank(dev, pipe))
3989 return false;
3990
3991 if ((iir & flip_pending) == 0)
3992 goto check_page_flip;
3993
3994 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3995 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3996 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3997 * the flip is completed (no longer pending). Since this doesn't raise
3998 * an interrupt per se, we watch for the change at vblank.
3999 */
4000 if (I915_READ(ISR) & flip_pending)
4001 goto check_page_flip;
4002
4003 intel_prepare_page_flip(dev, plane);
4004 intel_finish_page_flip(dev, pipe);
4005 return true;
4006
4007 check_page_flip:
4008 intel_check_page_flip(dev, pipe);
4009 return false;
4010 }
4011
4012 static irqreturn_t i915_irq_handler(int irq, void *arg)
4013 {
4014 struct drm_device *dev = arg;
4015 struct drm_i915_private *dev_priv = dev->dev_private;
4016 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4017 u32 flip_mask =
4018 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4019 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4020 int pipe, ret = IRQ_NONE;
4021
4022 if (!intel_irqs_enabled(dev_priv))
4023 return IRQ_NONE;
4024
4025 iir = I915_READ(IIR);
4026 do {
4027 bool irq_received = (iir & ~flip_mask) != 0;
4028 bool blc_event = false;
4029
4030 /* Can't rely on pipestat interrupt bit in iir as it might
4031 * have been cleared after the pipestat interrupt was received.
4032 * It doesn't set the bit in iir again, but it still produces
4033 * interrupts (for non-MSI).
4034 */
4035 spin_lock(&dev_priv->irq_lock);
4036 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4037 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4038
4039 for_each_pipe(dev_priv, pipe) {
4040 int reg = PIPESTAT(pipe);
4041 pipe_stats[pipe] = I915_READ(reg);
4042
4043 /* Clear the PIPE*STAT regs before the IIR */
4044 if (pipe_stats[pipe] & 0x8000ffff) {
4045 I915_WRITE(reg, pipe_stats[pipe]);
4046 irq_received = true;
4047 }
4048 }
4049 spin_unlock(&dev_priv->irq_lock);
4050
4051 if (!irq_received)
4052 break;
4053
4054 /* Consume port. Then clear IIR or we'll miss events */
4055 if (I915_HAS_HOTPLUG(dev) &&
4056 iir & I915_DISPLAY_PORT_INTERRUPT)
4057 i9xx_hpd_irq_handler(dev);
4058
4059 I915_WRITE(IIR, iir & ~flip_mask);
4060 new_iir = I915_READ(IIR); /* Flush posted writes */
4061
4062 if (iir & I915_USER_INTERRUPT)
4063 notify_ring(&dev_priv->ring[RCS]);
4064
4065 for_each_pipe(dev_priv, pipe) {
4066 int plane = pipe;
4067 if (HAS_FBC(dev))
4068 plane = !plane;
4069
4070 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4071 i915_handle_vblank(dev, plane, pipe, iir))
4072 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4073
4074 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4075 blc_event = true;
4076
4077 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4078 i9xx_pipe_crc_irq_handler(dev, pipe);
4079
4080 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4081 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4082 pipe);
4083 }
4084
4085 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4086 intel_opregion_asle_intr(dev);
4087
4088 /* With MSI, interrupts are only generated when iir
4089 * transitions from zero to nonzero. If another bit got
4090 * set while we were handling the existing iir bits, then
4091 * we would never get another interrupt.
4092 *
4093 * This is fine on non-MSI as well, as if we hit this path
4094 * we avoid exiting the interrupt handler only to generate
4095 * another one.
4096 *
4097 * Note that for MSI this could cause a stray interrupt report
4098 * if an interrupt landed in the time between writing IIR and
4099 * the posting read. This should be rare enough to never
4100 * trigger the 99% of 100,000 interrupts test for disabling
4101 * stray interrupts.
4102 */
4103 ret = IRQ_HANDLED;
4104 iir = new_iir;
4105 } while (iir & ~flip_mask);
4106
4107 return ret;
4108 }
4109
4110 static void i915_irq_uninstall(struct drm_device * dev)
4111 {
4112 struct drm_i915_private *dev_priv = dev->dev_private;
4113 int pipe;
4114
4115 if (I915_HAS_HOTPLUG(dev)) {
4116 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4117 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4118 }
4119
4120 I915_WRITE16(HWSTAM, 0xffff);
4121 for_each_pipe(dev_priv, pipe) {
4122 /* Clear enable bits; then clear status bits */
4123 I915_WRITE(PIPESTAT(pipe), 0);
4124 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4125 }
4126 I915_WRITE(IMR, 0xffffffff);
4127 I915_WRITE(IER, 0x0);
4128
4129 I915_WRITE(IIR, I915_READ(IIR));
4130 }
4131
4132 static void i965_irq_preinstall(struct drm_device * dev)
4133 {
4134 struct drm_i915_private *dev_priv = dev->dev_private;
4135 int pipe;
4136
4137 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4138 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4139
4140 I915_WRITE(HWSTAM, 0xeffe);
4141 for_each_pipe(dev_priv, pipe)
4142 I915_WRITE(PIPESTAT(pipe), 0);
4143 I915_WRITE(IMR, 0xffffffff);
4144 I915_WRITE(IER, 0x0);
4145 POSTING_READ(IER);
4146 }
4147
4148 static int i965_irq_postinstall(struct drm_device *dev)
4149 {
4150 struct drm_i915_private *dev_priv = dev->dev_private;
4151 u32 enable_mask;
4152 u32 error_mask;
4153
4154 /* Unmask the interrupts that we always want on. */
4155 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4156 I915_DISPLAY_PORT_INTERRUPT |
4157 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4158 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4159 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4160 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4161 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4162
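	/* IER gets everything unmasked in IMR except the flip-pending bits, plus the user interrupt(s). */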
4163 enable_mask = ~dev_priv->irq_mask;
4164 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4165 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4166 enable_mask |= I915_USER_INTERRUPT;
4167
4168 if (IS_G4X(dev))
4169 enable_mask |= I915_BSD_USER_INTERRUPT;
4170
4171 /* Interrupt setup is already guaranteed to be single-threaded, this is
4172 * just to make the assert_spin_locked check happy. */
4173 spin_lock_irq(&dev_priv->irq_lock);
4174 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4175 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4176 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4177 spin_unlock_irq(&dev_priv->irq_lock);
4178
4179 /*
4180 * Enable some error detection, note the instruction error mask
4181 * bit is reserved, so we leave it masked.
4182 */
4183 if (IS_G4X(dev)) {
4184 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4185 GM45_ERROR_MEM_PRIV |
4186 GM45_ERROR_CP_PRIV |
4187 I915_ERROR_MEMORY_REFRESH);
4188 } else {
4189 error_mask = ~(I915_ERROR_PAGE_TABLE |
4190 I915_ERROR_MEMORY_REFRESH);
4191 }
4192 I915_WRITE(EMR, error_mask);
4193
4194 I915_WRITE(IMR, dev_priv->irq_mask);
4195 I915_WRITE(IER, enable_mask);
4196 POSTING_READ(IER);
4197
4198 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4199 POSTING_READ(PORT_HOTPLUG_EN);
4200
4201 i915_enable_asle_pipestat(dev);
4202
4203 return 0;
4204 }
4205
4206 static void i915_hpd_irq_setup(struct drm_device *dev)
4207 {
4208 struct drm_i915_private *dev_priv = dev->dev_private;
4209 u32 hotplug_en;
4210
4211 assert_spin_locked(&dev_priv->irq_lock);
4212
4213 /* Note HDMI and DP share hotplug bits */
4214 /* enable bits are the same for all generations */
4215 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4216 /* Programming the CRT detection parameters tends
4217 to generate a spurious hotplug event about three
4218 seconds later. So just do it once.
4219 */
4220 if (IS_G4X(dev))
4221 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4222 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4223
4224 /* Ignore TV since it's buggy */
4225 i915_hotplug_interrupt_update_locked(dev_priv,
4226 HOTPLUG_INT_EN_MASK |
4227 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4228 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4229 hotplug_en);
4230 }
4231
4232 static irqreturn_t i965_irq_handler(int irq, void *arg)
4233 {
4234 struct drm_device *dev = arg;
4235 struct drm_i915_private *dev_priv = dev->dev_private;
4236 u32 iir, new_iir;
4237 u32 pipe_stats[I915_MAX_PIPES];
4238 int ret = IRQ_NONE, pipe;
4239 u32 flip_mask =
4240 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4241 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4242
4243 if (!intel_irqs_enabled(dev_priv))
4244 return IRQ_NONE;
4245
4246 iir = I915_READ(IIR);
4247
4248 for (;;) {
4249 bool irq_received = (iir & ~flip_mask) != 0;
4250 bool blc_event = false;
4251
4252 /* Can't rely on pipestat interrupt bit in iir as it might
4253 * have been cleared after the pipestat interrupt was received.
4254 * It doesn't set the bit in iir again, but it still produces
4255 * interrupts (for non-MSI).
4256 */
4257 spin_lock(&dev_priv->irq_lock);
4258 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4259 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4260
4261 for_each_pipe(dev_priv, pipe) {
4262 int reg = PIPESTAT(pipe);
4263 pipe_stats[pipe] = I915_READ(reg);
4264
4265 /*
4266 * Clear the PIPE*STAT regs before the IIR
4267 */
4268 if (pipe_stats[pipe] & 0x8000ffff) {
4269 I915_WRITE(reg, pipe_stats[pipe]);
4270 irq_received = true;
4271 }
4272 }
4273 spin_unlock(&dev_priv->irq_lock);
4274
4275 if (!irq_received)
4276 break;
4277
4278 ret = IRQ_HANDLED;
4279
4280 /* Consume port. Then clear IIR or we'll miss events */
4281 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4282 i9xx_hpd_irq_handler(dev);
4283
4284 I915_WRITE(IIR, iir & ~flip_mask);
4285 new_iir = I915_READ(IIR); /* Flush posted writes */
4286
4287 if (iir & I915_USER_INTERRUPT)
4288 notify_ring(&dev_priv->ring[RCS]);
4289 if (iir & I915_BSD_USER_INTERRUPT)
4290 notify_ring(&dev_priv->ring[VCS]);
4291
4292 for_each_pipe(dev_priv, pipe) {
4293 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4294 i915_handle_vblank(dev, pipe, pipe, iir))
4295 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4296
4297 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4298 blc_event = true;
4299
4300 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4301 i9xx_pipe_crc_irq_handler(dev, pipe);
4302
4303 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4304 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4305 }
4306
4307 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4308 intel_opregion_asle_intr(dev);
4309
4310 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4311 gmbus_irq_handler(dev);
4312
4313 /* With MSI, interrupts are only generated when iir
4314 * transitions from zero to nonzero. If another bit got
4315 * set while we were handling the existing iir bits, then
4316 * we would never get another interrupt.
4317 *
4318 * This is fine on non-MSI as well, as if we hit this path
4319 * we avoid exiting the interrupt handler only to generate
4320 * another one.
4321 *
4322 * Note that for MSI this could cause a stray interrupt report
4323 * if an interrupt landed in the time between writing IIR and
4324 * the posting read. This should be rare enough to never
4325 * trigger the 99% of 100,000 interrupts test for disabling
4326 * stray interrupts.
4327 */
4328 iir = new_iir;
4329 }
4330
4331 return ret;
4332 }
4333
4334 static void i965_irq_uninstall(struct drm_device * dev)
4335 {
4336 struct drm_i915_private *dev_priv = dev->dev_private;
4337 int pipe;
4338
4339 if (!dev_priv)
4340 return;
4341
4342 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4343 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4344
4345 I915_WRITE(HWSTAM, 0xffffffff);
4346 for_each_pipe(dev_priv, pipe)
4347 I915_WRITE(PIPESTAT(pipe), 0);
4348 I915_WRITE(IMR, 0xffffffff);
4349 I915_WRITE(IER, 0x0);
4350
4351 for_each_pipe(dev_priv, pipe)
4352 I915_WRITE(PIPESTAT(pipe),
4353 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4354 I915_WRITE(IIR, I915_READ(IIR));
4355 }
4356
4357 /**
4358 * intel_irq_init - initializes irq support
4359 * @dev_priv: i915 device instance
4360 *
4361 * This function initializes all the irq support including work items, timers
4362 * and all the vtables. It does not setup the interrupt itself though.
4363 */
4364 void intel_irq_init(struct drm_i915_private *dev_priv)
4365 {
4366 struct drm_device *dev = dev_priv->dev;
4367
4368 intel_hpd_init_work(dev_priv);
4369
4370 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4371 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4372
4373 /* Let's track the enabled rps events */
4374 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4375 /* WaGsvRC0ResidencyMethod:vlv */
4376 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4377 else
4378 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4379
4380 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4381 i915_hangcheck_elapsed);
4382
4383 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4384
4385 if (IS_GEN2(dev_priv)) {
4386 dev->max_vblank_count = 0;
4387 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4388 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4389 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4390 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4391 } else {
4392 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4393 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4394 }
4395
4396 /*
4397 * Opt out of the vblank disable timer on everything except gen2.
4398 * Gen2 doesn't have a hardware frame counter and so depends on
4399 * vblank interrupts to produce sane vblank sequence numbers.
4400 */
4401 if (!IS_GEN2(dev_priv))
4402 dev->vblank_disable_immediate = true;
4403
4404 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4405 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4406
4407 if (IS_CHERRYVIEW(dev_priv)) {
4408 dev->driver->irq_handler = cherryview_irq_handler;
4409 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4410 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4411 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4412 dev->driver->enable_vblank = valleyview_enable_vblank;
4413 dev->driver->disable_vblank = valleyview_disable_vblank;
4414 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4415 } else if (IS_VALLEYVIEW(dev_priv)) {
4416 dev->driver->irq_handler = valleyview_irq_handler;
4417 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4418 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4419 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4420 dev->driver->enable_vblank = valleyview_enable_vblank;
4421 dev->driver->disable_vblank = valleyview_disable_vblank;
4422 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4423 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4424 dev->driver->irq_handler = gen8_irq_handler;
4425 dev->driver->irq_preinstall = gen8_irq_reset;
4426 dev->driver->irq_postinstall = gen8_irq_postinstall;
4427 dev->driver->irq_uninstall = gen8_irq_uninstall;
4428 dev->driver->enable_vblank = gen8_enable_vblank;
4429 dev->driver->disable_vblank = gen8_disable_vblank;
4430 if (IS_BROXTON(dev))
4431 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4432 else if (HAS_PCH_SPT(dev))
4433 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4434 else
4435 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4436 } else if (HAS_PCH_SPLIT(dev)) {
4437 dev->driver->irq_handler = ironlake_irq_handler;
4438 dev->driver->irq_preinstall = ironlake_irq_reset;
4439 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4440 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4441 dev->driver->enable_vblank = ironlake_enable_vblank;
4442 dev->driver->disable_vblank = ironlake_disable_vblank;
4443 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4444 } else {
4445 if (INTEL_INFO(dev_priv)->gen == 2) {
4446 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4447 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4448 dev->driver->irq_handler = i8xx_irq_handler;
4449 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4450 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4451 dev->driver->irq_preinstall = i915_irq_preinstall;
4452 dev->driver->irq_postinstall = i915_irq_postinstall;
4453 dev->driver->irq_uninstall = i915_irq_uninstall;
4454 dev->driver->irq_handler = i915_irq_handler;
4455 } else {
4456 dev->driver->irq_preinstall = i965_irq_preinstall;
4457 dev->driver->irq_postinstall = i965_irq_postinstall;
4458 dev->driver->irq_uninstall = i965_irq_uninstall;
4459 dev->driver->irq_handler = i965_irq_handler;
4460 }
4461 if (I915_HAS_HOTPLUG(dev_priv))
4462 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4463 dev->driver->enable_vblank = i915_enable_vblank;
4464 dev->driver->disable_vblank = i915_disable_vblank;
4465 }
4466 }
4467
4468 /**
4469 * intel_irq_install - enables the hardware interrupt
4470 * @dev_priv: i915 device instance
4471 *
4472 * This function enables the hardware interrupt handling, but leaves the hotplug
4473 * handling still disabled. It is called after intel_irq_init().
4474 *
4475 * In the driver load and resume code we need working interrupts in a few places
4476 * but don't want to deal with the hassle of concurrent probe and hotplug
4477 * workers. Hence the split into this two-stage approach.
4478 */
4479 int intel_irq_install(struct drm_i915_private *dev_priv)
4480 {
4481 /*
4482 * We enable some interrupt sources in our postinstall hooks, so mark
4483 * interrupts as enabled _before_ actually enabling them to avoid
4484 * special cases in our ordering checks.
4485 */
4486 dev_priv->pm.irqs_enabled = true;
4487
4488 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4489 }
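
/*
 * Illustrative call ordering on driver load (a sketch only, not lifted
 * verbatim from the load path):
 *
 *	intel_irq_init(dev_priv);		// vtables, work items, timers
 *	ret = intel_irq_install(dev_priv);	// request and enable the IRQ
 *	if (ret)
 *		goto err;
 *	intel_hpd_init(dev_priv);		// hotplug handling is enabled separately
 */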
4490
4491 /**
4492 * intel_irq_uninstall - finalizes all irq handling
4493 * @dev_priv: i915 device instance
4494 *
4495 * This stops interrupt and hotplug handling and unregisters and frees all
4496 * resources acquired in the init functions.
4497 */
4498 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4499 {
4500 drm_irq_uninstall(dev_priv->dev);
4501 intel_hpd_cancel_work(dev_priv);
4502 dev_priv->pm.irqs_enabled = false;
4503 }
4504
4505 /**
4506 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4507 * @dev_priv: i915 device instance
4508 *
4509 * This function is used to disable interrupts at runtime, both in the runtime
4510 * pm and the system suspend/resume code.
4511 */
4512 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4513 {
4514 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4515 dev_priv->pm.irqs_enabled = false;
4516 synchronize_irq(dev_priv->dev->irq);
4517 }
4518
4519 /**
4520 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4521 * @dev_priv: i915 device instance
4522 *
4523 * This function is used to enable interrupts at runtime, both in the runtime
4524 * pm and the system suspend/resume code.
4525 */
4526 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4527 {
4528 dev_priv->pm.irqs_enabled = true;
4529 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4530 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4531 }
4532