1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/sysrq.h>
32#include <linux/slab.h>
33#include <linux/circ_buf.h>
34#include <drm/drmP.h>
35#include <drm/i915_drm.h>
36#include "i915_drv.h"
37#include "i915_trace.h"
38#include "intel_drv.h"
39
40
41
42
43
44
45
46
47
/*
 * Hotplug-detect (HPD) interrupt bit tables, one entry per hpd pin.
 * Each table maps an HPD pin to the platform-specific interrupt
 * enable/status bit used for that port.
 */

/* ILK: only DP A hotplug lives in the display engine interrupt regs. */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

/* IVB uses a different bit for DP A hotplug. */
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

/* BDW+ DP A hotplug bit in the DE port interrupt register. */
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

/* IBX/CPT south display engine (PCH) hotplug bits. */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

/* SPT adds port A and E hotplug on the PCH side. */
static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

/* Gen3/4 PORT_HOTPLUG_EN enable bits. */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

/* PORT_HOTPLUG_STAT status bits; SDVO bits differ between G4X and i915. */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT: DDI A/B/C hotplug bits in the DE port interrupt register. */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
117
118
/*
 * Fully disable and clear a GEN8 banked interrupt register triplet:
 * mask everything (IMR), disable everything (IER), then clear the IIR
 * twice — the IIR is double buffered, so a second latched event can pop
 * out after the first clear.
 */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

/* Same reset sequence for the non-banked GEN5-style register triplets. */
#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
138
139
140
141
/*
 * Check that an interrupt identity register (IIR) is zero before the
 * corresponding interrupts are enabled. A non-zero value means an event
 * was left pending from before; warn and clear the register twice (the
 * IIR is double buffered) so we start from a known-clean state.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}
157
/*
 * Initialize a GEN8 banked interrupt register triplet: verify no stale
 * IIR bits are pending, then program the enable (IER) and mask (IMR)
 * values, flushing the IMR write with a posting read.
 */
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

/* Same init sequence for the non-banked GEN5-style register triplets. */
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
171
172static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
173static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
174
175
/*
 * Update the PORT_HOTPLUG_EN register: clear all bits in @mask, then set
 * @bits (which must be a subset of @mask). Caller must hold
 * dev_priv->irq_lock.
 */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}
191
192
193
194
195
196
197
198
199
200
201
202
203
/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable bits
 * @dev_priv: driver private
 * @mask: bits to clear in PORT_HOTPLUG_EN
 * @bits: bits to set (must be a subset of @mask)
 *
 * Locking wrapper around i915_hotplug_interrupt_update_locked(); takes
 * dev_priv->irq_lock itself.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
212
213
214
215
216
217
218
/**
 * ilk_update_display_irq - update DEIMR (display engine interrupt mask)
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Bits set in @enabled_irq_mask are unmasked; the remaining bits of
 * @interrupt_mask are masked. Caller must hold dev_priv->irq_lock. The
 * register is only written when the cached mask actually changes.
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
242
243
244
245
246
247
248
/**
 * ilk_update_gt_irq - update GTIMR (GT interrupt mask)
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Caller must hold dev_priv->irq_lock. Note there is no posting read
 * here; gen5_enable_gt_irq() issues one after enabling.
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}
264
/* Unmask GT interrupts in @mask; the posting read flushes the write. */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

/* Mask GT interrupts in @mask. */
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
275
276static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
277{
278 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
279}
280
281static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
282{
283 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
284}
285
286static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
287{
288 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
289}
290
291
292
293
294
295
296
/**
 * snb_update_pm_irq - update the PM interrupt mask (PMIMR)
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Caller must hold dev_priv->irq_lock. The cached dev_priv->pm_imr is
 * kept in sync and the register is only written on an actual change.
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}
317
/* Unmask PM interrupts in @mask. Caller must hold dev_priv->irq_lock. */
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

/* Mask PM interrupts in @mask without checking intel_irqs_enabled() —
 * used on teardown paths where irqs may already be marked disabled. */
static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

/* Mask PM interrupts in @mask. Caller must hold dev_priv->irq_lock. */
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}
338
/*
 * Clear pending PM interrupts in @reset_mask. The IIR is double
 * buffered, so write it twice to flush out a latched second event.
 * Caller must hold dev_priv->irq_lock.
 */
void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}
349
/*
 * Enable PM interrupts in @enable_mask: set the bits in the cached IER
 * value, write the register, then unmask them in the IMR. Caller must
 * hold dev_priv->irq_lock.
 */
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
}
359
/*
 * Disable PM interrupts in @disable_mask: mask them first so no new
 * interrupt fires, then clear them from the enable register. Caller
 * must hold dev_priv->irq_lock.
 */
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
}
369
/* Clear any pending RPS interrupts and the cached pm_iir snapshot. */
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}
377
/*
 * Enable the RPS (turbo) interrupts. No-op if already enabled. Warns if
 * stale events are pending either in the cached pm_iir or in hardware,
 * which would indicate the disable path didn't clean up properly.
 */
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
391
/*
 * Disable the RPS (turbo) interrupts and quiesce all related activity.
 * After marking interrupts disabled and masking/disabling them in
 * hardware, synchronize_irq() waits out any handler that may still be
 * running, and cancel_work_sync() flushes the rps worker (which checks
 * interrupts_enabled and so won't re-arm anything). Finally any events
 * that raced in are cleared.
 */
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	/* Mask everything at the PM interrupt mask register level too. */
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that no more RPS work can be queued, flush what's left. */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}
415
/* Clear any pending GuC interrupts. */
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
422
/*
 * Enable the GuC interrupts if not already enabled. Warns if stale GuC
 * events are still pending in the hardware IIR.
 */
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}
434
/*
 * Disable the GuC interrupts: mask/disable them, wait out any handler
 * still running via synchronize_irq(), then clear anything that raced in.
 */
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}
447
448
449
450
451
452
453
/**
 * bdw_update_port_irq - update DE port interrupt mask
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Caller must hold dev_priv->irq_lock. Unlike the other *_update helpers
 * there is no cached copy; the current mask is read back from hardware.
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}
479
480
481
482
483
484
485
486
/**
 * bdw_update_pipe_irq - update DE pipe interrupt mask
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt mask to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Caller must hold dev_priv->irq_lock. The per-pipe cached mask is kept
 * in sync and the register only written on an actual change.
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}
511
512
513
514
515
516
517
518void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
519 uint32_t interrupt_mask,
520 uint32_t enabled_irq_mask)
521{
522 uint32_t sdeimr = I915_READ(SDEIMR);
523 sdeimr &= ~interrupt_mask;
524 sdeimr |= (~enabled_irq_mask & interrupt_mask);
525
526 WARN_ON(enabled_irq_mask & ~interrupt_mask);
527
528 lockdep_assert_held(&dev_priv->irq_lock);
529
530 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
531 return;
532
533 I915_WRITE(SDEIMR, sdeimr);
534 POSTING_READ(SDEIMR);
535}
536
/*
 * Enable pipestat interrupts: set @enable_mask (high half of PIPESTAT)
 * and @status_mask (low half, also cached in pipestat_irq_mask so the
 * irq handler knows which status bits are of interest). Caller must
 * hold dev_priv->irq_lock. Writing the status bits back also clears any
 * stale status (write-1-to-clear).
 */
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	/* Already enabled — nothing to do. */
	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
563
/*
 * Disable pipestat interrupts: clear @enable_mask in PIPESTAT and drop
 * @status_mask from the cached pipestat_irq_mask. Caller must hold
 * dev_priv->irq_lock.
 */
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	/* Already disabled — nothing to do. */
	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
589
590static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
591{
592 u32 enable_mask = status_mask << 16;
593
594
595
596
597
598 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
599 return 0;
600
601
602
603
604 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
605 return 0;
606
607 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
608 SPRITE0_FLIP_DONE_INT_EN_VLV |
609 SPRITE1_FLIP_DONE_INT_EN_VLV);
610 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
611 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
612 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
613 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
614
615 return enable_mask;
616}
617
618void
619i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
620 u32 status_mask)
621{
622 u32 enable_mask;
623
624 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
625 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
626 status_mask);
627 else
628 enable_mask = status_mask << 16;
629 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
630}
631
632void
633i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
634 u32 status_mask)
635{
636 u32 enable_mask;
637
638 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
639 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
640 status_mask);
641 else
642 enable_mask = status_mask << 16;
643 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
644}
645
646
647
648
649
/*
 * Enable the legacy backlight (BLC) pipestat events used for opregion
 * ASLE notifications. Only relevant when an opregion ASLE mailbox
 * exists and the device is a mobile part. Pipe A is only wired up for
 * this on gen4+.
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
/*
 * Reconstruct a vblank counter for pre-g4x hardware. The hardware frame
 * counter increments at the start of active video, not at vblank start,
 * so the pixel counter is compared against the (pixel-unit) vblank start
 * position to decide whether the current frame's vblank has begun and
 * the count needs a +1.
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert vblank start from scanlines to pixels... */
	vbl_start *= htotal;

	/* ...counted from the hsync start of the previous line. */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High and low frame count live in separate registers, so read
	 * high twice and retry until it is stable to get a consistent
	 * high/low pair.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * Bump the count by one if the pixel counter shows we are past
	 * vblank start in the current frame; result wraps at 24 bits.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
767
/* g4x+ has a real hardware frame counter register; just read it. */
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
774
775
/*
 * Read the current scanline for @crtc from PIPEDSL. Caller must hold
 * the uncore lock (raw _FW register accessors are used). Returns -1 if
 * the crtc is not active. The result is adjusted by scanline_offset and
 * wrapped to vtotal so that 0 corresponds to a fixed point in the frame.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On DDI platforms the scanline counter can apparently read 0
	 * transiently; poll for up to ~100us for it to move before
	 * trusting a zero reading. (Workaround — exact hardware erratum
	 * not visible from here.)
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/* Normalize via scanline_offset, wrapping at vtotal. */
	return (position + crtc->scanline_offset) % vtotal;
}
831
/*
 * Query the current scanout position for @pipe. On exit *vpos/*hpos are
 * relative to vblank start (negative while in vblank), and *stime/*etime
 * (if non-NULL) bracket the register read so the caller can bound the
 * sampling time. Returns false if the mode clock is zero (pipe off).
 */
static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	/* Interlaced modes: work in field lines, not frame lines. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Take the uncore lock so the timestamp pair below tightly
	 * brackets the raw register reads.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* These platforms have a usable scanline counter. */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/*
		 * No reliable scanline counter: use the pixel counter
		 * instead and work in pixel units from here on.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* Convert the scanline-based limits to pixels too. */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * Clamp: the pixel counter can apparently exceed the
		 * expected maximum; keep it inside [0, vtotal).
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Shift origin so position is counted from hsync start
		 * of the previous line, matching the vblank counter's
		 * notion of frame start.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	if (etime)
		*etime = ktime_get();

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * Re-express position relative to vblank start: in-vblank
	 * positions become negative, active-area positions positive.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		/* Split the pixel-unit position back into line/pixel. */
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
945
/* Locked wrapper around __intel_get_crtc_scanline(). */
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
958
/*
 * ILK GPU frequency (DRPS) interrupt handler: compare the busy counters
 * against the max/min averages and step the delay one notch up or down.
 * Note the delay value is inverted relative to frequency: a *smaller*
 * delay means a *higher* frequency, hence max_delay is the lower bound
 * of cur_delay - 1 and min_delay the upper bound of cur_delay + 1.
 */
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	/* Ack all pending MEMINT events. */
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Busy above max average: speed up (decrement delay). */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		/* Busy below min average: slow down (increment delay). */
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
996
/*
 * User interrupt handler for an engine: wake the first waiter on the
 * breadcrumb queue. If the waiter's request already completed (seqno
 * passed) and its fence is not yet signaled, take a reference under the
 * irq_lock and signal the fence outside the lock — dma_fence_signal()
 * must not be called with breadcrumbs.irq_lock held. With no waiter at
 * all, disarm the breadcrumb interrupt.
 */
static void notify_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq = NULL;
	struct intel_wait *wait;

	atomic_inc(&engine->irq_count);
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/*
		 * Grab a reference to the completed request so we can
		 * signal it after dropping the lock; the waiter's task
		 * is woken regardless.
		 */
		if (i915_seqno_passed(intel_engine_get_seqno(engine),
				      wait->seqno) &&
		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			      &wait->request->fence.flags))
			rq = i915_gem_request_get(wait->request);

		wake_up_process(wait->tsk);
	} else {
		__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		dma_fence_signal(&rq->fence);
		i915_gem_request_put(rq);
	}

	trace_intel_engine_notify(engine, wait);
}
1038
/* Snapshot the VLV render/media C0 residency counters with a timestamp. */
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
1046
/* Invalidate the cached C0 snapshot (ktime == 0 means "no baseline"). */
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}
1051
/*
 * VLV/CHV workaround: derive up/down threshold events in software from
 * the C0 residency counters on an EI-expired interrupt, instead of
 * relying on the hardware-generated threshold interrupts. Returns the
 * synthesized GEN6_PM_RP_{UP,DOWN}_THRESHOLD event bits (or 0).
 */
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	/* Only evaluate once a previous baseline snapshot exists. */
	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		/* Scale elapsed time by the CZ clock for the comparison. */
		time *= dev_priv->czclk_freq;

		/*
		 * Busyness is the larger of the render/media counter
		 * deltas, scaled so it can be compared directly against
		 * time * threshold (thresholds are in percent-ish units
		 * — see the up/down_threshold setup elsewhere).
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8;

		if (c0 > time * dev_priv->rps.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * dev_priv->rps.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	dev_priv->rps.ei = now;
	return events;
}
1090
/*
 * Deferred RPS work: consume the accumulated PM interrupt events (and
 * any client boost request) and pick a new GPU frequency. The adjustment
 * step doubles in the direction of repeated up/down events and resets
 * to 0 when the direction changes or a limit is hit. The RPS interrupts
 * masked by the irq handler are re-enabled at the end.
 */
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled) {
		pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
		client_boost = atomic_read(&dev_priv->rps.num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Nothing to do if no RPS event and no boost request. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	/* VLV/CHV: synthesize threshold events from C0 counters. */
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost)
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= dev_priv->rps.max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		/* Idle timeout: drop to the efficient or minimum freq. */
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= dev_priv->rps.min_freq_softlimit)
			adj = 0;
	} else {
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* Apply the step, clamped to the active soft limits. */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		dev_priv->rps.last_adj = 0;
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	/* Re-enable the RPS interrupts masked in the irq handler. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
/*
 * Deferred L3 parity error handling: for each slice flagged by the irq
 * handler, read out row/bank/subbank from the L3CDERR register, clear
 * the error, and emit a uevent so userspace can remap the faulty cache
 * line. DOP clock gating is temporarily disabled around the register
 * access, and the parity interrupt (disabled by the irq handler) is
 * re-enabled at the end.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* struct_mutex serializes us against the L3 remapping code. */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If there's nothing flagged, the irq handler misbehaved. */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Clear the error and re-arm error reporting. */
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
1256
/*
 * Hard-irq half of L3 parity handling: disable the parity interrupt,
 * record which slice(s) errored, and kick the deferred work item that
 * does the heavy lifting (and re-enables the interrupt).
 */
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
1276
/* ILK GT interrupt dispatch: user interrupts for the render/BSD rings. */
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}
1285
/*
 * SNB+ GT interrupt dispatch: user interrupts per ring, plus command
 * parser error logging and L3 parity error handling.
 */
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
1304
/*
 * Per-engine GEN8 command streamer interrupt handling: mark an execlist
 * context-switch event and/or a user interrupt. The execlist tasklet is
 * scheduled when there is port activity to process, or (with GuC
 * submission) on a user interrupt as well.
 */
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	bool tasklet = false;

	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
		if (port_count(&engine->execlist_port[0])) {
			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
			tasklet = true;
		}
	}

	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
		notify_ring(engine);
		tasklet |= i915.enable_guc_submission;
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->irq_tasklet);
}
1325
/*
 * Read and acknowledge the GEN8 GT interrupt banks indicated by
 * @master_ctl, stashing each bank's IIR into @gt_iir for later
 * processing by gen8_gt_irq_handler(). Uses raw _FW accessors — this
 * runs in the hard irq path. A bank flagged in master_ctl but reading
 * zero indicates a hardware inconsistency and is logged. Bank 2 (PM)
 * only acks the RPS/GuC event bits we actually handle.
 */
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & (dev_priv->pm_rps_events |
				 dev_priv->pm_guc_events)) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & (dev_priv->pm_rps_events |
						   dev_priv->pm_guc_events));
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}
1373
1374static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1375 u32 gt_iir[4])
1376{
1377 if (gt_iir[0]) {
1378 gen8_cs_irq_handler(dev_priv->engine[RCS],
1379 gt_iir[0], GEN8_RCS_IRQ_SHIFT);
1380 gen8_cs_irq_handler(dev_priv->engine[BCS],
1381 gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1382 }
1383
1384 if (gt_iir[1]) {
1385 gen8_cs_irq_handler(dev_priv->engine[VCS],
1386 gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1387 gen8_cs_irq_handler(dev_priv->engine[VCS2],
1388 gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1389 }
1390
1391 if (gt_iir[3])
1392 gen8_cs_irq_handler(dev_priv->engine[VECS],
1393 gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1394
1395 if (gt_iir[2] & dev_priv->pm_rps_events)
1396 gen6_rps_irq_handler(dev_priv, gt_iir[2]);
1397
1398 if (gt_iir[2] & dev_priv->pm_guc_events)
1399 gen9_guc_irq_handler(dev_priv, gt_iir[2]);
1400}
1401
1402static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1403{
1404 switch (port) {
1405 case PORT_A:
1406 return val & PORTA_HOTPLUG_LONG_DETECT;
1407 case PORT_B:
1408 return val & PORTB_HOTPLUG_LONG_DETECT;
1409 case PORT_C:
1410 return val & PORTC_HOTPLUG_LONG_DETECT;
1411 default:
1412 return false;
1413 }
1414}
1415
1416static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1417{
1418 switch (port) {
1419 case PORT_E:
1420 return val & PORTE_HOTPLUG_LONG_DETECT;
1421 default:
1422 return false;
1423 }
1424}
1425
1426static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1427{
1428 switch (port) {
1429 case PORT_A:
1430 return val & PORTA_HOTPLUG_LONG_DETECT;
1431 case PORT_B:
1432 return val & PORTB_HOTPLUG_LONG_DETECT;
1433 case PORT_C:
1434 return val & PORTC_HOTPLUG_LONG_DETECT;
1435 case PORT_D:
1436 return val & PORTD_HOTPLUG_LONG_DETECT;
1437 default:
1438 return false;
1439 }
1440}
1441
1442static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1443{
1444 switch (port) {
1445 case PORT_A:
1446 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1447 default:
1448 return false;
1449 }
1450}
1451
1452static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1453{
1454 switch (port) {
1455 case PORT_B:
1456 return val & PORTB_HOTPLUG_LONG_DETECT;
1457 case PORT_C:
1458 return val & PORTC_HOTPLUG_LONG_DETECT;
1459 case PORT_D:
1460 return val & PORTD_HOTPLUG_LONG_DETECT;
1461 default:
1462 return false;
1463 }
1464}
1465
1466static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1467{
1468 switch (port) {
1469 case PORT_B:
1470 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1471 case PORT_C:
1472 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1473 case PORT_D:
1474 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1475 default:
1476 return false;
1477 }
1478}
1479
1480
1481
1482
1483
1484
1485
1486
1487static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1488 u32 hotplug_trigger, u32 dig_hotplug_reg,
1489 const u32 hpd[HPD_NUM_PINS],
1490 bool long_pulse_detect(enum port port, u32 val))
1491{
1492 enum port port;
1493 int i;
1494
1495 for_each_hpd_pin(i) {
1496 if ((hpd[i] & hotplug_trigger) == 0)
1497 continue;
1498
1499 *pin_mask |= BIT(i);
1500
1501 port = intel_hpd_pin_to_port(i);
1502 if (port == PORT_NONE)
1503 continue;
1504
1505 if (long_pulse_detect(port, dig_hotplug_reg))
1506 *long_mask |= BIT(i);
1507 }
1508
1509 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1510 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1511
1512}
1513
/* GMBUS event: wake anyone sleeping on a GMBUS transfer. */
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1518
/*
 * DP AUX transaction done. NOTE(review): this wakes gmbus_wait_queue —
 * AUX waiters appear to share the GMBUS wait queue rather than having
 * their own; confirm against the DP AUX wait path before "fixing" it.
 */
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1523
#if defined(CONFIG_DEBUG_FS)
/*
 * Record one CRC result for @pipe. Two delivery paths exist:
 *  - legacy i915-specific debugfs interface (pipe_crc->source set):
 *    push into the driver's own circular buffer and wake readers;
 *  - generic DRM CRC ABI: hand the values to drm_crtc_add_crc_entry().
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct drm_driver *driver = dev_priv->drm.driver;
	uint32_t crcs[5];
	int head, tail;

	spin_lock(&pipe_crc->lock);
	if (pipe_crc->source) {
		/* Legacy path: ring buffer protected by pipe_crc->lock. */
		if (!pipe_crc->entries) {
			spin_unlock(&pipe_crc->lock);
			DRM_DEBUG_KMS("spurious interrupt\n");
			return;
		}

		head = pipe_crc->head;
		tail = pipe_crc->tail;

		/* Drop the sample rather than overwrite unread entries. */
		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
			spin_unlock(&pipe_crc->lock);
			DRM_ERROR("CRC buffer overflowing\n");
			return;
		}

		entry = &pipe_crc->entries[head];

		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
		entry->crc[0] = crc0;
		entry->crc[1] = crc1;
		entry->crc[2] = crc2;
		entry->crc[3] = crc3;
		entry->crc[4] = crc4;

		/* Entry count is a power of two, so masking wraps the head. */
		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		pipe_crc->head = head;

		spin_unlock(&pipe_crc->lock);

		wake_up_interruptible(&pipe_crc->wq);
	} else {
		/*
		 * Generic DRM CRC path. The first CRC after enabling (and
		 * on CHV also the second) is discarded — NOTE(review):
		 * presumably the hardware produces unreliable values right
		 * after the CRC source is switched; confirm against the
		 * CRC enable path.
		 */
		if (pipe_crc->skipped == 0 ||
		    (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
			pipe_crc->skipped++;
			spin_unlock(&pipe_crc->lock);
			return;
		}
		spin_unlock(&pipe_crc->lock);
		crcs[0] = crc0;
		crcs[1] = crc1;
		crcs[2] = crc2;
		crcs[3] = crc3;
		crcs[4] = crc4;
		drm_crtc_add_crc_entry(&crtc->base, true,
				       drm_crtc_accurate_vblank_count(&crtc->base),
				       crcs);
	}
}
#else
/* Without debugfs there is nowhere to report CRCs; compile to a no-op. */
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
1604
1605
/* HSW+ exposes a single CRC result register; pad the rest with zeroes. */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}
1613
/*
 * IVB provides five CRC result registers per pipe; read them all and
 * forward to the common recorder. (The reads happen as argument
 * evaluation — their relative order is unspecified, which is harmless
 * for these independent status registers.)
 */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
1624
/*
 * Pre-IVB CRC readout: red/green/blue results always exist; the two
 * "residual" registers only exist on newer parts, so substitute zero
 * where the hardware lacks them.
 */
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	/* RES1 exists from gen3 onwards. */
	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	/* RES2 exists on G4X and gen5+. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
1646
1647
1648
1649
/*
 * Handle PM interrupts: RPS (frequency management) events are masked and
 * deferred to the RPS worker; on pre-gen8 the same register also carries
 * VEBOX user/error interrupts, handled inline.
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		/* Mask first so the event cannot re-fire before the worker runs. */
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			/* Accumulate under irq_lock; the worker consumes pm_iir. */
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	/* Gen8+ routes VEBOX interrupts elsewhere; nothing more to do. */
	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}
1673
/*
 * Handle GuC-to-host interrupts. The GuC posts its message identity in
 * SOFT_SCRATCH(15); the only events handled here are log-buffer flush
 * requests, which are deferred to the log flush worker.
 */
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/*
		 * Sample the flush-related bits and clear just those bits
		 * from the message register right away: the same bits are
		 * reused for back-to-back flush requests, so leaving them
		 * set risks losing a subsequent flush notification.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Ack only the bits we are about to service. */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Defer the actual log capture to process context. */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/*
			 * Other GuC message bits are intentionally ignored;
			 * leaving them set does not re-trigger the interrupt.
			 */
		}
	}
}
1708
/*
 * Read and ack the per-pipe PIPESTAT registers on VLV/CHV, storing the
 * interesting status bits in @pipe_stats[] for later handling.
 */
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT status bits get set even when the corresponding
		 * interrupt is masked, and some (like the FIFO underrun
		 * bit) never generate interrupts at all, so only look at
		 * what we actually want to handle. Underruns are always
		 * sampled; the rest only when the pipe's IIR bit fired.
		 */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Write back (clear) the PIPESTAT status bits now, before
		 * the caller clears the IIR, so no edge is lost.
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}
1766
1767static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1768 u32 pipe_stats[I915_MAX_PIPES])
1769{
1770 enum pipe pipe;
1771
1772 for_each_pipe(dev_priv, pipe) {
1773 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1774 drm_handle_vblank(&dev_priv->drm, pipe);
1775
1776 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1777 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1778
1779 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1780 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1781 }
1782
1783 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1784 gmbus_irq_handler(dev_priv);
1785}
1786
/*
 * Read and ack (write-to-clear) the gmch hotplug status register,
 * returning the raw status for later dispatch.
 */
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}
1796
/*
 * Dispatch a previously acked gmch hotplug status: decode it with the
 * platform-appropriate pin table (G4X/VLV/CHV vs. older i915 layout)
 * and forward to the common HPD handler. G4X-class hardware also
 * signals DP AUX completion through this register.
 */
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			/* Status doubles as the long/short pulse register here. */
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}
1827
/* Top-level interrupt handler for VLV. */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime suspend, so no wakeref is needed. */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * The CPU interrupt is edge triggered on the combined IIR
		 * state, so disable VLV_MASTER_IER and VLV_IER while we
		 * process: that guarantees a fresh 0->1 edge (and thus a
		 * new CPU interrupt) when they are restored below, even if
		 * we fail to clear every IIR bit. Do NOT reorder this
		 * sequence — it is what makes interrupt delivery reliable.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/*
		 * Ack pipestats unconditionally: some status bits (e.g.
		 * underruns) are latched without raising an IIR bit.
		 */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered and reflects the level of
		 * PIPESTAT/PORT_HOTPLUG_STAT, so it must be cleared LAST,
		 * after those sources have been acked above.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		/* Dispatch only after interrupt generation is re-armed. */
		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
1914
/* Top-level interrupt handler for CHV: gen8-style GT + VLV-style display. */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime suspend, so no wakeref is needed. */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * As on VLV, the CPU interrupt fires on a 0->1 edge of the
		 * combined IIR state: disable the master enable and VLV_IER
		 * for the duration of processing so that restoring them
		 * below guarantees a fresh edge. Do NOT reorder.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/*
		 * Ack pipestats unconditionally: some status bits are
		 * latched without raising an IIR bit.
		 */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered and reflects the level of
		 * PIPESTAT/PORT_HOTPLUG_STAT, so clear it last, after those
		 * sources have been acked above.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		/* Dispatch only after interrupt generation is re-armed. */
		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
1996
/*
 * Handle PCH (IBX/CPT-style) hotplug interrupts. Called even with a zero
 * @hotplug_trigger — see the ack quirk below.
 */
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * The PCH does not seem to fully ack the interrupt to the CPU
	 * unless PCH_PORT_HOTPLUG is written, even when hotplug_trigger
	 * is zero; skipping the write leads to spurious "master control
	 * interrupt lied (SDE)" errors. When there is no trigger, mask
	 * out the status bits before writing back so none are cleared
	 * by accident.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2028
2029static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2030{
2031 int pipe;
2032 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2033
2034 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2035
2036 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2037 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2038 SDE_AUDIO_POWER_SHIFT);
2039 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2040 port_name(port));
2041 }
2042
2043 if (pch_iir & SDE_AUX_MASK)
2044 dp_aux_irq_handler(dev_priv);
2045
2046 if (pch_iir & SDE_GMBUS)
2047 gmbus_irq_handler(dev_priv);
2048
2049 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2050 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2051
2052 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2053 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2054
2055 if (pch_iir & SDE_POISON)
2056 DRM_ERROR("PCH poison interrupt\n");
2057
2058 if (pch_iir & SDE_FDI_MASK)
2059 for_each_pipe(dev_priv, pipe)
2060 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2061 pipe_name(pipe),
2062 I915_READ(FDI_RX_IIR(pipe)));
2063
2064 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2065 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2066
2067 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2068 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2069
2070 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2071 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2072
2073 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2074 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2075}
2076
/*
 * Handle the IVB/HSW error interrupt register: poison, per-pipe FIFO
 * underruns and CRC-done events. The register is write-to-clear and is
 * acked at the end, after all bits have been inspected.
 */
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* IVB and HSW expose different CRC result registers. */
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}
2099
/*
 * Handle the CPT south error interrupt register (poison and per-transcoder
 * FIFO underruns); write-to-clear ack happens at the end.
 */
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C);

	I915_WRITE(SERR_INT, serr_int);
}
2118
/* Handle south display engine (CPT/PPT PCH) interrupts. */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	/* Always called, even with a zero trigger — see the ack quirk there. */
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	/* Audio content-protection events are merely logged. */
	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
2154
/*
 * Handle south display engine (SPT+ PCH) interrupts. Port E hotplug
 * lives in a second register (PCH_PORT_HOTPLUG2), so the two trigger
 * sets are decoded separately and the resulting pin masks merged.
 */
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read then write back to ack the digital hotplug status. */
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		/* pin_mask/long_mask accumulate across both decodes. */
		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}
2190
/*
 * Handle ILK-style CPU-side digital port hotplug: ack the hotplug control
 * register (read, then write back to clear) and decode the trigger.
 */
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2206
/* Handle display engine (DE) interrupts on ILK/SNB. */
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* Check for forwarded PCH (south display) events. */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* Ack SDEIIR after handling so nothing new is lost. */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}
2252
/* Handle display engine (DE) interrupts on IVB/HSW. */
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
	}

	/* Check for forwarded PCH (south display) events. */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* Ack SDEIIR after handling so nothing new is lost. */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
2286
2287
2288
2289
2290
2291
2292
2293
2294
/*
 * Top-level interrupt handler for ILK through HSW. To minimize races with
 * fresh interrupts the sequence is: disable master interrupt control,
 * find and clear the IIR source bits, handle them, then re-enable master
 * interrupt control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime suspend, so no wakeref is needed. */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Disable master interrupt delivery before touching the IIRs. */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/*
	 * Disable south (PCH) interrupts too: SDEIIR is only written once
	 * below, so further PCH events queue up behind it and will raise a
	 * new interrupt once SDEIER is restored at the end.
	 */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Ack each IIR before handling, so new events re-trigger. */
	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			snb_gt_irq_handler(dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	/* Restore master and south interrupt delivery. */
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev_priv)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
2367
/*
 * Handle BXT digital port hotplug: ack PCH_PORT_HOTPLUG (read, then write
 * back to clear) and decode the trigger into HPD pins.
 */
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2383
/*
 * Handle gen8+ display engine interrupts: misc (GSE), port (AUX/hotplug/
 * GMBUS), per-pipe events and forwarded PCH interrupts. Each IIR is
 * acked immediately after it is read, before its bits are handled.
 */
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			if (iir & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev_priv);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			/* AUX channels B-D only exist on gen9+. */
			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_GEN(dev_priv) >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			/* Hotplug routing differs per platform. */
			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		/* Anything left after masking is an unexpected fault. */
		fault_errors = iir;
		if (INTEL_GEN(dev_priv) >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * Forwarded PCH interrupt. Unlike the pre-gen8 path, no
		 * SDEIER dance is done here — the gen8 interrupt scheme is
		 * assumed to have closed the old SDE race.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			    HAS_PCH_CNP(dev_priv))
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Spurious PCH forwarding happens in practice;
			 * only log at debug level to avoid alarming users.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}
2517
/* Top-level interrupt handler for BDW/SKL-style hardware. */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 master_ctl;
	u32 gt_iir[4] = {};
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	/* Disable the master enable while the sub-IIRs are processed. */
	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime suspend, so no wakeref is needed. */
	disable_rpm_wakeref_asserts(dev_priv);

	/* GT interrupts are acked (raw accessors) before dispatching. */
	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
	gen8_gt_irq_handler(dev_priv, gt_iir);
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
2551
/*
 * On-stack watchdog used by i915_wedge_on_timeout(): if the guarded
 * section does not finish before the delayed work fires, the GPU is
 * declared wedged (see wedge_me()).
 */
struct wedge_me {
	struct delayed_work work;	/* fires wedge_me() on timeout */
	struct drm_i915_private *i915;
	const char *name;		/* caller name, for the error message */
};
2557
/* Watchdog expiry: the guarded section took too long — wedge the GPU. */
static void wedge_me(struct work_struct *work)
{
	struct wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	i915_gem_set_wedged(w->i915);
}
2567
/* Arm the on-stack wedge watchdog; paired with __fini_wedge(). */
static void __init_wedge(struct wedge_me *w,
			 struct drm_i915_private *i915,
			 long timeout,
			 const char *name)
{
	w->i915 = i915;
	w->name = name;

	/* ONSTACK variant: w lives on the caller's stack. */
	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
	schedule_delayed_work(&w->work, timeout);
}
2579
/* Disarm the wedge watchdog; w->i915 = NULL ends the for-loop in the macro. */
static void __fini_wedge(struct wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->i915 = NULL;
}
2586
/*
 * Run the following block under a watchdog: if it does not complete
 * within TIMEOUT jiffies, wedge_me() fires and wedges the GPU. Expands
 * to a for-loop that executes the body exactly once (__fini_wedge()
 * clears (W)->i915, terminating the loop).
 */
#define i915_wedge_on_timeout(W, DEV, TIMEOUT)				\
	for (__init_wedge((W), (DEV), (TIMEOUT), __func__);		\
	     (W)->i915;							\
	     __fini_wedge((W)))
2591
2592
2593
2594
2595
2596
2597
2598
/*
 * Perform a full GPU reset: notify userspace via uevents, hand the reset
 * off to any waiter holding struct_mutex, and wedge the GPU if the whole
 * sequence does not complete within 5 seconds.
 */
static void i915_reset_device(struct drm_i915_private *dev_priv)
{
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
		intel_prepare_reset(dev_priv);

		/* Signal that locked waiters should reset the GPU */
		set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
		wake_up_all(&dev_priv->gpu_error.wait_queue);

		/* Wait for anyone holding the lock to wake up, without
		 * blocking indefinitely on struct_mutex: retry the reset
		 * ourselves whenever the mutex is free, until the HANDOFF
		 * bit has been consumed.
		 */
		do {
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
				i915_reset(dev_priv, 0);
				mutex_unlock(&dev_priv->drm.struct_mutex);
			}
		} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
					     I915_RESET_HANDOFF,
					     TASK_UNINTERRUPTIBLE,
					     1));

		intel_finish_reset(dev_priv);
	}

	/* Only report success to userspace if the device wasn't wedged. */
	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		kobject_uevent_env(kobj,
				   KOBJ_CHANGE, reset_done_event);
}
2640
/*
 * Clear the sticky GPU error reporting registers (PGTBL_ER, IPEIR, EIR)
 * after an error has been captured, masking any error source that
 * refuses to clear.
 */
static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
	u32 eir;

	if (!IS_GEN2(dev_priv))
		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));

	/* The instruction-pointer error register moved on gen4. */
	if (INTEL_GEN(dev_priv) < 4)
		I915_WRITE(IPEIR, I915_READ(IPEIR));
	else
		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));

	I915_WRITE(EIR, I915_READ(EIR));
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * Some errors might have become stuck,
		 * so mask them off in EMR to stop the interrupt storm,
		 * and ack the pending parser-error interrupt.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678void i915_handle_error(struct drm_i915_private *dev_priv,
2679 u32 engine_mask,
2680 const char *fmt, ...)
2681{
2682 struct intel_engine_cs *engine;
2683 unsigned int tmp;
2684 va_list args;
2685 char error_msg[80];
2686
2687 va_start(args, fmt);
2688 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2689 va_end(args);
2690
2691
2692
2693
2694
2695
2696
2697
2698 intel_runtime_pm_get(dev_priv);
2699
2700 i915_capture_error_state(dev_priv, engine_mask, error_msg);
2701 i915_clear_error_registers(dev_priv);
2702
2703
2704
2705
2706
2707 if (intel_has_reset_engine(dev_priv)) {
2708 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
2709 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
2710 if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
2711 &dev_priv->gpu_error.flags))
2712 continue;
2713
2714 if (i915_reset_engine(engine, 0) == 0)
2715 engine_mask &= ~intel_engine_flag(engine);
2716
2717 clear_bit(I915_RESET_ENGINE + engine->id,
2718 &dev_priv->gpu_error.flags);
2719 wake_up_bit(&dev_priv->gpu_error.flags,
2720 I915_RESET_ENGINE + engine->id);
2721 }
2722 }
2723
2724 if (!engine_mask)
2725 goto out;
2726
2727
2728 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
2729 wait_event(dev_priv->gpu_error.reset_queue,
2730 !test_bit(I915_RESET_BACKOFF,
2731 &dev_priv->gpu_error.flags));
2732 goto out;
2733 }
2734
2735
2736 for_each_engine(engine, dev_priv, tmp) {
2737 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
2738 &dev_priv->gpu_error.flags))
2739 wait_on_bit(&dev_priv->gpu_error.flags,
2740 I915_RESET_ENGINE + engine->id,
2741 TASK_UNINTERRUPTIBLE);
2742 }
2743
2744 i915_reset_device(dev_priv);
2745
2746 for_each_engine(engine, dev_priv, tmp) {
2747 clear_bit(I915_RESET_ENGINE + engine->id,
2748 &dev_priv->gpu_error.flags);
2749 }
2750
2751 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
2752 wake_up_all(&dev_priv->gpu_error.reset_queue);
2753
2754out:
2755 intel_runtime_pm_put(dev_priv);
2756}
2757
2758
2759
2760
2761static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
2762{
2763 struct drm_i915_private *dev_priv = to_i915(dev);
2764 unsigned long irqflags;
2765
2766 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2767 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2768 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2769
2770 return 0;
2771}
2772
2773static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
2774{
2775 struct drm_i915_private *dev_priv = to_i915(dev);
2776 unsigned long irqflags;
2777
2778 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2779 i915_enable_pipestat(dev_priv, pipe,
2780 PIPE_START_VBLANK_INTERRUPT_STATUS);
2781 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2782
2783 return 0;
2784}
2785
2786static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2787{
2788 struct drm_i915_private *dev_priv = to_i915(dev);
2789 unsigned long irqflags;
2790 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2791 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2792
2793 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2794 ilk_enable_display_irq(dev_priv, bit);
2795 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2796
2797 return 0;
2798}
2799
2800static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2801{
2802 struct drm_i915_private *dev_priv = to_i915(dev);
2803 unsigned long irqflags;
2804
2805 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2806 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2807 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2808
2809 return 0;
2810}
2811
2812
2813
2814
2815static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
2816{
2817 struct drm_i915_private *dev_priv = to_i915(dev);
2818 unsigned long irqflags;
2819
2820 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2821 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2822 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2823}
2824
2825static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
2826{
2827 struct drm_i915_private *dev_priv = to_i915(dev);
2828 unsigned long irqflags;
2829
2830 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2831 i915_disable_pipestat(dev_priv, pipe,
2832 PIPE_START_VBLANK_INTERRUPT_STATUS);
2833 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2834}
2835
2836static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2837{
2838 struct drm_i915_private *dev_priv = to_i915(dev);
2839 unsigned long irqflags;
2840 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2841 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2842
2843 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2844 ilk_disable_display_irq(dev_priv, bit);
2845 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2846}
2847
2848static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2849{
2850 struct drm_i915_private *dev_priv = to_i915(dev);
2851 unsigned long irqflags;
2852
2853 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2854 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2855 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2856}
2857
/* Mask and clear the south display engine (PCH) interrupt registers. */
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN5_IRQ_RESET(SDE);

	/* CPT/LPT have an extra south error interrupt register to clear. */
	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}
2868
2869
2870
2871
2872
2873
2874
2875
2876
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is
 * enabled - instead we unconditionally enable all PCH interrupt sources
 * here, and then only unmask them as needed with SDEIMR.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev_priv))
		return;

	/* The preceding irq reset should have left SDEIER cleared. */
	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
2888
/* Mask and clear the GT interrupt registers, plus the gen6+ PM set. */
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN5_IRQ_RESET(GT);
	if (INTEL_GEN(dev_priv) >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}
2895
/*
 * Reset all VLV/CHV display interrupt state: ack GT->display faults,
 * clear hotplug and per-pipe status, and mask everything in VLV_IMR.
 * Caller holds dev_priv->irq_lock.
 */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/* Ack any pending GT->display invalid-access status. */
	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	/* Disable hotplug detection and ack any pending hotplug status. */
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe) {
		/* Write-to-clear all per-pipe status bits. */
		I915_WRITE(PIPESTAT(pipe),
			   PIPE_FIFO_UNDERRUN_STATUS |
			   PIPESTAT_INT_STATUS_MASK);
		dev_priv->pipestat_irq_mask[pipe] = 0;
	}

	GEN5_IRQ_RESET(VLV_);
	/* ~0 marks "all masked"; checked in vlv_display_irq_postinstall(). */
	dev_priv->irq_mask = ~0;
}
2918
/*
 * Enable the VLV/CHV display interrupts: per-pipe pipestat events plus
 * the top-level pipe-event/port bits in VLV_IER/VLV_IMR.
 * Caller holds dev_priv->irq_lock; expects a prior vlv_display_irq_reset().
 */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS status is routed through pipe A only. */
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	/* CHV has a third pipe. */
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* vlv_display_irq_reset() must have run first (irq_mask == ~0). */
	WARN_ON(dev_priv->irq_mask != ~0);

	dev_priv->irq_mask = ~enable_mask;

	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}
2948
2949
2950
/* drm_driver.irq_uninstall-level reset for ilk/snb/ivb/hsw:
 * mask/clear display, GT and PCH interrupt registers. */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	/* Gen7 has an extra display-engine error interrupt register. */
	if (IS_GEN7(dev_priv))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev_priv);

	ibx_irq_reset(dev_priv);
}
2965
/* irq_preinstall for VLV: disable the master IRQ, then reset GT and
 * (if enabled) display interrupt state. */
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
2980
/* Mask and clear all four banked gen8 GT interrupt register sets. */
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}
2988
/* Full interrupt reset for gen8+: master off, then GT, per-pipe display
 * (only for powered pipes), port, misc, PCU and PCH registers. */
static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	/* Pipes whose power well is off can't have their registers touched. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}
3011
/* Re-initialize the per-pipe display interrupt registers for the pipes in
 * @pipe_mask after their power well has been turned on (the registers lose
 * their state while the well is off). */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3025
/* Mask and clear the per-pipe display interrupts for the pipes in
 * @pipe_mask before their power well is turned off. */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}
3039
/* irq_preinstall for CHV: master off, gen8-style GT reset, PCU reset,
 * then the VLV-style display reset if display irqs are enabled. */
static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3056
3057static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3058 const u32 hpd[HPD_NUM_PINS])
3059{
3060 struct intel_encoder *encoder;
3061 u32 enabled_irqs = 0;
3062
3063 for_each_intel_encoder(&dev_priv->drm, encoder)
3064 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3065 enabled_irqs |= hpd[encoder->hpd_pin];
3066
3067 return enabled_irqs;
3068}
3069
/*
 * Program the PCH hotplug detection logic: enable detection on ports
 * B/C/D with a 2ms pulse duration filter, and on LPT:LP also enable
 * port A detection.
 */
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short
	 * pulse duration to 2ms (which is the minimum in the Display Port
	 * spec).
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;

	/*
	 * Only LPT:LP gets port A hotplug enabled here; other PCHs leave
	 * it untouched.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3094
3095static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3096{
3097 u32 hotplug_irqs, enabled_irqs;
3098
3099 if (HAS_PCH_IBX(dev_priv)) {
3100 hotplug_irqs = SDE_HOTPLUG_MASK;
3101 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3102 } else {
3103 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3104 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3105 }
3106
3107 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3108
3109 ibx_hpd_detection_setup(dev_priv);
3110}
3111
/* Enable hotplug detection on all SPT PCH ports (A-D in PCH_PORT_HOTPLUG,
 * E in PCH_PORT_HOTPLUG2). */
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* Port E lives in the second hotplug control register. */
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
3128
3129static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3130{
3131 u32 hotplug_irqs, enabled_irqs;
3132
3133 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3134 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3135
3136 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3137
3138 spt_hpd_detection_setup(dev_priv);
3139}
3140
/*
 * Enable CPU-side digital port A hotplug detection with a 2ms short
 * pulse duration filter.
 */
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short
	 * pulse duration to 2ms (which is the minimum in the Display Port
	 * spec).
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
3156
3157static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3158{
3159 u32 hotplug_irqs, enabled_irqs;
3160
3161 if (INTEL_GEN(dev_priv) >= 8) {
3162 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3163 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3164
3165 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3166 } else if (INTEL_GEN(dev_priv) >= 7) {
3167 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3168 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3169
3170 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3171 } else {
3172 hotplug_irqs = DE_DP_A_HOTPLUG;
3173 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3174
3175 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3176 }
3177
3178 ilk_hpd_detection_setup(dev_priv);
3179
3180 ibx_hpd_irq_setup(dev_priv);
3181}
3182
/*
 * Enable BXT DDI A/B/C hotplug detection and program the per-port HPD
 * invert bits according to the VBT, but only for ports whose interrupt
 * is in @enabled_irqs.
 */
static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3213
/* Set up BXT hotplug detection for every possible port. */
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}
3218
3219static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3220{
3221 u32 hotplug_irqs, enabled_irqs;
3222
3223 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3224 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3225
3226 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3227
3228 __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3229}
3230
/* Postinstall for the PCH: unmask the always-wanted south interrupts
 * (GMBUS, AUX, poison) and program hotplug detection. */
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	/* IBX and CPT+ use different south interrupt bit layouts. */
	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}
3253
/* Postinstall for the GT on gen5-7: enable user interrupts per engine,
 * L3 parity reporting where present, and the gen6+ PM interrupt set. */
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev_priv)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
		gt_irqs |= GT_PARITY_ERROR(dev_priv);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev_priv)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_GEN(dev_priv) >= 6) {
		/*
		 * RPS interrupts are enabled on demand by the RPS code;
		 * only the VEBOX user interrupt is unmasked here.
		 */
		if (HAS_VEBOX(dev_priv)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		dev_priv->pm_imr = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
	}
}
3291
/* Postinstall for ilk/snb/ivb/hsw: program DE, GT and PCH interrupt
 * registers in the required order (PCH pre-step before DE init). */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	/* display_mask = always unmasked; extra_mask = enabled but masked,
	 * toggled at runtime (e.g. vblank on/off). Bit layout differs on ivb+. */
	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	/* Must run before the DE registers are initialized (SDEIER quirk). */
	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/*
		 * Enable PCU event interrupts.
		 *
		 * Spinlocking is not required here for correctness since
		 * interrupt setup is guaranteed to be single-threaded, but we
		 * do it anyway to satisfy the lockdep assertions inside
		 * ilk_enable_display_irq().
		 */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
3343
3344void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3345{
3346 lockdep_assert_held(&dev_priv->irq_lock);
3347
3348 if (dev_priv->display_irqs_enabled)
3349 return;
3350
3351 dev_priv->display_irqs_enabled = true;
3352
3353 if (intel_irqs_enabled(dev_priv)) {
3354 vlv_display_irq_reset(dev_priv);
3355 vlv_display_irq_postinstall(dev_priv);
3356 }
3357}
3358
3359void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3360{
3361 lockdep_assert_held(&dev_priv->irq_lock);
3362
3363 if (!dev_priv->display_irqs_enabled)
3364 return;
3365
3366 dev_priv->display_irqs_enabled = false;
3367
3368 if (intel_irqs_enabled(dev_priv))
3369 vlv_display_irq_reset(dev_priv);
3370}
3371
3372
/* Postinstall for VLV: GT first, then display (if enabled), and finally
 * switch on the master interrupt. */
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}
3389
/* Postinstall for the gen8 GT: program the four banked interrupt sets
 * (bank 2 is the PM/RPS bank, managed on demand). */
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
	};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
3421
/* Postinstall for the gen8+ display engine: build the per-pipe, port and
 * misc masks (gen-dependent) and program the registers for powered pipes. */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	/* Gen9 renamed the flip-done/fault bits and grew extra AUX channels. */
	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	/* vblank/underrun are enabled but stay masked until requested. */
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	/* Only touch registers of pipes whose power well is on; the rest are
	 * (re)programmed in gen8_irq_power_well_post_enable(). */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_GEN9_LP(dev_priv))
		bxt_hpd_detection_setup(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		ilk_hpd_detection_setup(dev_priv);
}
3471
/* Postinstall for gen8+: PCH pre-step, GT and DE init, PCH postinstall,
 * and finally enable the master interrupt. */
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
3490
/* Postinstall for CHV: gen8 GT init, VLV-style display init (if enabled),
 * then enable the master interrupt. */
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
3507
/* irq_uninstall for gen8+: just run the full interrupt reset. */
static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv)
		gen8_irq_reset(dev);
}
3517
/* irq_uninstall for VLV: master off, GT reset, HWSTAM masked, then
 * display reset if display irqs were enabled. */
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3537
/* irq_uninstall for CHV: master off, gen8 GT reset, PCU reset, then
 * display reset if display irqs were enabled. */
static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3557
/* irq_uninstall for ilk/snb/ivb/hsw: just run the full interrupt reset. */
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv)
		ironlake_irq_reset(dev);
}
3567
/* irq_preinstall for gen2: clear pipe status and mask/disable the 16bit
 * interrupt registers. */
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
3579
/* irq_postinstall for gen2: unmask pipe events and user interrupts in
 * the 16bit registers and enable per-pipe CRC reporting. */
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
3610
3611
3612
3613
3614static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3615{
3616 struct drm_device *dev = arg;
3617 struct drm_i915_private *dev_priv = to_i915(dev);
3618 u16 iir, new_iir;
3619 u32 pipe_stats[2];
3620 int pipe;
3621 irqreturn_t ret;
3622
3623 if (!intel_irqs_enabled(dev_priv))
3624 return IRQ_NONE;
3625
3626
3627 disable_rpm_wakeref_asserts(dev_priv);
3628
3629 ret = IRQ_NONE;
3630 iir = I915_READ16(IIR);
3631 if (iir == 0)
3632 goto out;
3633
3634 while (iir) {
3635
3636
3637
3638
3639
3640 spin_lock(&dev_priv->irq_lock);
3641 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3642 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3643
3644 for_each_pipe(dev_priv, pipe) {
3645 i915_reg_t reg = PIPESTAT(pipe);
3646 pipe_stats[pipe] = I915_READ(reg);
3647
3648
3649
3650
3651 if (pipe_stats[pipe] & 0x8000ffff)
3652 I915_WRITE(reg, pipe_stats[pipe]);
3653 }
3654 spin_unlock(&dev_priv->irq_lock);
3655
3656 I915_WRITE16(IIR, iir);
3657 new_iir = I915_READ16(IIR);
3658
3659 if (iir & I915_USER_INTERRUPT)
3660 notify_ring(dev_priv->engine[RCS]);
3661
3662 for_each_pipe(dev_priv, pipe) {
3663 int plane = pipe;
3664 if (HAS_FBC(dev_priv))
3665 plane = !plane;
3666
3667 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
3668 drm_handle_vblank(&dev_priv->drm, pipe);
3669
3670 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3671 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
3672
3673 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3674 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3675 pipe);
3676 }
3677
3678 iir = new_iir;
3679 }
3680 ret = IRQ_HANDLED;
3681
3682out:
3683 enable_rpm_wakeref_asserts(dev_priv);
3684
3685 return ret;
3686}
3687
/* irq_uninstall for gen2: mask pipe status (then ack what was pending),
 * mask/disable the 16bit interrupt registers and ack IIR. */
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
3702
/* irq_preinstall for gen3: disable hotplug detection (where present),
 * clear pipe status and mask/disable the interrupt registers. */
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
3720
/* irq_postinstall for gen3: unmask pipe/ASLE/user interrupts, enable the
 * display port interrupt on platforms with hotplug, and enable per-pipe
 * CRC reporting. */
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev_priv);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
3767
3768static irqreturn_t i915_irq_handler(int irq, void *arg)
3769{
3770 struct drm_device *dev = arg;
3771 struct drm_i915_private *dev_priv = to_i915(dev);
3772 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3773 int pipe, ret = IRQ_NONE;
3774
3775 if (!intel_irqs_enabled(dev_priv))
3776 return IRQ_NONE;
3777
3778
3779 disable_rpm_wakeref_asserts(dev_priv);
3780
3781 iir = I915_READ(IIR);
3782 do {
3783 bool irq_received = (iir) != 0;
3784 bool blc_event = false;
3785
3786
3787
3788
3789
3790
3791 spin_lock(&dev_priv->irq_lock);
3792 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3793 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3794
3795 for_each_pipe(dev_priv, pipe) {
3796 i915_reg_t reg = PIPESTAT(pipe);
3797 pipe_stats[pipe] = I915_READ(reg);
3798
3799
3800 if (pipe_stats[pipe] & 0x8000ffff) {
3801 I915_WRITE(reg, pipe_stats[pipe]);
3802 irq_received = true;
3803 }
3804 }
3805 spin_unlock(&dev_priv->irq_lock);
3806
3807 if (!irq_received)
3808 break;
3809
3810
3811 if (I915_HAS_HOTPLUG(dev_priv) &&
3812 iir & I915_DISPLAY_PORT_INTERRUPT) {
3813 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3814 if (hotplug_status)
3815 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3816 }
3817
3818 I915_WRITE(IIR, iir);
3819 new_iir = I915_READ(IIR);
3820
3821 if (iir & I915_USER_INTERRUPT)
3822 notify_ring(dev_priv->engine[RCS]);
3823
3824 for_each_pipe(dev_priv, pipe) {
3825 int plane = pipe;
3826 if (HAS_FBC(dev_priv))
3827 plane = !plane;
3828
3829 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
3830 drm_handle_vblank(&dev_priv->drm, pipe);
3831
3832 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3833 blc_event = true;
3834
3835 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3836 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
3837
3838 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3839 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3840 pipe);
3841 }
3842
3843 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3844 intel_opregion_asle_intr(dev_priv);
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861 ret = IRQ_HANDLED;
3862 iir = new_iir;
3863 } while (iir);
3864
3865 enable_rpm_wakeref_asserts(dev_priv);
3866
3867 return ret;
3868}
3869
/*
 * i915_irq_uninstall - tear down interrupts for gen3 (i915-class) hardware
 * @dev: drm device
 *
 * Disables and acks hotplug, masks everything in HWSTAM/IMR/IER, and
 * clears residual pipe status and IIR bits so no stale events survive.
 */
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Ack any interrupts still pending in IIR */
	I915_WRITE(IIR, I915_READ(IIR));
}
3891
/*
 * i965_irq_preinstall - quiesce interrupts before installing the irq handler
 * @dev: drm device
 *
 * For gen4 (i965/g4x) hardware: disables and acks hotplug, clears pipe
 * status enables, and masks everything in IMR/IER. Unlike gen3 there is
 * no I915_HAS_HOTPLUG() check - all gen4 parts have hotplug support.
 */
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* Writing the latched status bits back to the register clears them */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);	/* mask everything */
	I915_WRITE(IER, 0x0);		/* disable everything */
	POSTING_READ(IER);		/* flush posted writes to hw */
}
3907
/*
 * i965_irq_postinstall - enable interrupts for gen4 (i965/g4x) hardware
 * @dev: drm device
 *
 * Programs IMR/IER, the pipe status enables (GMBUS, CRC), the error mask
 * (with g4x-specific error bits when applicable) and resets the hotplug
 * enables. Returns 0 as required by the drm irq hooks.
 */
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	/* Enable everything unmasked above except the flip-pending bits... */
	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	/* ...plus the render user interrupt */
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection; g4x parts report additional page
	 * table and privilege errors beyond the gen4 baseline.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}
3965
/*
 * i915_hpd_irq_setup - program hotplug detection for gen3/gen4 hardware
 * @dev_priv: i915 device instance
 *
 * Computes the hotplug enable bits for the currently enabled connectors
 * and writes them, along with the CRT detection parameters, to
 * PORT_HOTPLUG_EN. Caller must hold dev_priv->irq_lock.
 */
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
3990
/*
 * i965_irq_handler - top-level interrupt handler for gen4 (i965/g4x) hw
 * @irq: interrupt line number
 * @arg: the drm device registered at request_irq() time
 *
 * Loops re-reading IIR until no interrupt bits remain, acking pipe status
 * bits under the irq lock, then dispatching hotplug, render/BSD user
 * interrupts, vblank, CRC, BLC/ASLE, FIFO underrun and GMBUS events.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port events. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(&dev_priv->drm, pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* GMBUS status is only enabled on pipe A (see postinstall) */
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
4096
/*
 * i965_irq_uninstall - tear down interrupts for gen4 (i965/g4x) hardware
 * @dev: drm device
 *
 * Disables and acks hotplug, masks everything in HWSTAM/IMR/IER, and
 * clears residual pipe status and IIR bits so no stale events survive.
 */
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	/* NOTE(review): to_i915() on a valid dev shouldn't return NULL;
	 * this guard looks redundant - candidate for removal, verify. */
	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Ack any status bits that were latched while still enabled */
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
4119
4120
4121
4122
4123
4124
4125
4126
/**
 * intel_irq_init - initializes irq support structure
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items,
 * timers and all the vtables. It does not setup the interrupt itself
 * though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequences.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	/* Pick the irq/vblank/hotplug vtable per platform generation */
	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}
4263
4264
4265
4266
4267
4268
4269
4270void intel_irq_fini(struct drm_i915_private *i915)
4271{
4272 int i;
4273
4274 for (i = 0; i < MAX_L3_SLICES; ++i)
4275 kfree(i915->l3_parity.remap_info[i]);
4276}
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling still disabled. It is called after intel_irq_init().
 *
 * Returns the result of drm_irq_install() (0 on success, negative errno
 * on failure).
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
4300
4301
4302
4303
4304
4305
4306
4307
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and marks interrupts as
 * disabled. Drivers need to call this to finish the quiescing.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}
4314
4315
4316
4317
4318
4319
4320
4321
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the
 * runtime pm and the system suspend/resume code. The synchronize_irq()
 * ensures any in-flight handler has finished before we return.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}
4328
4329
4330
4331
4332
4333
4334
4335
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the
 * runtime pm and the system suspend/resume code. irqs_enabled is set
 * before the hooks run, mirroring the ordering in intel_irq_install().
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
4342