#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

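/*
 * Map each hotplug pin (HPD_*) to the hardware interrupt bit that reports
 * it, per platform variant.
 */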
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

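/*
 * Reset an interrupt bank: mask everything in IMR, disable IER, and clear
 * IIR twice, since IIR is double buffered and a second event may already be
 * queued behind the first.
 */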
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

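/*
 * IIR should be clear before we (re)enable an interrupt bank; warn and
 * scrub it if it is not.
 */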
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

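/* Update the PORT_HOTPLUG_EN bits; the caller must hold dev_priv->irq_lock. */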
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

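/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 *
 * Locking wrapper around i915_hotplug_interrupt_update_locked(): takes
 * dev_priv->irq_lock around the read-modify-write of PORT_HOTPLUG_EN.
 */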
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

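/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */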
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

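/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */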
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

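/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */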
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	/* IIR is double buffered: clear it twice to flush any queued event. */
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/* Never mask the RP UP_EI_EXPIRED interrupt on gen6/7 (except HSW). */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

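/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */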
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

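/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */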
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

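/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */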
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/* The PSR status bits are not handled via this path; reject them. */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;

	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* The sprite flip-done enable bits don't line up with the status bits. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

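/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */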
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

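/*
 * Frame counter and scanline queries used by the DRM vblank machinery.
 */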
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter. */
	return 0;
}

/*
 * Called from drm generic code, passed a 'crtc', which we use as a pipe index.
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at the leading edge of vblank;
	 * bump the count by one if we have already passed that point in
	 * the current frame.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

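/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */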
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW the DSL register appears to read back 0 if sampled right
	 * at the start of vblank. Re-read it a few times so that we don't
	 * return a stale scanline and confuse the callers.
	 */
	if (HAS_DDI(dev) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for how scanline_offset is
	 * calculated.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/*
		 * No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/*
		 * Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to
		 * avoid the reported position from jumping backwards when
		 * the pixel counter is beyond the length of the shorter
		 * field, just clamp the position to the length of the
		 * shorter field.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * which is one hsync back from the start of vblank. Shift
		 * the pixel count accordingly so that the reported position
		 * matches the scanline-based platforms.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/*
	 * Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	/*
	 * The RPS work is synced during runtime suspend, we don't require a
	 * wakeref. TODO: instead of disabling the asserts make sure that we
	 * always hold an RPM reference while the work is running.
	 */
	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/*
	 * sysfs frequency interfaces may have snuck in while
	 * servicing the interrupt.
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
out:
	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}

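/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */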
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/*
	 * We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(ring);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		intel_lrc_irq_handler(ring);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
		if (iir) {
			I915_WRITE_FW(GEN8_GT_IIR(0), iir);
			ret = IRQ_HANDLED;

			gen8_cs_irq_handler(&dev_priv->ring[RCS],
					    iir, GEN8_RCS_IRQ_SHIFT);

			gen8_cs_irq_handler(&dev_priv->ring[BCS],
					    iir, GEN8_BCS_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
		if (iir) {
			I915_WRITE_FW(GEN8_GT_IIR(1), iir);
			ret = IRQ_HANDLED;

			gen8_cs_irq_handler(&dev_priv->ring[VCS],
					    iir, GEN8_VCS1_IRQ_SHIFT);

			gen8_cs_irq_handler(&dev_priv->ring[VCS2],
					    iir, GEN8_VCS2_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
		if (iir) {
			I915_WRITE_FW(GEN8_GT_IIR(3), iir);
			ret = IRQ_HANDLED;

			gen8_cs_irq_handler(&dev_priv->ring[VECS],
					    iir, GEN8_VECS_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
		if (iir & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      iir & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, iir);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

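/**
 * intel_get_hpd_pins - map hotplug trigger bits to hotplug pins
 * @pin_mask: mask of HPD pins that have triggered, filled in by this function
 * @long_mask: mask of HPD pins that registered a long pulse, filled in too
 * @hotplug_trigger: hotplug trigger bits from the interrupt status
 * @dig_hotplug_reg: digital port hotplug register value
 * @hpd: per-platform pin to trigger-bit mapping table
 * @long_pulse_detect: platform callback distinguishing long vs. short pulses
 */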
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

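/*
 * The RPS events need forcewake, so we add them to a work queue and mask
 * their IMR bits until the work is done. Other interrupts can be processed
 * without the work queue.
 */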
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	return drm_handle_vblank(dev, pipe);
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit).
		 * Hence we need to be careful that we only handle what we
		 * want to handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port. Then clear IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/*
		 * Call regardless, as some status bits might not be
		 * signalled in iir.
		 */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port. Then clear IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/*
		 * Call regardless, as some status bits might not be
		 * signalled in iir.
		 */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			   PORTD_HOTPLUG_STATUS_MASK |
			   PORTC_HOTPLUG_STATUS_MASK |
			   PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);
}

static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

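/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */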
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/*
	 * Disable south interrupts. We'll only write to SDEIIR once, so
	 * further interrupts will be stored on its back queue, and then we'll
	 * be able to process them after we restore SDEIER (as soon as we
	 * restore it, we'll get an interrupt if SDEIIR still has something to
	 * process due to its back queue).
	 */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

2273static irqreturn_t
2274gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2275{
2276 struct drm_device *dev = dev_priv->dev;
2277 irqreturn_t ret = IRQ_NONE;
2278 u32 iir;
2279 enum pipe pipe;
2280
2281 if (master_ctl & GEN8_DE_MISC_IRQ) {
2282 iir = I915_READ(GEN8_DE_MISC_IIR);
2283 if (iir) {
2284 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2285 ret = IRQ_HANDLED;
2286 if (iir & GEN8_DE_MISC_GSE)
2287 intel_opregion_asle_intr(dev);
2288 else
2289 DRM_ERROR("Unexpected DE Misc interrupt\n");
2290 }
2291 else
2292 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2293 }
2294
2295 if (master_ctl & GEN8_DE_PORT_IRQ) {
2296 iir = I915_READ(GEN8_DE_PORT_IIR);
2297 if (iir) {
2298 u32 tmp_mask;
2299 bool found = false;
2300
2301 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2302 ret = IRQ_HANDLED;
2303
2304 tmp_mask = GEN8_AUX_CHANNEL_A;
2305 if (INTEL_INFO(dev_priv)->gen >= 9)
2306 tmp_mask |= GEN9_AUX_CHANNEL_B |
2307 GEN9_AUX_CHANNEL_C |
2308 GEN9_AUX_CHANNEL_D;
2309
2310 if (iir & tmp_mask) {
2311 dp_aux_irq_handler(dev);
2312 found = true;
2313 }
2314
2315 if (IS_BROXTON(dev_priv)) {
2316 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2317 if (tmp_mask) {
2318 bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
2319 found = true;
2320 }
2321 } else if (IS_BROADWELL(dev_priv)) {
2322 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2323 if (tmp_mask) {
2324 ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
2325 found = true;
2326 }
2327 }
2328
2329 if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
2330 gmbus_irq_handler(dev);
2331 found = true;
2332 }
2333
2334 if (!found)
2335 DRM_ERROR("Unexpected DE Port interrupt\n");
2336 }
2337 else
2338 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2339 }
2340
2341 for_each_pipe(dev_priv, pipe) {
2342 u32 flip_done, fault_errors;
2343
2344 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2345 continue;
2346
2347 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2348 if (!iir) {
2349 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2350 continue;
2351 }
2352
2353 ret = IRQ_HANDLED;
2354 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2355
2356 if (iir & GEN8_PIPE_VBLANK &&
2357 intel_pipe_handle_vblank(dev, pipe))
2358 intel_check_page_flip(dev, pipe);
2359
2360 flip_done = iir;
2361 if (INTEL_INFO(dev_priv)->gen >= 9)
2362 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2363 else
2364 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2365
2366 if (flip_done) {
2367 intel_prepare_page_flip(dev, pipe);
2368 intel_finish_page_flip_plane(dev, pipe);
2369 }
2370
2371 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2372 hsw_pipe_crc_irq_handler(dev, pipe);
2373
2374 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2375 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2376
2377 fault_errors = iir;
2378 if (INTEL_INFO(dev_priv)->gen >= 9)
2379 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2380 else
2381 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2382
2383 if (fault_errors)
2384 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2385 pipe_name(pipe),
2386 fault_errors);
2387 }
2388
2389 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2390 master_ctl & GEN8_DE_PCH_IRQ) {
2391 /*
2392  * FIXME(BDW): Assume for now that the new interrupt handling scheme
2393  * also closed the SDE interrupt handling race we've seen on older
2394  * pch-split platforms. But this needs testing.
2395  */
2396 iir = I915_READ(SDEIIR);
2397 if (iir) {
2398 I915_WRITE(SDEIIR, iir);
2399 ret = IRQ_HANDLED;
2400
2401 if (HAS_PCH_SPT(dev_priv))
2402 spt_irq_handler(dev, iir);
2403 else
2404 cpt_irq_handler(dev, iir);
2405 } else {
2406 /*
2407  * Like on previous PCH there seems to be something
2408  * fishy going on with forwarding PCH interrupts.
2409  */
2410 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2411 }
2412 }
2413
2414 return ret;
2415}
2416
2417static irqreturn_t gen8_irq_handler(int irq, void *arg)
2418{
2419 struct drm_device *dev = arg;
2420 struct drm_i915_private *dev_priv = dev->dev_private;
2421 u32 master_ctl;
2422 irqreturn_t ret;
2423
2424 if (!intel_irqs_enabled(dev_priv))
2425 return IRQ_NONE;
2426
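/* Raw _FW access: GEN8_MASTER_IRQ needs no forcewake, so the hot
 * interrupt path can skip the uncore lock entirely. */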
2427 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2428 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2429 if (!master_ctl)
2430 return IRQ_NONE;
2431
2432 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2433
2434 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2435 disable_rpm_wakeref_asserts(dev_priv);
2436
2437 /* Find, clear, then process each source of interrupt */
2438 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2439 ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2440
2441 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2442 POSTING_READ_FW(GEN8_MASTER_IRQ);
2443
2444 enable_rpm_wakeref_asserts(dev_priv);
2445
2446 return ret;
2447}
2448
2449static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2450 bool reset_completed)
2451{
2452 struct intel_engine_cs *ring;
2453 int i;
2454
2455 /*
2456  * Notify all waiters for GPU completion events that reset state has
2457  * been changed, and that they need to restart their wait after
2458  * checking for potential errors (and that they should not continue to
2459  * wait, if we discover the reset has already completed).
2460  */
2461
2462 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2463 for_each_ring(ring, dev_priv, i)
2464 wake_up_all(&ring->irq_queue);
2465
2466 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2467 wake_up_all(&dev_priv->pending_flip_queue);
2468
2469 /*
2470  * Signal tasks blocked in i915_gem_wait_for_error that the pending
2471  * reset state is cleared.
2472  */
2473 if (reset_completed)
2474 wake_up_all(&dev_priv->gpu_error.reset_queue);
2475}
2476
2477/**
2478 * i915_reset_and_wakeup - do process context error handling work
2479 * @dev: drm device
2480 *
2481 * Fire an error uevent so userspace can see that a hang or error
2482 * occurred.
2483 */
2484static void i915_reset_and_wakeup(struct drm_device *dev)
2485{
2486 struct drm_i915_private *dev_priv = to_i915(dev);
2487 struct i915_gpu_error *error = &dev_priv->gpu_error;
2488 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2489 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2490 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2491 int ret;
2492
2493 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2494 /*
2495  * Note that there's only one work item which does gpu resets, so we
2496  * need not worry about concurrent gpu resets potentially incrementing
2497  * error->reset_counter twice. We only need to take care of another
2498  * racing irq/hangcheck declaring the gpu wedged for a second time. A
2499  * quick check for that is good enough: schedule_work ensures the
2500  * correct ordering between hang detection and this work item, and since
2501  * the reset in-progress bit is only ever set by code outside of this
2502  * work we don't need to worry about any other races.
2503  */
2504
2505 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2506 DRM_DEBUG_DRIVER("resetting chip\n");
2507 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2508 reset_event);
2509 /*
2510  * In most cases it's guaranteed that we get here with an RPM
2511  * reference held, for example because there is a pending GPU
2512  * request that won't finish until the reset is done. This
2513  * isn't the case at least when we get here by doing a
2514  * simulated reset via debugfs, so take an RPM reference.
2515  */
2516
2517 intel_runtime_pm_get(dev_priv);
2518
2519 intel_prepare_reset(dev);
2520 /*
2521  * All state reset _must_ be completed before we update the
2522  * reset counter, for otherwise waiters might miss the reset
2523  * pending state and not properly drop locks, resulting in
2524  * deadlocks with the reset work.
2525  */
2526
2527 ret = i915_reset(dev);
2528
2529 intel_finish_reset(dev);
2530
2531 intel_runtime_pm_put(dev_priv);
2532
2533 if (ret == 0) {
2534 /*
2535  * After all the gem state is reset, increment the reset
2536  * counter and wake up everyone waiting for the reset to
2537  * complete.
2538  *
2539  * Since unlock operations are a one-sided barrier only,
2540  * we need to insert a barrier here to order any seqno
2541  * updates before the counter increment.
2542  */
2543
2544 smp_mb__before_atomic();
2545 atomic_inc(&dev_priv->gpu_error.reset_counter);
2546
2547 kobject_uevent_env(&dev->primary->kdev->kobj,
2548 KOBJ_CHANGE, reset_done_event);
2549 } else {
2550 atomic_or(I915_WEDGED, &error->reset_counter);
2551 }
2552
2553 /*
2554  * Note: The wake_up also serves as a memory barrier so that
2555  * waiters see the updated value of the reset counter atomic_t.
2556  */
2557 i915_error_wake_up(dev_priv, true);
2558 }
2559}
2560
2561static void i915_report_and_clear_eir(struct drm_device *dev)
2562{
2563 struct drm_i915_private *dev_priv = dev->dev_private;
2564 uint32_t instdone[I915_NUM_INSTDONE_REG];
2565 u32 eir = I915_READ(EIR);
2566 int pipe, i;
2567
2568 if (!eir)
2569 return;
2570
2571 pr_err("render error detected, EIR: 0x%08x\n", eir);
2572
2573 i915_get_extra_instdone(dev, instdone);
2574
2575 if (IS_G4X(dev)) {
2576 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2577 u32 ipeir = I915_READ(IPEIR_I965);
2578
2579 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2580 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2581 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2582 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2583 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2584 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2585 I915_WRITE(IPEIR_I965, ipeir);
2586 POSTING_READ(IPEIR_I965);
2587 }
2588 if (eir & GM45_ERROR_PAGE_TABLE) {
2589 u32 pgtbl_err = I915_READ(PGTBL_ER);
2590 pr_err("page table error\n");
2591 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2592 I915_WRITE(PGTBL_ER, pgtbl_err);
2593 POSTING_READ(PGTBL_ER);
2594 }
2595 }
2596
2597 if (!IS_GEN2(dev)) {
2598 if (eir & I915_ERROR_PAGE_TABLE) {
2599 u32 pgtbl_err = I915_READ(PGTBL_ER);
2600 pr_err("page table error\n");
2601 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2602 I915_WRITE(PGTBL_ER, pgtbl_err);
2603 POSTING_READ(PGTBL_ER);
2604 }
2605 }
2606
2607 if (eir & I915_ERROR_MEMORY_REFRESH) {
2608 pr_err("memory refresh error:\n");
2609 for_each_pipe(dev_priv, pipe)
2610 pr_err("pipe %c stat: 0x%08x\n",
2611 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2612 /* pipestat has already been acked */
2613 }
2614 if (eir & I915_ERROR_INSTRUCTION) {
2615 pr_err("instruction error\n");
2616 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2617 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2618 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2619 if (INTEL_INFO(dev)->gen < 4) {
2620 u32 ipeir = I915_READ(IPEIR);
2621
2622 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2623 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2624 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2625 I915_WRITE(IPEIR, ipeir);
2626 POSTING_READ(IPEIR);
2627 } else {
2628 u32 ipeir = I915_READ(IPEIR_I965);
2629
2630 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2631 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2632 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2633 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2634 I915_WRITE(IPEIR_I965, ipeir);
2635 POSTING_READ(IPEIR_I965);
2636 }
2637 }
2638
2639 I915_WRITE(EIR, eir);
2640 POSTING_READ(EIR);
2641 eir = I915_READ(EIR);
2642 if (eir) {
2643 /*
2644  * Some errors might have become stuck,
2645  * mask them.
2646  */
2647 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2648 I915_WRITE(EMR, I915_READ(EMR) | eir);
2649 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2650 }
2651}
2652
2653/**
2654 * i915_handle_error - handle a gpu error
2655 * @dev: drm device
2656 * @wedged: whether the error leads to a wedged (hung) gpu
2657 *
2658 * Do some basic checking of register state at error time and
2659 * dump it to the syslog.  Also call i915_capture_error_state() to
2660 * make sure we get a record and make it available in debugfs.
2661 * Fire a uevent so userspace knows something bad happened.
2662 */
2663void i915_handle_error(struct drm_device *dev, bool wedged,
2664 const char *fmt, ...)
2665{
2666 struct drm_i915_private *dev_priv = dev->dev_private;
2667 va_list args;
2668 char error_msg[80];
2669
2670 va_start(args, fmt);
2671 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2672 va_end(args);
2673
2674 i915_capture_error_state(dev, wedged, error_msg);
2675 i915_report_and_clear_eir(dev);
2676
2677 if (wedged) {
2678 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2679 &dev_priv->gpu_error.reset_counter);
2680
2681 /*
2682  * Wakeup waiting processes so that the reset function
2683  * i915_reset_and_wakeup doesn't deadlock trying to grab
2684  * various locks. By bumping the reset counter first, the woken
2685  * processes will see a reset in progress and back off,
2686  * releasing their locks and then wait for the reset completion.
2687  * We must do this for _all_ gpu waiters that might hold locks
2688  * that the reset work needs to acquire.
2689  *
2690  * Note: The wake_up serves as the required memory barrier to
2691  * ensure that the waiters see the updated value of the reset
2692  * counter atomic_t.
2693  */
2694 i915_error_wake_up(dev_priv, false);
2695 }
2696
2697 i915_reset_and_wakeup(dev);
2698}
2699
2700/* Called from drm generic code, passed 'crtc' which
2701 * we use as a pipe index
2702 */
2703static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2704{
2705 struct drm_i915_private *dev_priv = dev->dev_private;
2706 unsigned long irqflags;
2707
2708 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2709 if (INTEL_INFO(dev)->gen >= 4)
2710 i915_enable_pipestat(dev_priv, pipe,
2711 PIPE_START_VBLANK_INTERRUPT_STATUS);
2712 else
2713 i915_enable_pipestat(dev_priv, pipe,
2714 PIPE_VBLANK_INTERRUPT_STATUS);
2715 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2716
2717 return 0;
2718}
2719
2720static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2721{
2722 struct drm_i915_private *dev_priv = dev->dev_private;
2723 unsigned long irqflags;
2724 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2725 DE_PIPE_VBLANK(pipe);
2726
2727 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2728 ilk_enable_display_irq(dev_priv, bit);
2729 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2730
2731 return 0;
2732}
2733
2734static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2735{
2736 struct drm_i915_private *dev_priv = dev->dev_private;
2737 unsigned long irqflags;
2738
2739 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2740 i915_enable_pipestat(dev_priv, pipe,
2741 PIPE_START_VBLANK_INTERRUPT_STATUS);
2742 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2743
2744 return 0;
2745}
2746
2747static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2748{
2749 struct drm_i915_private *dev_priv = dev->dev_private;
2750 unsigned long irqflags;
2751
2752 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2753 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2754 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2755
2756 return 0;
2757}
2758
2759/* Called from drm generic code, passed 'crtc' which
2760 * we use as a pipe index
2761 */
2762static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2763{
2764 struct drm_i915_private *dev_priv = dev->dev_private;
2765 unsigned long irqflags;
2766
2767 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2768 i915_disable_pipestat(dev_priv, pipe,
2769 PIPE_VBLANK_INTERRUPT_STATUS |
2770 PIPE_START_VBLANK_INTERRUPT_STATUS);
2771 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2772}
2773
2774static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2775{
2776 struct drm_i915_private *dev_priv = dev->dev_private;
2777 unsigned long irqflags;
2778 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2779 DE_PIPE_VBLANK(pipe);
2780
2781 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2782 ilk_disable_display_irq(dev_priv, bit);
2783 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2784}
2785
2786static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2787{
2788 struct drm_i915_private *dev_priv = dev->dev_private;
2789 unsigned long irqflags;
2790
2791 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2792 i915_disable_pipestat(dev_priv, pipe,
2793 PIPE_START_VBLANK_INTERRUPT_STATUS);
2794 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2795}
2796
2797static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2798{
2799 struct drm_i915_private *dev_priv = dev->dev_private;
2800 unsigned long irqflags;
2801
2802 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2803 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2804 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2805}
2806
2807static bool
2808ring_idle(struct intel_engine_cs *ring, u32 seqno)
2809{
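/* Idle when no requests are queued, or when the last submitted
 * seqno has already completed. */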
2810 return (list_empty(&ring->request_list) ||
2811 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2812}
2813
2814static bool
2815ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2816{
2817 if (INTEL_INFO(dev)->gen >= 8) {
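/* Gen8+ waits use MI_SEMAPHORE_WAIT: bits 31:23 of IPEHR hold
 * its MI opcode, 0x1c. */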
2818 return (ipehr >> 23) == 0x1c;
2819 } else {
2820 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2821 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2822 MI_SEMAPHORE_REGISTER);
2823 }
2824}
2825
2826static struct intel_engine_cs *
2827semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2828{
2829 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2830 struct intel_engine_cs *signaller;
2831 int i;
2832
2833 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2834 for_each_ring(signaller, dev_priv, i) {
2835 if (ring == signaller)
2836 continue;
2837
2838 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2839 return signaller;
2840 }
2841 } else {
2842 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2843
2844 for_each_ring(signaller, dev_priv, i) {
2845 if (ring == signaller)
2846 continue;
2847
2848 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2849 return signaller;
2850 }
2851 }
2852
2853 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2854 ring->id, ipehr, offset);
2855
2856 return NULL;
2857}
2858
2859static struct intel_engine_cs *
2860semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2861{
2862 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2863 u32 cmd, ipehr, head;
2864 u64 offset = 0;
2865 int i, backwards;
2866
2867 /*
2868  * This function does not support execlist mode - any attempt to
2869  * proceed further into this function will result in a kernel panic
2870  * when dereferencing ring->buffer, which is not set up in execlist
2871  * mode.
2872  *
2873  * The correct way of doing it would be to derive the currently
2874  * executing ring buffer from the current context, which is derived
2875  * from the currently running request. Unfortunately, to get the
2876  * current request we would have to grab the struct_mutex before doing
2877  * anything else, which would be ill-advised since some other thread
2878  * might have grabbed it already and managed to hang itself, causing
2879  * the hang checker to deadlock.
2880  *
2881  * Therefore, this function does not support execlist mode in its
2882  * current form. Just return NULL and move on.
2883  */
2884 if (ring->buffer == NULL)
2885 return NULL;
2886
2887 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2888 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2889 return NULL;
2890
2891 /* HEAD is likely pointing to the dword after the actual command,
2892  * so scan backwards until we find the MBOX. But limit it to the
2893  * semaphore wait command size (4 or 5 dwords), as the command can
2894  * only be so far back. Note that we don't care about ACTHD here
2895  * since that might point at a batch, and semaphores are always
2896  * emitted into the ringbuffer itself.
2897  */
2898
2899 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2900 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2901
2902 for (i = backwards; i; --i) {
2903 /*
2904  * Be paranoid and presume the hw has gone off into the wild -
2905  * our ring is smaller than what the hardware (and hence
2906  * HEAD_ADDR) allows. Also handles wrap-around.
2907  */
2908 head &= ring->buffer->size - 1;
2909
2910 /* This here seems to blow up */
2911 cmd = ioread32(ring->buffer->virtual_start + head);
2912 if (cmd == ipehr)
2913 break;
2914
2915 head -= 4;
2916 }
2917
2918 if (!i)
2919 return NULL;
2920
2921 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
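/* The dword after the wait command holds the semaphore value being
 * waited on; the signaller unblocks the waiter once it advances
 * past it, hence the +1. */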
2922 if (INTEL_INFO(ring->dev)->gen >= 8) {
2923 offset = ioread32(ring->buffer->virtual_start + head + 12);
2924 offset <<= 32;
2925 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2926 }
2927 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2928}
2929
2930static int semaphore_passed(struct intel_engine_cs *ring)
2931{
2932 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2933 struct intel_engine_cs *signaller;
2934 u32 seqno;
2935
2936 ring->hangcheck.deadlock++;
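/* Per-ring wait depth, cleared every hangcheck pass by
 * semaphore_clear_deadlocks(); bounded below to spot deadlocks. */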
2937
2938 signaller = semaphore_waits_for(ring, &seqno);
2939 if (signaller == NULL)
2940 return -1;
2941
2942 /* Prevent pathological recursion due to driver bugs */
2943 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2944 return -1;
2945
2946 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2947 return 1;
2948
2949 /* cursory check for an unkickable deadlock */
2950 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2951 semaphore_passed(signaller) < 0)
2952 return -1;
2953
2954 return 0;
2955}
2956
2957static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2958{
2959 struct intel_engine_cs *ring;
2960 int i;
2961
2962 for_each_ring(ring, dev_priv, i)
2963 ring->hangcheck.deadlock = 0;
2964}
2965
2966static bool subunits_stuck(struct intel_engine_cs *ring)
2967{
2968 u32 instdone[I915_NUM_INSTDONE_REG];
2969 bool stuck;
2970 int i;
2971
2972 if (ring->id != RCS)
2973 return true;
2974
2975 i915_get_extra_instdone(ring->dev, instdone);
2976
2977 /* There might be unstable subunit states even when
2978  * actual head is not moving. Filter out the unstable ones by
2979  * accumulating the undone -> done transitions and only
2980  * consider those as progress.
2981  */
2982 stuck = true;
2983 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
2984 const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
2985
2986 if (tmp != ring->hangcheck.instdone[i])
2987 stuck = false;
2988
2989 ring->hangcheck.instdone[i] |= tmp;
2990 }
2991
2992 return stuck;
2993}
2994
2995static enum intel_ring_hangcheck_action
2996head_stuck(struct intel_engine_cs *ring, u64 acthd)
2997{
2998 if (acthd != ring->hangcheck.acthd) {
2999
3000 /* Clear subunit states on head movement */
3001 memset(ring->hangcheck.instdone, 0,
3002 sizeof(ring->hangcheck.instdone));
3003
3004 if (acthd > ring->hangcheck.max_acthd) {
3005 ring->hangcheck.max_acthd = acthd;
3006 return HANGCHECK_ACTIVE;
3007 }
3008
3009 return HANGCHECK_ACTIVE_LOOP;
3010 }
3011
3012 if (!subunits_stuck(ring))
3013 return HANGCHECK_ACTIVE;
3014
3015 return HANGCHECK_HUNG;
3016}
3017
3018static enum intel_ring_hangcheck_action
3019ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3020{
3021 struct drm_device *dev = ring->dev;
3022 struct drm_i915_private *dev_priv = dev->dev_private;
3023 enum intel_ring_hangcheck_action ha;
3024 u32 tmp;
3025
3026 ha = head_stuck(ring, acthd);
3027 if (ha != HANGCHECK_HUNG)
3028 return ha;
3029
3030 if (IS_GEN2(dev))
3031 return HANGCHECK_HUNG;
3032
3033 /* Is the chip hanging on a WAIT_FOR_EVENT?
3034  * If so we can simply poke the RB_WAIT bit
3035  * and break the hang. This should work on
3036  * all but the second generation chipsets.
3037  */
3038 tmp = I915_READ_CTL(ring);
3039 if (tmp & RING_WAIT) {
3040 i915_handle_error(dev, false,
3041 "Kicking stuck wait on %s",
3042 ring->name);
3043 I915_WRITE_CTL(ring, tmp);
3044 return HANGCHECK_KICK;
3045 }
3046
3047 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3048 switch (semaphore_passed(ring)) {
3049 default:
3050 return HANGCHECK_HUNG;
3051 case 1:
3052 i915_handle_error(dev, false,
3053 "Kicking stuck semaphore on %s",
3054 ring->name);
3055 I915_WRITE_CTL(ring, tmp);
3056 return HANGCHECK_KICK;
3057 case 0:
3058 return HANGCHECK_WAIT;
3059 }
3060 }
3061
3062 return HANGCHECK_HUNG;
3063}
3064
3065/*
3066 * This is called when the chip hasn't reported back with completed
3067 * batchbuffers in a long time. We keep track of per-ring seqno progress,
3068 * and if there is no progress the hangcheck score for that ring is
3069 * increased. Further, acthd is inspected to see if the ring is stuck;
3070 * if so we kick the ring. If we see no progress on three subsequent
3071 * calls we assume the chip is wedged and try to fix it by resetting it.
3072 */
3073static void i915_hangcheck_elapsed(struct work_struct *work)
3074{
3075 struct drm_i915_private *dev_priv =
3076 container_of(work, typeof(*dev_priv),
3077 gpu_error.hangcheck_work.work);
3078 struct drm_device *dev = dev_priv->dev;
3079 struct intel_engine_cs *ring;
3080 int i;
3081 int busy_count = 0, rings_hung = 0;
3082 bool stuck[I915_NUM_RINGS] = { 0 };
3083#define BUSY 1
3084#define KICK 5
3085#define HUNG 20
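/* Score weights added per hangcheck pass; a ring counts as hung once
 * its score reaches HANGCHECK_SCORE_RING_HUNG (checked below). */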
3086
3087 if (!i915.enable_hangcheck)
3088 return;
3089
3090 /*
3091  * The hangcheck work is synced during runtime suspend, we don't
3092  * require a wakeref. TODO: instead of disabling the asserts make
3093  * sure that we hold a reference when this work is running.
3094  */
3095 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3096
3097 /* As enabling the GPU requires fairly extensive mmio access,
3098  * periodically arm the mmio checker to see if we are triggering
3099  * any invalid access.
3100  */
3101 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3102
3103 for_each_ring(ring, dev_priv, i) {
3104 u64 acthd;
3105 u32 seqno;
3106 bool busy = true;
3107
3108 semaphore_clear_deadlocks(dev_priv);
3109
3110 seqno = ring->get_seqno(ring, false);
3111 acthd = intel_ring_get_active_head(ring);
3112
3113 if (ring->hangcheck.seqno == seqno) {
3114 if (ring_idle(ring, seqno)) {
3115 ring->hangcheck.action = HANGCHECK_IDLE;
3116
3117 if (waitqueue_active(&ring->irq_queue)) {
3118 /* Issue a wake-up to catch stuck h/w. */
3119 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3120 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3121 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3122 ring->name);
3123 else
3124 DRM_INFO("Fake missed irq on %s\n",
3125 ring->name);
3126 wake_up_all(&ring->irq_queue);
3127 }
3128 /* Safeguard against driver failure */
3129 ring->hangcheck.score += BUSY;
3130 } else
3131 busy = false;
3132 } else {
3133 /* We always increment the hangcheck score
3134  * if the ring is busy and still processing
3135  * the same request, so that no single request
3136  * can run indefinitely (such as a chain of
3137  * batches). The only time we do not increment
3138  * the hangcheck score on this ring is when
3139  * the ring is in a legitimate wait for
3140  * another ring. In that case the waiting ring is a
3141  * victim and we want to be sure we catch the
3142  * right culprit. Then every time we do kick
3143  * the ring, add a small increment to the
3144  * score so that we can catch a batch that is
3145  * being repeatedly kicked and so responsible
3146  * for stalling the machine.
3147  */
3148 ring->hangcheck.action = ring_stuck(ring,
3149 acthd);
3150
3151 switch (ring->hangcheck.action) {
3152 case HANGCHECK_IDLE:
3153 case HANGCHECK_WAIT:
3154 case HANGCHECK_ACTIVE:
3155 break;
3156 case HANGCHECK_ACTIVE_LOOP:
3157 ring->hangcheck.score += BUSY;
3158 break;
3159 case HANGCHECK_KICK:
3160 ring->hangcheck.score += KICK;
3161 break;
3162 case HANGCHECK_HUNG:
3163 ring->hangcheck.score += HUNG;
3164 stuck[i] = true;
3165 break;
3166 }
3167 }
3168 } else {
3169 ring->hangcheck.action = HANGCHECK_ACTIVE;
3170
3171 /* Gradually reduce the count so that we catch DoS
3172  * attempts across multiple batches.
3173  */
3174 if (ring->hangcheck.score > 0)
3175 ring->hangcheck.score--;
3176
3177 /* Clear head and subunit states on seqno movement */
3178 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3179
3180 memset(ring->hangcheck.instdone, 0,
3181 sizeof(ring->hangcheck.instdone));
3182 }
3183
3184 ring->hangcheck.seqno = seqno;
3185 ring->hangcheck.acthd = acthd;
3186 busy_count += busy;
3187 }
3188
3189 for_each_ring(ring, dev_priv, i) {
3190 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3191 DRM_INFO("%s on %s\n",
3192 stuck[i] ? "stuck" : "no progress",
3193 ring->name);
3194 rings_hung++;
3195 }
3196 }
3197
3198 if (rings_hung) {
3199 i915_handle_error(dev, true, "Ring hung");
3200 goto out;
3201 }
3202
3203 if (busy_count)
3204 /* Reset timer in case chip hangs without another request
3205  * being added */
3206 i915_queue_hangcheck(dev);
3207
3208out:
3209 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3210}
3211
3212void i915_queue_hangcheck(struct drm_device *dev)
3213{
3214 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3215
3216 if (!i915.enable_hangcheck)
3217 return;
3218
3219
3220 /* Don't continually defer the hangcheck so that it is always run at
3221  * least once after work has been scheduled on any ring. Otherwise,
3222  * we will ignore a hung ring if a second ring is kept busy.
3223  */
3224 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3225 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3226}
3227
3228static void ibx_irq_reset(struct drm_device *dev)
3229{
3230 struct drm_i915_private *dev_priv = dev->dev_private;
3231
3232 if (HAS_PCH_NOP(dev))
3233 return;
3234
3235 GEN5_IRQ_RESET(SDE);
3236
3237 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3238 I915_WRITE(SERR_INT, 0xffffffff);
3239}
3240
3241/*
3242 * SDEIER is also touched by the interrupt handler to work around missed PCH
3243 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3244 * instead we unconditionally enable all PCH interrupt sources here, but then
3245 * only unmask them as needed with SDEIMR.
3246 *
3247 * This function needs to be called before interrupts are enabled.
3248 */
3249static void ibx_irq_pre_postinstall(struct drm_device *dev)
3250{
3251 struct drm_i915_private *dev_priv = dev->dev_private;
3252
3253 if (HAS_PCH_NOP(dev))
3254 return;
3255
3256 WARN_ON(I915_READ(SDEIER) != 0);
3257 I915_WRITE(SDEIER, 0xffffffff);
3258 POSTING_READ(SDEIER);
3259}
3260
3261static void gen5_gt_irq_reset(struct drm_device *dev)
3262{
3263 struct drm_i915_private *dev_priv = dev->dev_private;
3264
3265 GEN5_IRQ_RESET(GT);
3266 if (INTEL_INFO(dev)->gen >= 6)
3267 GEN5_IRQ_RESET(GEN6_PM);
3268}
3269
3270/* drm_dma.h hooks
3271 */
3272static void ironlake_irq_reset(struct drm_device *dev)
3273{
3274 struct drm_i915_private *dev_priv = dev->dev_private;
3275
3276 I915_WRITE(HWSTAM, 0xffffffff);
3277
3278 GEN5_IRQ_RESET(DE);
3279 if (IS_GEN7(dev))
3280 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3281
3282 gen5_gt_irq_reset(dev);
3283
3284 ibx_irq_reset(dev);
3285}
3286
3287static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3288{
3289 enum pipe pipe;
3290
3291 i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3292 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3293
3294 for_each_pipe(dev_priv, pipe)
3295 I915_WRITE(PIPESTAT(pipe), 0xffff);
3296
3297 GEN5_IRQ_RESET(VLV_);
3298}
3299
3300static void valleyview_irq_preinstall(struct drm_device *dev)
3301{
3302 struct drm_i915_private *dev_priv = dev->dev_private;
3303
3304 /* VLV magic */
3305 I915_WRITE(VLV_IMR, 0);
3306 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3307 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3308 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3309
3310 gen5_gt_irq_reset(dev);
3311
3312 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3313
3314 vlv_display_irq_reset(dev_priv);
3315}
3316
3317static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3318{
3319 GEN8_IRQ_RESET_NDX(GT, 0);
3320 GEN8_IRQ_RESET_NDX(GT, 1);
3321 GEN8_IRQ_RESET_NDX(GT, 2);
3322 GEN8_IRQ_RESET_NDX(GT, 3);
3323}
3324
3325static void gen8_irq_reset(struct drm_device *dev)
3326{
3327 struct drm_i915_private *dev_priv = dev->dev_private;
3328 int pipe;
3329
3330 I915_WRITE(GEN8_MASTER_IRQ, 0);
3331 POSTING_READ(GEN8_MASTER_IRQ);
3332
3333 gen8_gt_irq_reset(dev_priv);
3334
3335 for_each_pipe(dev_priv, pipe)
3336 if (intel_display_power_is_enabled(dev_priv,
3337 POWER_DOMAIN_PIPE(pipe)))
3338 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3339
3340 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3341 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3342 GEN5_IRQ_RESET(GEN8_PCU_);
3343
3344 if (HAS_PCH_SPLIT(dev))
3345 ibx_irq_reset(dev);
3346}
3347
3348void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3349 unsigned int pipe_mask)
3350{
3351 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3352 enum pipe pipe;
3353
3354 spin_lock_irq(&dev_priv->irq_lock);
3355 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3356 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3357 dev_priv->de_irq_mask[pipe],
3358 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3359 spin_unlock_irq(&dev_priv->irq_lock);
3360}
3361
3362void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3363 unsigned int pipe_mask)
3364{
3365 enum pipe pipe;
3366
3367 spin_lock_irq(&dev_priv->irq_lock);
3368 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3369 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3370 spin_unlock_irq(&dev_priv->irq_lock);
3371
3372 /* make sure we're done processing display irqs */
3373 synchronize_irq(dev_priv->dev->irq);
3374}
3375
3376static void cherryview_irq_preinstall(struct drm_device *dev)
3377{
3378 struct drm_i915_private *dev_priv = dev->dev_private;
3379
3380 I915_WRITE(GEN8_MASTER_IRQ, 0);
3381 POSTING_READ(GEN8_MASTER_IRQ);
3382
3383 gen8_gt_irq_reset(dev_priv);
3384
3385 GEN5_IRQ_RESET(GEN8_PCU_);
3386
3387 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3388
3389 vlv_display_irq_reset(dev_priv);
3390}
3391
3392static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3393 const u32 hpd[HPD_NUM_PINS])
3394{
3395 struct drm_i915_private *dev_priv = to_i915(dev);
3396 struct intel_encoder *encoder;
3397 u32 enabled_irqs = 0;
3398
3399 for_each_intel_encoder(dev, encoder)
3400 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3401 enabled_irqs |= hpd[encoder->hpd_pin];
3402
3403 return enabled_irqs;
3404}
3405
3406static void ibx_hpd_irq_setup(struct drm_device *dev)
3407{
3408 struct drm_i915_private *dev_priv = dev->dev_private;
3409 u32 hotplug_irqs, hotplug, enabled_irqs;
3410
3411 if (HAS_PCH_IBX(dev)) {
3412 hotplug_irqs = SDE_HOTPLUG_MASK;
3413 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3414 } else {
3415 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3416 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3417 }
3418
3419 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3420
3421 /*
3422  * Enable digital hotplug on the PCH, and configure the DP short pulse
3423  * duration to 2ms (which is the minimum in the Display Port spec).
3424  * The pulse duration bits are reserved on LPT+.
3425  */
3426 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3427 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3428 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3429 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3430 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3431 /*
3432  * When CPU and PCH are on the same package, port A
3433  * HPD must be enabled in both north and south.
3434  */
3435 if (HAS_PCH_LPT_LP(dev))
3436 hotplug |= PORTA_HOTPLUG_ENABLE;
3437 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3438}
3439
3440static void spt_hpd_irq_setup(struct drm_device *dev)
3441{
3442 struct drm_i915_private *dev_priv = dev->dev_private;
3443 u32 hotplug_irqs, hotplug, enabled_irqs;
3444
3445 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3446 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3447
3448 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3449
3450 /* Enable digital hotplug on the PCH */
3451 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3452 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3453 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3454 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3455
3456 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3457 hotplug |= PORTE_HOTPLUG_ENABLE;
3458 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3459}
3460
3461static void ilk_hpd_irq_setup(struct drm_device *dev)
3462{
3463 struct drm_i915_private *dev_priv = dev->dev_private;
3464 u32 hotplug_irqs, hotplug, enabled_irqs;
3465
3466 if (INTEL_INFO(dev)->gen >= 8) {
3467 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3468 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3469
3470 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3471 } else if (INTEL_INFO(dev)->gen >= 7) {
3472 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3473 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3474
3475 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3476 } else {
3477 hotplug_irqs = DE_DP_A_HOTPLUG;
3478 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3479
3480 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3481 }
3482
3483 /*
3484  * Enable digital hotplug on the CPU, and configure the DP short pulse
3485  * duration to 2ms (which is the minimum in the Display Port spec).
3486  * The pulse duration bits are reserved on HSW+.
3487  */
3488 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3489 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3490 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3491 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3492
3493 ibx_hpd_irq_setup(dev);
3494}
3495
3496static void bxt_hpd_irq_setup(struct drm_device *dev)
3497{
3498 struct drm_i915_private *dev_priv = dev->dev_private;
3499 u32 hotplug_irqs, hotplug, enabled_irqs;
3500
3501 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3502 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3503
3504 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3505
3506 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3507 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3508 PORTA_HOTPLUG_ENABLE;
3509 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3510}
3511
3512static void ibx_irq_postinstall(struct drm_device *dev)
3513{
3514 struct drm_i915_private *dev_priv = dev->dev_private;
3515 u32 mask;
3516
3517 if (HAS_PCH_NOP(dev))
3518 return;
3519
3520 if (HAS_PCH_IBX(dev))
3521 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3522 else
3523 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3524
3525 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3526 I915_WRITE(SDEIMR, ~mask);
3527}
3528
3529static void gen5_gt_irq_postinstall(struct drm_device *dev)
3530{
3531 struct drm_i915_private *dev_priv = dev->dev_private;
3532 u32 pm_irqs, gt_irqs;
3533
3534 pm_irqs = gt_irqs = 0;
3535
3536 dev_priv->gt_irq_mask = ~0;
3537 if (HAS_L3_DPF(dev)) {
3538 /* L3 parity interrupt is always unmasked. */
3539 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3540 gt_irqs |= GT_PARITY_ERROR(dev);
3541 }
3542
3543 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3544 if (IS_GEN5(dev)) {
3545 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3546 ILK_BSD_USER_INTERRUPT;
3547 } else {
3548 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3549 }
3550
3551 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3552
3553 if (INTEL_INFO(dev)->gen >= 6) {
3554 /*
3555  * RPS interrupts will get enabled/disabled on demand when RPS
3556  * itself is re-enabled.
3557  */
3558 if (HAS_VEBOX(dev))
3559 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3560
3561 dev_priv->pm_irq_mask = 0xffffffff;
3562 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3563 }
3564}
3565
3566static int ironlake_irq_postinstall(struct drm_device *dev)
3567{
3568 struct drm_i915_private *dev_priv = dev->dev_private;
3569 u32 display_mask, extra_mask;
3570
3571 if (INTEL_INFO(dev)->gen >= 7) {
3572 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3573 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3574 DE_PLANEB_FLIP_DONE_IVB |
3575 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3576 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3577 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3578 DE_DP_A_HOTPLUG_IVB);
3579 } else {
3580 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3581 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3582 DE_AUX_CHANNEL_A |
3583 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3584 DE_POISON);
3585 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3586 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3587 DE_DP_A_HOTPLUG);
3588 }
3589
3590 dev_priv->irq_mask = ~display_mask;
3591
3592 I915_WRITE(HWSTAM, 0xeffe);
3593
3594 ibx_irq_pre_postinstall(dev);
3595
3596 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3597
3598 gen5_gt_irq_postinstall(dev);
3599
3600 ibx_irq_postinstall(dev);
3601
3602 if (IS_IRONLAKE_M(dev)) {
3603 /* Enable PCU event interrupts
3604  *
3605  * spinlocking not required here for correctness since interrupt
3606  * setup is guaranteed to run in single-threaded context. But we
3607  * need it to make the assert_spin_locked happy. */
3608 spin_lock_irq(&dev_priv->irq_lock);
3609 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3610 spin_unlock_irq(&dev_priv->irq_lock);
3611 }
3612
3613 return 0;
3614}
3615
3616static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3617{
3618 u32 pipestat_mask;
3619 u32 iir_mask;
3620 enum pipe pipe;
3621
3622 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3623 PIPE_FIFO_UNDERRUN_STATUS;
3624
3625 for_each_pipe(dev_priv, pipe)
3626 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3627 POSTING_READ(PIPESTAT(PIPE_A));
3628
3629 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3630 PIPE_CRC_DONE_INTERRUPT_STATUS;
3631
3632 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3633 for_each_pipe(dev_priv, pipe)
3634 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3635
3636 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3637 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3638 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3639 if (IS_CHERRYVIEW(dev_priv))
3640 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3641 dev_priv->irq_mask &= ~iir_mask;
3642
3643 I915_WRITE(VLV_IIR, iir_mask);
3644 I915_WRITE(VLV_IIR, iir_mask);
3645 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3646 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3647 POSTING_READ(VLV_IMR);
3648}
3649
3650static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3651{
3652 u32 pipestat_mask;
3653 u32 iir_mask;
3654 enum pipe pipe;
3655
3656 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3657 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3658 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3659 if (IS_CHERRYVIEW(dev_priv))
3660 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3661
3662 dev_priv->irq_mask |= iir_mask;
3663 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3664 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3665 I915_WRITE(VLV_IIR, iir_mask);
3666 I915_WRITE(VLV_IIR, iir_mask);
3667 POSTING_READ(VLV_IIR);
3668
3669 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3670 PIPE_CRC_DONE_INTERRUPT_STATUS;
3671
3672 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3673 for_each_pipe(dev_priv, pipe)
3674 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3675
3676 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3677 PIPE_FIFO_UNDERRUN_STATUS;
3678
3679 for_each_pipe(dev_priv, pipe)
3680 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3681 POSTING_READ(PIPESTAT(PIPE_A));
3682}
3683
3684void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3685{
3686 assert_spin_locked(&dev_priv->irq_lock);
3687
3688 if (dev_priv->display_irqs_enabled)
3689 return;
3690
3691 dev_priv->display_irqs_enabled = true;
3692
3693 if (intel_irqs_enabled(dev_priv))
3694 valleyview_display_irqs_install(dev_priv);
3695}
3696
3697void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3698{
3699 assert_spin_locked(&dev_priv->irq_lock);
3700
3701 if (!dev_priv->display_irqs_enabled)
3702 return;
3703
3704 dev_priv->display_irqs_enabled = false;
3705
3706 if (intel_irqs_enabled(dev_priv))
3707 valleyview_display_irqs_uninstall(dev_priv);
3708}
3709
3710static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3711{
3712 dev_priv->irq_mask = ~0;
3713
3714 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3715 POSTING_READ(PORT_HOTPLUG_EN);
3716
3717 I915_WRITE(VLV_IIR, 0xffffffff);
3718 I915_WRITE(VLV_IIR, 0xffffffff);
3719 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3720 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3721 POSTING_READ(VLV_IMR);
3722
3723 /* Interrupt setup is already guaranteed to be single-threaded, this is
3724  * just to make the assert_spin_locked check happy. */
3725 spin_lock_irq(&dev_priv->irq_lock);
3726 if (dev_priv->display_irqs_enabled)
3727 valleyview_display_irqs_install(dev_priv);
3728 spin_unlock_irq(&dev_priv->irq_lock);
3729}
3730
3731static int valleyview_irq_postinstall(struct drm_device *dev)
3732{
3733 struct drm_i915_private *dev_priv = dev->dev_private;
3734
3735 vlv_display_irq_postinstall(dev_priv);
3736
3737 gen5_gt_irq_postinstall(dev);
3738
3739 /* ack & enable invalid PTE error interrupts */
3740#if 0 /* FIXME: add support to irq handler for checking these bits */
3741 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3742 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3743#endif
3744
3745 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3746
3747 return 0;
3748}
3749
3750static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3751{
3752 /* These are interrupts we'll toggle with the ring mask register */
3753 uint32_t gt_interrupts[] = {
3754 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3755 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3756 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3757 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3758 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3759 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3760 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3761 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3762 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3763 0,
3764 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3765 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3766 };
3767
3768 dev_priv->pm_irq_mask = 0xffffffff;
3769 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3770 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3771 /*
3772  * RPS interrupts will get enabled/disabled on demand when RPS itself
3773  * is enabled/disabled. Same will be the case for GuC interrupts.
3774  */
3775 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3776 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3777}
3778
3779static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3780{
3781 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3782 uint32_t de_pipe_enables;
3783 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3784 u32 de_port_enables;
3785 enum pipe pipe;
3786
3787 if (INTEL_INFO(dev_priv)->gen >= 9) {
3788 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3789 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3790 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3791 GEN9_AUX_CHANNEL_D;
3792 if (IS_BROXTON(dev_priv))
3793 de_port_masked |= BXT_DE_PORT_GMBUS;
3794 } else {
3795 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3796 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3797 }
3798
3799 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3800 GEN8_PIPE_FIFO_UNDERRUN;
3801
3802 de_port_enables = de_port_masked;
3803 if (IS_BROXTON(dev_priv))
3804 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3805 else if (IS_BROADWELL(dev_priv))
3806 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3807
3808 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3809 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3810 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3811
3812 for_each_pipe(dev_priv, pipe)
3813 if (intel_display_power_is_enabled(dev_priv,
3814 POWER_DOMAIN_PIPE(pipe)))
3815 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3816 dev_priv->de_irq_mask[pipe],
3817 de_pipe_enables);
3818
3819 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3820}
3821
3822static int gen8_irq_postinstall(struct drm_device *dev)
3823{
3824 struct drm_i915_private *dev_priv = dev->dev_private;
3825
3826 if (HAS_PCH_SPLIT(dev))
3827 ibx_irq_pre_postinstall(dev);
3828
3829 gen8_gt_irq_postinstall(dev_priv);
3830 gen8_de_irq_postinstall(dev_priv);
3831
3832 if (HAS_PCH_SPLIT(dev))
3833 ibx_irq_postinstall(dev);
3834
3835 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3836 POSTING_READ(GEN8_MASTER_IRQ);
3837
3838 return 0;
3839}
3840
3841static int cherryview_irq_postinstall(struct drm_device *dev)
3842{
3843 struct drm_i915_private *dev_priv = dev->dev_private;
3844
3845 vlv_display_irq_postinstall(dev_priv);
3846
3847 gen8_gt_irq_postinstall(dev_priv);
3848
3849 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3850 POSTING_READ(GEN8_MASTER_IRQ);
3851
3852 return 0;
3853}
3854
3855static void gen8_irq_uninstall(struct drm_device *dev)
3856{
3857 struct drm_i915_private *dev_priv = dev->dev_private;
3858
3859 if (!dev_priv)
3860 return;
3861
3862 gen8_irq_reset(dev);
3863}
3864
3865static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3866{
3867 /* Interrupt setup is already guaranteed to be single-threaded, this is
3868  * just to make the assert_spin_locked check happy. */
3869 spin_lock_irq(&dev_priv->irq_lock);
3870 if (dev_priv->display_irqs_enabled)
3871 valleyview_display_irqs_uninstall(dev_priv);
3872 spin_unlock_irq(&dev_priv->irq_lock);
3873
3874 vlv_display_irq_reset(dev_priv);
3875
3876 dev_priv->irq_mask = ~0;
3877}
3878
3879static void valleyview_irq_uninstall(struct drm_device *dev)
3880{
3881 struct drm_i915_private *dev_priv = dev->dev_private;
3882
3883 if (!dev_priv)
3884 return;
3885
3886 I915_WRITE(VLV_MASTER_IER, 0);
3887
3888 gen5_gt_irq_reset(dev);
3889
3890 I915_WRITE(HWSTAM, 0xffffffff);
3891
3892 vlv_display_irq_uninstall(dev_priv);
3893}
3894
3895static void cherryview_irq_uninstall(struct drm_device *dev)
3896{
3897 struct drm_i915_private *dev_priv = dev->dev_private;
3898
3899 if (!dev_priv)
3900 return;
3901
3902 I915_WRITE(GEN8_MASTER_IRQ, 0);
3903 POSTING_READ(GEN8_MASTER_IRQ);
3904
3905 gen8_gt_irq_reset(dev_priv);
3906
3907 GEN5_IRQ_RESET(GEN8_PCU_);
3908
3909 vlv_display_irq_uninstall(dev_priv);
3910}
3911
3912static void ironlake_irq_uninstall(struct drm_device *dev)
3913{
3914 struct drm_i915_private *dev_priv = dev->dev_private;
3915
3916 if (!dev_priv)
3917 return;
3918
3919 ironlake_irq_reset(dev);
3920}
3921
3922static void i8xx_irq_preinstall(struct drm_device * dev)
3923{
3924 struct drm_i915_private *dev_priv = dev->dev_private;
3925 int pipe;
3926
3927 for_each_pipe(dev_priv, pipe)
3928 I915_WRITE(PIPESTAT(pipe), 0);
3929 I915_WRITE16(IMR, 0xffff);
3930 I915_WRITE16(IER, 0x0);
3931 POSTING_READ16(IER);
3932}
3933
3934static int i8xx_irq_postinstall(struct drm_device *dev)
3935{
3936 struct drm_i915_private *dev_priv = dev->dev_private;
3937
3938 I915_WRITE16(EMR,
3939 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3940
3941 /* Unmask the interrupts that we always want on. */
3942 dev_priv->irq_mask =
3943 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3944 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3945 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3946 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3947 I915_WRITE16(IMR, dev_priv->irq_mask);
3948
3949 I915_WRITE16(IER,
3950 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3951 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3952 I915_USER_INTERRUPT);
3953 POSTING_READ16(IER);
3954
3955 /* Interrupt setup is already guaranteed to be single-threaded, this is
3956  * just to make the assert_spin_locked check happy. */
3957 spin_lock_irq(&dev_priv->irq_lock);
3958 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3959 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3960 spin_unlock_irq(&dev_priv->irq_lock);
3961
3962 return 0;
3963}
3964
3965/*
3966 * Returns true when a page flip has completed.
3967 */
3968static bool i8xx_handle_vblank(struct drm_device *dev,
3969 int plane, int pipe, u32 iir)
3970{
3971 struct drm_i915_private *dev_priv = dev->dev_private;
3972 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3973
3974 if (!intel_pipe_handle_vblank(dev, pipe))
3975 return false;
3976
3977 if ((iir & flip_pending) == 0)
3978 goto check_page_flip;
3979
3980 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3981  * to '0' on the following vblank, i.e. IIR has the Pendingflip
3982  * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3983  * the flip is completed (no longer pending). Since this doesn't raise
3984  * an interrupt per se, we watch for the change at vblank.
3985  */
3986 if (I915_READ16(ISR) & flip_pending)
3987 goto check_page_flip;
3988
3989 intel_prepare_page_flip(dev, plane);
3990 intel_finish_page_flip(dev, pipe);
3991 return true;
3992
3993check_page_flip:
3994 intel_check_page_flip(dev, pipe);
3995 return false;
3996}
3997
3998static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3999{
4000 struct drm_device *dev = arg;
4001 struct drm_i915_private *dev_priv = dev->dev_private;
4002 u16 iir, new_iir;
4003 u32 pipe_stats[2];
4004 int pipe;
4005 u16 flip_mask =
4006 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4007 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4008 irqreturn_t ret;
4009
4010 if (!intel_irqs_enabled(dev_priv))
4011 return IRQ_NONE;
4012
4013 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4014 disable_rpm_wakeref_asserts(dev_priv);
4015
4016 ret = IRQ_NONE;
4017 iir = I915_READ16(IIR);
4018 if (iir == 0)
4019 goto out;
4020
4021 while (iir & ~flip_mask) {
4022 /* Can't rely on pipestat interrupt bit in iir as it might
4023  * have been cleared after the pipestat interrupt was received.
4024  * It doesn't set the bit in iir again, but it still produces
4025  * interrupts (for non-MSI).
4026  */
4027 spin_lock(&dev_priv->irq_lock);
4028 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4029 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4030
4031 for_each_pipe(dev_priv, pipe) {
4032 i915_reg_t reg = PIPESTAT(pipe);
4033 pipe_stats[pipe] = I915_READ(reg);
4034
4035 /*
4036  * Clear the PIPE*STAT regs before the IIR
4037  */
4038 if (pipe_stats[pipe] & 0x8000ffff)
4039 I915_WRITE(reg, pipe_stats[pipe]);
4040 }
4041 spin_unlock(&dev_priv->irq_lock);
4042
4043 I915_WRITE16(IIR, iir & ~flip_mask);
4044 new_iir = I915_READ16(IIR);
4045
4046 if (iir & I915_USER_INTERRUPT)
4047 notify_ring(&dev_priv->ring[RCS]);
4048
4049 for_each_pipe(dev_priv, pipe) {
4050 int plane = pipe;
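/* On gen2/3 parts with FBC, plane A is cross-wired to pipe B (and
 * vice versa), so flip the index to address the right plane. */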
4051 if (HAS_FBC(dev))
4052 plane = !plane;
4053
4054 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4055 i8xx_handle_vblank(dev, plane, pipe, iir))
4056 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4057
4058 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4059 i9xx_pipe_crc_irq_handler(dev, pipe);
4060
4061 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4062 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4063 pipe);
4064 }
4065
4066 iir = new_iir;
4067 }
4068 ret = IRQ_HANDLED;
4069
4070out:
4071 enable_rpm_wakeref_asserts(dev_priv);
4072
4073 return ret;
4074}
4075
4076static void i8xx_irq_uninstall(struct drm_device * dev)
4077{
4078 struct drm_i915_private *dev_priv = dev->dev_private;
4079 int pipe;
4080
4081 for_each_pipe(dev_priv, pipe) {
4082 /* Clear enable bits; then clear status bits */
4083 I915_WRITE(PIPESTAT(pipe), 0);
4084 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4085 }
4086 I915_WRITE16(IMR, 0xffff);
4087 I915_WRITE16(IER, 0x0);
4088 I915_WRITE16(IIR, I915_READ16(IIR));
4089}
4090
4091static void i915_irq_preinstall(struct drm_device * dev)
4092{
4093 struct drm_i915_private *dev_priv = dev->dev_private;
4094 int pipe;
4095
4096 if (I915_HAS_HOTPLUG(dev)) {
4097 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4098 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4099 }
4100
4101 I915_WRITE16(HWSTAM, 0xeffe);
4102 for_each_pipe(dev_priv, pipe)
4103 I915_WRITE(PIPESTAT(pipe), 0);
4104 I915_WRITE(IMR, 0xffffffff);
4105 I915_WRITE(IER, 0x0);
4106 POSTING_READ(IER);
4107}
4108
4109static int i915_irq_postinstall(struct drm_device *dev)
4110{
4111 struct drm_i915_private *dev_priv = dev->dev_private;
4112 u32 enable_mask;
4113
4114 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4115
4116 /* Unmask the interrupts that we always want on. */
4117 dev_priv->irq_mask =
4118 ~(I915_ASLE_INTERRUPT |
4119 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4120 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4121 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4122 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4123
4124 enable_mask =
4125 I915_ASLE_INTERRUPT |
4126 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4127 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4128 I915_USER_INTERRUPT;
4129
4130 if (I915_HAS_HOTPLUG(dev)) {
4131 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4132 POSTING_READ(PORT_HOTPLUG_EN);
4133
4134 /* Enable in IER... */
4135 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4136 /* and unmask in IMR */
4137 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4138 }
4139
4140 I915_WRITE(IMR, dev_priv->irq_mask);
4141 I915_WRITE(IER, enable_mask);
4142 POSTING_READ(IER);
4143
4144 i915_enable_asle_pipestat(dev);
4145
4146 /* Interrupt setup is already guaranteed to be single-threaded, this is
4147  * just to make the assert_spin_locked check happy. */
4148 spin_lock_irq(&dev_priv->irq_lock);
4149 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4150 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4151 spin_unlock_irq(&dev_priv->irq_lock);
4152
4153 return 0;
4154}
4155
4156/*
4157 * Returns true when a page flip has completed.
4158 */
4159static bool i915_handle_vblank(struct drm_device *dev,
4160 int plane, int pipe, u32 iir)
4161{
4162 struct drm_i915_private *dev_priv = dev->dev_private;
4163 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4164
4165 if (!intel_pipe_handle_vblank(dev, pipe))
4166 return false;
4167
4168 if ((iir & flip_pending) == 0)
4169 goto check_page_flip;
4170
4171 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4172  * to '0' on the following vblank, i.e. IIR has the Pendingflip
4173  * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4174  * the flip is completed (no longer pending). Since this doesn't raise
4175  * an interrupt per se, we watch for the change at vblank.
4176  */
4177 if (I915_READ(ISR) & flip_pending)
4178 goto check_page_flip;
4179
4180 intel_prepare_page_flip(dev, plane);
4181 intel_finish_page_flip(dev, pipe);
4182 return true;
4183
4184check_page_flip:
4185 intel_check_page_flip(dev, pipe);
4186 return false;
4187}
4188
4189static irqreturn_t i915_irq_handler(int irq, void *arg)
4190{
4191 struct drm_device *dev = arg;
4192 struct drm_i915_private *dev_priv = dev->dev_private;
4193 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4194 u32 flip_mask =
4195 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4196 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4197 int pipe, ret = IRQ_NONE;
4198
4199 if (!intel_irqs_enabled(dev_priv))
4200 return IRQ_NONE;
4201
4202 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4203 disable_rpm_wakeref_asserts(dev_priv);
4204
4205 iir = I915_READ(IIR);
4206 do {
4207 bool irq_received = (iir & ~flip_mask) != 0;
4208 bool blc_event = false;
4209
4210 /* Can't rely on pipestat interrupt bit in iir as it might
4211  * have been cleared after the pipestat interrupt was received.
4212  * It doesn't set the bit in iir again, but it still produces
4213  * interrupts (for non-MSI).
4214  */
4215 spin_lock(&dev_priv->irq_lock);
4216 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4217 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4218
4219 for_each_pipe(dev_priv, pipe) {
4220 i915_reg_t reg = PIPESTAT(pipe);
4221 pipe_stats[pipe] = I915_READ(reg);
4222
4223 /* Clear the PIPE*STAT regs before the IIR */
4224 if (pipe_stats[pipe] & 0x8000ffff) {
4225 I915_WRITE(reg, pipe_stats[pipe]);
4226 irq_received = true;
4227 }
4228 }
4229 spin_unlock(&dev_priv->irq_lock);
4230
4231 if (!irq_received)
4232 break;
4233
4234 /* Consume port.  Then clear IIR or we'll miss events */
4235 if (I915_HAS_HOTPLUG(dev) &&
4236 iir & I915_DISPLAY_PORT_INTERRUPT)
4237 i9xx_hpd_irq_handler(dev);
4238
4239 I915_WRITE(IIR, iir & ~flip_mask);
4240 new_iir = I915_READ(IIR);
4241
4242 if (iir & I915_USER_INTERRUPT)
4243 notify_ring(&dev_priv->ring[RCS]);
4244
4245 for_each_pipe(dev_priv, pipe) {
4246 int plane = pipe;
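/* Same FBC plane/pipe cross-wiring as in i8xx_irq_handler() above. */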
4247 if (HAS_FBC(dev))
4248 plane = !plane;
4249
4250 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4251 i915_handle_vblank(dev, plane, pipe, iir))
4252 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4253
4254 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4255 blc_event = true;
4256
4257 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4258 i9xx_pipe_crc_irq_handler(dev, pipe);
4259
4260 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4261 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4262 pipe);
4263 }
4264
4265 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4266 intel_opregion_asle_intr(dev);
4267
4268 /* With MSI, interrupts are only generated when iir
4269  * transitions from zero to nonzero.  If another bit got
4270  * set while we were handling the existing iir bits, then
4271  * we would never get another interrupt.
4272  *
4273  * This is fine on non-MSI as well, as if we hit this path
4274  * we avoid exiting the interrupt handler only to generate a
4275  * spurious new interrupt.
4276  *
4277  * Note that for MSI this could cause a stray interrupt report
4278  * if an interrupt landed in the time between writing IIR and
4279  * the posting read.  This should be rare enough to never
4280  * trigger the 99% of 100,000 interrupts test for disabling
4281  * stray interrupts.
4282  */
4283 ret = IRQ_HANDLED;
4284 iir = new_iir;
4285 } while (iir & ~flip_mask);
4286
4287 enable_rpm_wakeref_asserts(dev_priv);
4288
4289 return ret;
4290}
4291
4292static void i915_irq_uninstall(struct drm_device * dev)
4293{
4294 struct drm_i915_private *dev_priv = dev->dev_private;
4295 int pipe;
4296
4297 if (I915_HAS_HOTPLUG(dev)) {
4298 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4299 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4300 }
4301
4302 I915_WRITE16(HWSTAM, 0xffff);
4303 for_each_pipe(dev_priv, pipe) {
4304 /* Clear enable bits; then clear status bits */
4305 I915_WRITE(PIPESTAT(pipe), 0);
4306 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4307 }
4308 I915_WRITE(IMR, 0xffffffff);
4309 I915_WRITE(IER, 0x0);
4310
4311 I915_WRITE(IIR, I915_READ(IIR));
4312}
4313
4314static void i965_irq_preinstall(struct drm_device * dev)
4315{
4316 struct drm_i915_private *dev_priv = dev->dev_private;
4317 int pipe;
4318
4319 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4320 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4321
4322 I915_WRITE(HWSTAM, 0xeffe);
4323 for_each_pipe(dev_priv, pipe)
4324 I915_WRITE(PIPESTAT(pipe), 0);
4325 I915_WRITE(IMR, 0xffffffff);
4326 I915_WRITE(IER, 0x0);
4327 POSTING_READ(IER);
4328}
4329
4330static int i965_irq_postinstall(struct drm_device *dev)
4331{
4332 struct drm_i915_private *dev_priv = dev->dev_private;
4333 u32 enable_mask;
4334 u32 error_mask;
4335
4336 /* Unmask the interrupts that we always want on. */
4337 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4338 I915_DISPLAY_PORT_INTERRUPT |
4339 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4340 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4341 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4342 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4343 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4344
4345 enable_mask = ~dev_priv->irq_mask;
4346 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4347 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4348 enable_mask |= I915_USER_INTERRUPT;
4349
4350 if (IS_G4X(dev))
4351 enable_mask |= I915_BSD_USER_INTERRUPT;
4352
4353 /* Interrupt setup is already guaranteed to be single-threaded, this is
4354  * just to make the assert_spin_locked check happy. */
4355 spin_lock_irq(&dev_priv->irq_lock);
4356 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4357 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4358 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4359 spin_unlock_irq(&dev_priv->irq_lock);
4360
4361 /*
4362  * Enable some error detection, note the instruction error mask
4363  * bit is reserved, so we leave it masked.
4364  */
4365 if (IS_G4X(dev)) {
4366 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4367 GM45_ERROR_MEM_PRIV |
4368 GM45_ERROR_CP_PRIV |
4369 I915_ERROR_MEMORY_REFRESH);
4370 } else {
4371 error_mask = ~(I915_ERROR_PAGE_TABLE |
4372 I915_ERROR_MEMORY_REFRESH);
4373 }
4374 I915_WRITE(EMR, error_mask);
4375
4376 I915_WRITE(IMR, dev_priv->irq_mask);
4377 I915_WRITE(IER, enable_mask);
4378 POSTING_READ(IER);
4379
4380 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4381 POSTING_READ(PORT_HOTPLUG_EN);
4382
4383 i915_enable_asle_pipestat(dev);
4384
4385 return 0;
4386}
4387
4388static void i915_hpd_irq_setup(struct drm_device *dev)
4389{
4390 struct drm_i915_private *dev_priv = dev->dev_private;
4391 u32 hotplug_en;
4392
4393 assert_spin_locked(&dev_priv->irq_lock);
4394
4395 /* Note HDMI and DP share hotplug bits */
4396 /* enable bits are the same for all generations */
4397 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4398 /* Programming the CRT detection parameters tends
4399  * to generate a spurious hotplug event about three
4400  * seconds later.  So just do it once.
4401  */
4402 if (IS_G4X(dev))
4403 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4404 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4405
4406 /* Ignore TV since it's buggy */
4407 i915_hotplug_interrupt_update_locked(dev_priv,
4408 HOTPLUG_INT_EN_MASK |
4409 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4410 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4411 hotplug_en);
4412}
4413
4414static irqreturn_t i965_irq_handler(int irq, void *arg)
4415{
4416 struct drm_device *dev = arg;
4417 struct drm_i915_private *dev_priv = dev->dev_private;
4418 u32 iir, new_iir;
4419 u32 pipe_stats[I915_MAX_PIPES];
4420 int ret = IRQ_NONE, pipe;
4421 u32 flip_mask =
4422 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4423 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4424
4425 if (!intel_irqs_enabled(dev_priv))
4426 return IRQ_NONE;
4427
4428 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4429 disable_rpm_wakeref_asserts(dev_priv);
4430
4431 iir = I915_READ(IIR);
4432
4433 for (;;) {
4434 bool irq_received = (iir & ~flip_mask) != 0;
4435 bool blc_event = false;
4436
4437 /* Can't rely on pipestat interrupt bit in iir as it might
4438  * have been cleared after the pipestat interrupt was received.
4439  * It doesn't set the bit in iir again, but it still produces
4440  * interrupts (for non-MSI).
4441  */
4442 spin_lock(&dev_priv->irq_lock);
4443 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4444 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4445
4446 for_each_pipe(dev_priv, pipe) {
4447 i915_reg_t reg = PIPESTAT(pipe);
4448 pipe_stats[pipe] = I915_READ(reg);
4449
4450 /*
4451  * Clear the PIPE*STAT regs before the IIR
4452  */
4453 if (pipe_stats[pipe] & 0x8000ffff) {
4454 I915_WRITE(reg, pipe_stats[pipe]);
4455 irq_received = true;
4456 }
4457 }
4458 spin_unlock(&dev_priv->irq_lock);
4459
4460 if (!irq_received)
4461 break;
4462
4463 ret = IRQ_HANDLED;
4464
4465 /* Consume port.  Then clear IIR or we'll miss events */
4466 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4467 i9xx_hpd_irq_handler(dev);
4468
4469 I915_WRITE(IIR, iir & ~flip_mask);
4470 new_iir = I915_READ(IIR);
4471
4472 if (iir & I915_USER_INTERRUPT)
4473 notify_ring(&dev_priv->ring[RCS]);
4474 if (iir & I915_BSD_USER_INTERRUPT)
4475 notify_ring(&dev_priv->ring[VCS]);
4476
4477 for_each_pipe(dev_priv, pipe) {
4478 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4479 i915_handle_vblank(dev, pipe, pipe, iir))
4480 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4481
4482 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4483 blc_event = true;
4484
4485 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4486 i9xx_pipe_crc_irq_handler(dev, pipe);
4487
4488 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4489 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4490 }
4491
4492 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4493 intel_opregion_asle_intr(dev);
4494
4495 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4496 gmbus_irq_handler(dev);
4497
4498 /* With MSI, interrupts are only generated when iir
4499  * transitions from zero to nonzero.  If another bit got
4500  * set while we were handling the existing iir bits, then
4501  * we would never get another interrupt.
4502  *
4503  * This is fine on non-MSI as well, as if we hit this path
4504  * we avoid exiting the interrupt handler only to generate a
4505  * spurious new interrupt.
4506  *
4507  * Note that for MSI this could cause a stray interrupt report
4508  * if an interrupt landed in the time between writing IIR and
4509  * the posting read.  This should be rare enough to never
4510  * trigger the 99% of 100,000 interrupts test for disabling
4511  * stray interrupts.
4512  */
4513 iir = new_iir;
4514 }
4515
4516 enable_rpm_wakeref_asserts(dev_priv);
4517
4518 return ret;
4519}
4520
4521static void i965_irq_uninstall(struct drm_device * dev)
4522{
4523 struct drm_i915_private *dev_priv = dev->dev_private;
4524 int pipe;
4525
4526 if (!dev_priv)
4527 return;
4528
4529 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4530 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4531
4532 I915_WRITE(HWSTAM, 0xffffffff);
4533 for_each_pipe(dev_priv, pipe)
4534 I915_WRITE(PIPESTAT(pipe), 0);
4535 I915_WRITE(IMR, 0xffffffff);
4536 I915_WRITE(IER, 0x0);
4537
4538 for_each_pipe(dev_priv, pipe)
4539 I915_WRITE(PIPESTAT(pipe),
4540 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4541 I915_WRITE(IIR, I915_READ(IIR));
4542}
4543
4544/**
4545 * intel_irq_init - initializes irq support
4546 * @dev_priv: i915 device instance
4547 *
4548 * This function initializes all the irq support including work items, timers
4549 * and all the vtables. It does not setup the interrupt itself though.
4550 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

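	/* Let's track the enabled rps events */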
	if (IS_VALLEYVIEW(dev_priv))
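		/* WaGsvRC0ResidencyMethod:vlv */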
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

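	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequences.
	 */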
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
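/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence these are separated into init and enable phases.
 */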
int intel_irq_install(struct drm_i915_private *dev_priv)
{
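	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ we actually enable them to avoid
	 * special cases in our ordering checks.
	 */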
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

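/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */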
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

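/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */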
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}

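/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */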
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}