1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/sysrq.h>
32#include <linux/slab.h>
33#include <linux/circ_buf.h>
34#include <drm/drmP.h>
35#include <drm/i915_drm.h>
36#include "i915_drv.h"
37#include "i915_trace.h"
38#include "intel_drv.h"
39
40
41
42
43
44
45
46
47
/* IBX south display engine (SDE) hotplug interrupt bits, indexed by HPD pin. */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};
55
/* CPT south display engine hotplug interrupt bits, indexed by HPD pin. */
static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
63
/* i915-class hotplug interrupt *enable* bits, indexed by HPD pin. */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
72
/* g4x hotplug interrupt *status* bits, indexed by HPD pin. */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
81
/* i915-class hotplug interrupt *status* bits, indexed by HPD pin. */
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
90
91
/*
 * Quiesce one gen8 interrupt bank: mask everything (IMR), disable all
 * sources (IER), then clear IIR twice with a posting read after each
 * write.  NOTE(review): the double IIR clear presumably flushes a second
 * edge latched behind the first -- confirm against bspec.
 */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
101
/* Pre-gen8 variant of GEN8_IRQ_RESET_NDX: same mask/disable/double-clear
 * sequence on the non-banked IMR/IER/IIR registers. */
#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
111
112
113
114
/*
 * WARN if an IIR register still has bits set before it is (re)enabled,
 * then force-clear it (twice, with posting reads) so initialization
 * starts from a clean slate.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)
126
/* Program one gen8 interrupt bank: verify IIR is clear, then set IER and
 * IMR, with a posting read to flush the IMR write. */
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
133
/* Pre-gen8 variant of GEN8_IRQ_INIT_NDX for the non-banked registers. */
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
140
141static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
142
143
144void
145ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
146{
147 assert_spin_locked(&dev_priv->irq_lock);
148
149 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
150 return;
151
152 if ((dev_priv->irq_mask & mask) != 0) {
153 dev_priv->irq_mask &= ~mask;
154 I915_WRITE(DEIMR, dev_priv->irq_mask);
155 POSTING_READ(DEIMR);
156 }
157}
158
159void
160ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
161{
162 assert_spin_locked(&dev_priv->irq_lock);
163
164 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
165 return;
166
167 if ((dev_priv->irq_mask & mask) != mask) {
168 dev_priv->irq_mask |= mask;
169 I915_WRITE(DEIMR, dev_priv->irq_mask);
170 POSTING_READ(DEIMR);
171 }
172}
173
174
175
176
177
178
179
180static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
181 uint32_t interrupt_mask,
182 uint32_t enabled_irq_mask)
183{
184 assert_spin_locked(&dev_priv->irq_lock);
185
186 WARN_ON(enabled_irq_mask & ~interrupt_mask);
187
188 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
189 return;
190
191 dev_priv->gt_irq_mask &= ~interrupt_mask;
192 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
193 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
194 POSTING_READ(GTIMR);
195}
196
/* Unmask @mask in GTIMR; caller must hold dev_priv->irq_lock. */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}
201
/* Mask @mask in GTIMR; caller must hold dev_priv->irq_lock. */
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
206
207static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
208{
209 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
210}
211
212static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
213{
214 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
215}
216
217static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
218{
219 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
220}
221
222
223
224
225
226
227
228static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
229 uint32_t interrupt_mask,
230 uint32_t enabled_irq_mask)
231{
232 uint32_t new_val;
233
234 WARN_ON(enabled_irq_mask & ~interrupt_mask);
235
236 assert_spin_locked(&dev_priv->irq_lock);
237
238 new_val = dev_priv->pm_irq_mask;
239 new_val &= ~interrupt_mask;
240 new_val |= (~enabled_irq_mask & interrupt_mask);
241
242 if (new_val != dev_priv->pm_irq_mask) {
243 dev_priv->pm_irq_mask = new_val;
244 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
245 POSTING_READ(gen6_pm_imr(dev_priv));
246 }
247}
248
249void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
250{
251 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
252 return;
253
254 snb_update_pm_irq(dev_priv, mask, mask);
255}
256
/*
 * Mask @mask in the PM IMR without checking that driver interrupts are
 * still enabled -- used from gen6_disable_rps_interrupts() on the
 * teardown path.  Caller must hold dev_priv->irq_lock.
 */
static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
262
263void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
264{
265 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
266 return;
267
268 __gen6_disable_pm_irq(dev_priv, mask);
269}
270
/*
 * Ack any pending RPS interrupts in the PM IIR and drop the cached
 * pending bits.  NOTE(review): the IIR is written twice -- presumably to
 * flush a second event latched behind the first; confirm against bspec.
 */
void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}
283
/*
 * Enable delivery of the RPS (up/down clocking) interrupts: set IER
 * bits and unmask them in the IMR under the irq lock.  Expects a clean
 * state (no cached pm_iir, no pending IIR bits) and WARNs otherwise.
 */
void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	/* Enable in IER first, then unmask in IMR. */
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
299
300u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
301{
302
303
304
305
306
307
308 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
309 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
310
311 if (INTEL_INFO(dev_priv)->gen >= 8)
312 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
313
314 return mask;
315}
316
/*
 * Tear down RPS interrupt delivery.  Ordering matters: first mark
 * interrupts disabled (so the work handler bails), then cancel the work
 * outside the lock (cancel_work_sync can sleep), then mask/disable the
 * hardware, and finally synchronize_irq() so any in-flight handler has
 * finished before we return.
 */
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	/* Mask everything RPS-related at the PMINTRMSK level. */
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}
339
340
341
342
343
344
345
346void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
347 uint32_t interrupt_mask,
348 uint32_t enabled_irq_mask)
349{
350 uint32_t sdeimr = I915_READ(SDEIMR);
351 sdeimr &= ~interrupt_mask;
352 sdeimr |= (~enabled_irq_mask & interrupt_mask);
353
354 WARN_ON(enabled_irq_mask & ~interrupt_mask);
355
356 assert_spin_locked(&dev_priv->irq_lock);
357
358 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
359 return;
360
361 I915_WRITE(SDEIMR, sdeimr);
362 POSTING_READ(SDEIMR);
363}
364
/*
 * Enable the requested PIPESTAT interrupt sources on @pipe.
 * @enable_mask holds the enable bits (upper half of the register),
 * @status_mask the corresponding status bits tracked in
 * pipestat_irq_mask.  Caller must hold dev_priv->irq_lock.
 */
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	/* Preserve only the currently-programmed enable bits. */
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	/* Reject masks that stray outside their respective halves. */
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	/* Nothing to do if all requested enable bits are already set. */
	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* NOTE(review): status bits are written back too -- presumably
	 * write-1-to-clear semantics; confirm against bspec. */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
391
/*
 * Disable the requested PIPESTAT interrupt sources on @pipe.
 * Mirror image of __i915_enable_pipestat(); caller must hold
 * dev_priv->irq_lock.
 */
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	/* Preserve only the currently-programmed enable bits. */
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	/* Reject masks that stray outside their respective halves. */
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	/* Nothing to do if none of the requested bits are enabled. */
	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
417
/*
 * Derive the PIPESTAT enable bits for a VLV status mask.  Most enable
 * bits sit exactly 16 above their status bit; the sprite flip-done and
 * FIFO-underrun bits are exceptions and are patched up by hand.
 * NOTE(review): PSR status bits are rejected outright -- presumably they
 * have no <<16 enable counterpart; confirm against bspec.
 */
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;

	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* Drop the bits whose enable position does not follow the <<16 rule. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
445
446void
447i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
448 u32 status_mask)
449{
450 u32 enable_mask;
451
452 if (IS_VALLEYVIEW(dev_priv->dev))
453 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
454 status_mask);
455 else
456 enable_mask = status_mask << 16;
457 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
458}
459
460void
461i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
462 u32 status_mask)
463{
464 u32 enable_mask;
465
466 if (IS_VALLEYVIEW(dev_priv->dev))
467 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
468 status_mask);
469 else
470 enable_mask = status_mask << 16;
471 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
472}
473
474
475
476
/*
 * Enable the legacy backlight-change (BLC) pipestat events used by
 * opregion ASLE; only relevant on mobile parts with an ASLE mailbox.
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	/* gen4+ also routes the event on pipe A. */
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
/*
 * Vblank counter callback for i8xx-class hardware: always reports 0.
 * NOTE(review): presumably these parts lack a usable hardware frame
 * counter, so the DRM core falls back to software counting -- confirm.
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	return 0;
}
549
550
551
552
/*
 * Reconstruct a frame counter from the PIPEFRAME (high bits) and
 * PIPEFRAMEPIXEL (low bits + pixel position) registers, adjusted so the
 * counter increments at vblank start rather than at frame start.
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode =
		&intel_crtc->config->base.adjusted_mode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert vblank start from a scanline number to a pixel count... */
	vbl_start *= htotal;

	/* ...measured from the start of hsync on the preceding line. */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * The high and low register halves cannot be read atomically, so
	 * re-read the high word until it is stable around the low read.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * Add one once the pixel position has passed vblank start, so the
	 * returned count flips over at vblank.  Result is 24 bits wide.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
601
602static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
603{
604 struct drm_i915_private *dev_priv = dev->dev_private;
605 int reg = PIPE_FRMCOUNT_GM45(pipe);
606
607 return I915_READ(reg);
608}
609
610
/* Direct readl() of a register offset, bypassing the I915_READ wrapper. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
612
/*
 * Read the current scanline for @crtc from PIPEDSL (raw MMIO, no
 * forcewake bookkeeping) and normalize it with scanline_offset so 0
 * lines up with the start of the frame.  Caller holds uncore.lock.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	/* Interlaced modes count fields, so halve the frame total. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	/* gen2 uses a narrower scanline field than gen3+. */
	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	return (position + crtc->scanline_offset) % vtotal;
}
636
/*
 * Query the current scanout position of @pipe for vblank timestamping.
 * On gen2/G4X/gen5+ the position comes from the scanline counter; on
 * other gens it is derived from the pixel counter.  *vpos/*hpos are
 * reported relative to vblank end (negative while in vblank), and
 * optional *stime/*etime bracket the register read for error bounding.
 * Returns DRM_SCANOUTPOS_* flags, or 0 if the crtc is not active.
 */
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	/* Interlaced modes count fields, so halve the vertical timings. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Take uncore.lock and use raw reads below so the whole
	 * stime/position/etime sample is as tight as possible.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Timestamp taken immediately before the position read. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* These gens have a usable scanline counter. */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/*
		 * Otherwise read the pixel counter and convert all the
		 * vertical timings into pixel units to match.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/* Clamp a possibly out-of-range counter into the frame. */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Rebase the pixel position so it is measured from the
		 * same origin as the scanline counter (start of hsync).
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Timestamp taken immediately after the position read. */
	if (etime)
		*etime = ktime_get();

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * Report position relative to vblank end: negative inside
	 * vblank, counting up through the active region after it.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* Scanline counter: only vertical resolution available. */
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
760
761int intel_get_crtc_scanline(struct intel_crtc *crtc)
762{
763 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
764 unsigned long irqflags;
765 int position;
766
767 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
768 position = __intel_get_crtc_scanline(crtc);
769 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
770
771 return position;
772}
773
774static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
775 int *max_error,
776 struct timeval *vblank_time,
777 unsigned flags)
778{
779 struct drm_crtc *crtc;
780
781 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
782 DRM_ERROR("Invalid crtc %d\n", pipe);
783 return -EINVAL;
784 }
785
786
787 crtc = intel_get_crtc_for_pipe(dev, pipe);
788 if (crtc == NULL) {
789 DRM_ERROR("Invalid crtc %d\n", pipe);
790 return -EINVAL;
791 }
792
793 if (!crtc->state->enable) {
794 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
795 return -EBUSY;
796 }
797
798
799 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
800 vblank_time, flags,
801 crtc,
802 &to_intel_crtc(crtc)->config->base.adjusted_mode);
803}
804
805static bool intel_hpd_irq_event(struct drm_device *dev,
806 struct drm_connector *connector)
807{
808 enum drm_connector_status old_status;
809
810 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
811 old_status = connector->status;
812
813 connector->status = connector->funcs->detect(connector, false);
814 if (old_status == connector->status)
815 return false;
816
817 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
818 connector->base.id,
819 connector->name,
820 drm_get_connector_status_name(old_status),
821 drm_get_connector_status_name(connector->status));
822
823 return true;
824}
825
/*
 * Work handler for digital-port hotplug: snapshot and clear the pending
 * long/short pulse masks under the irq lock, dispatch each port's
 * hpd_pulse handler outside the lock, and re-queue any port whose
 * handler returned IRQ_NONE to the generic hotplug work.
 */
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i;
	u32 old_bits = 0;

	/* Take a snapshot of the pending masks and reset them. */
	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		/* Skip ports with no registered pulse handler. */
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i))  {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			enum irqreturn ret;

			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == IRQ_NONE) {
				/* Fall back to the old channel-less path. */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}
873
874
875
876
/* Delay (ms) before storm-disabled HPD interrupts are re-enabled: 2 min. */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
878
/*
 * Generic hotplug work handler.  Two passes over the connector list:
 * under the irq lock, demote storm-flagged connectors from HPD to
 * polling; then, lock dropped, run each affected encoder's hot_plug
 * hook and re-detect, finally sending a single hotplug uevent if any
 * status changed.  Holds mode_config.mutex throughout.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	/* Consume the pending event bits atomically. */
	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		/* Interrupt storm: switch this connector over to polling. */
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}

	/* If we downgraded any connector, make sure polling is running and
	 * schedule the delayed work that re-enables HPD later. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	/* Second pass, outside the irq lock: notify encoders and re-detect. */
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
948
949static void ironlake_rps_change_irq_handler(struct drm_device *dev)
950{
951 struct drm_i915_private *dev_priv = dev->dev_private;
952 u32 busy_up, busy_down, max_avg, min_avg;
953 u8 new_delay;
954
955 spin_lock(&mchdev_lock);
956
957 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
958
959 new_delay = dev_priv->ips.cur_delay;
960
961 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
962 busy_up = I915_READ(RCPREVBSYTUPAVG);
963 busy_down = I915_READ(RCPREVBSYTDNAVG);
964 max_avg = I915_READ(RCBMAXAVG);
965 min_avg = I915_READ(RCBMINAVG);
966
967
968 if (busy_up > max_avg) {
969 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
970 new_delay = dev_priv->ips.cur_delay - 1;
971 if (new_delay < dev_priv->ips.max_delay)
972 new_delay = dev_priv->ips.max_delay;
973 } else if (busy_down < min_avg) {
974 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
975 new_delay = dev_priv->ips.cur_delay + 1;
976 if (new_delay > dev_priv->ips.min_delay)
977 new_delay = dev_priv->ips.min_delay;
978 }
979
980 if (ironlake_set_drps(dev, new_delay))
981 dev_priv->ips.cur_delay = new_delay;
982
983 spin_unlock(&mchdev_lock);
984
985 return;
986}
987
988static void notify_ring(struct drm_device *dev,
989 struct intel_engine_cs *ring)
990{
991 if (!intel_ring_initialized(ring))
992 return;
993
994 trace_i915_gem_request_notify(ring);
995
996 wake_up_all(&ring->irq_queue);
997}
998
/*
 * Sample the CZ timestamp and the render/media C0 residency counters
 * into @ei.  The timestamp is read first, before the two counters.
 */
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
1006
/*
 * Decide whether combined render+media C0 residency between @old and
 * @now exceeds @threshold percent of the elapsed interval.  Returns
 * false when @old has never been sampled (cz_clock == 0).
 * NOTE(review): both sides are scaled into matching units via mem_freq
 * and VLV_CZ_CLOCK_TO_MILLI_SEC -- the exact unit algebra is not
 * verifiable from this file; confirm against the VLV PUNIT docs.
 */
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}
1030
/* Re-baseline both EI (evaluation interval) samples to "now". */
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}
1036
/*
 * VLV workaround: translate EI-expired interrupts into synthetic
 * up/down-threshold events based on measured C0 residency, since the
 * hardware threshold events are not used directly on this platform.
 */
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		/* Below the down threshold -> request a downclock. */
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  VLV_RP_DOWN_EI_THRESHOLD))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		/* Above the up threshold -> request an upclock. */
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 VLV_RP_UP_EI_THRESHOLD))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}
1067
/*
 * RPS work handler: consume the cached pm_iir bits, re-enable the PM
 * interrupts, and pick a new GPU frequency from the up/down threshold
 * and timeout events.  Successive same-direction events grow the step
 * (last_adj doubling); the result is clamped to the softlimits.
 */
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Bail if gen6_disable_rps_interrupts() already ran. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Re-enable the interrupts we consumed the bits for. */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Only RPS events should have been latched for us. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	/* VLV: synthesize threshold events from C0 residency. */
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encodings, hence the step of 2. */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/* Never upclock to below the efficient frequency. */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		/* Idle timeout: drop straight to an economical frequency. */
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encodings, hence the step of 2. */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else {
		new_delay = dev_priv->rps.cur_freq;
	}

	/* Keep the requested frequency within the user/hw softlimits. */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
/*
 * Work handler for L3 parity errors: with DOP clock gating temporarily
 * disabled, read the error location (row/bank/subbank) for each flagged
 * slice, clear the error latch, and emit a udev CHANGE event describing
 * it.  Re-enables the parity interrupt on exit.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* struct_mutex serializes us against the rest of the driver. */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* Nothing to do if no slice was flagged by the irq handler. */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	/* DOP clock gating must be off while poking the error registers. */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		/* Each slice's error register is 0x200 apart. */
		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Clear the latch and re-arm error detection. */
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore the original clock-gating configuration. */
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	/* Unmask the parity interrupt again (masked in the irq handler). */
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
1226
1227static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1228{
1229 struct drm_i915_private *dev_priv = dev->dev_private;
1230
1231 if (!HAS_L3_DPF(dev))
1232 return;
1233
1234 spin_lock(&dev_priv->irq_lock);
1235 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1236 spin_unlock(&dev_priv->irq_lock);
1237
1238 iir &= GT_PARITY_ERROR(dev);
1239 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1240 dev_priv->l3_parity.which_slice |= 1 << 1;
1241
1242 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1243 dev_priv->l3_parity.which_slice |= 1 << 0;
1244
1245 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1246}
1247
1248static void ilk_gt_irq_handler(struct drm_device *dev,
1249 struct drm_i915_private *dev_priv,
1250 u32 gt_iir)
1251{
1252 if (gt_iir &
1253 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1254 notify_ring(dev, &dev_priv->ring[RCS]);
1255 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1256 notify_ring(dev, &dev_priv->ring[VCS]);
1257}
1258
1259static void snb_gt_irq_handler(struct drm_device *dev,
1260 struct drm_i915_private *dev_priv,
1261 u32 gt_iir)
1262{
1263
1264 if (gt_iir &
1265 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1266 notify_ring(dev, &dev_priv->ring[RCS]);
1267 if (gt_iir & GT_BSD_USER_INTERRUPT)
1268 notify_ring(dev, &dev_priv->ring[VCS]);
1269 if (gt_iir & GT_BLT_USER_INTERRUPT)
1270 notify_ring(dev, &dev_priv->ring[BCS]);
1271
1272 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1273 GT_BSD_CS_ERROR_INTERRUPT |
1274 GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1275 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1276
1277 if (gt_iir & GT_PARITY_ERROR(dev))
1278 ivybridge_parity_error_irq_handler(dev, gt_iir);
1279}
1280
1281static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1282 struct drm_i915_private *dev_priv,
1283 u32 master_ctl)
1284{
1285 struct intel_engine_cs *ring;
1286 u32 rcs, bcs, vcs;
1287 uint32_t tmp = 0;
1288 irqreturn_t ret = IRQ_NONE;
1289
1290 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1291 tmp = I915_READ(GEN8_GT_IIR(0));
1292 if (tmp) {
1293 I915_WRITE(GEN8_GT_IIR(0), tmp);
1294 ret = IRQ_HANDLED;
1295
1296 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1297 ring = &dev_priv->ring[RCS];
1298 if (rcs & GT_RENDER_USER_INTERRUPT)
1299 notify_ring(dev, ring);
1300 if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1301 intel_lrc_irq_handler(ring);
1302
1303 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1304 ring = &dev_priv->ring[BCS];
1305 if (bcs & GT_RENDER_USER_INTERRUPT)
1306 notify_ring(dev, ring);
1307 if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1308 intel_lrc_irq_handler(ring);
1309 } else
1310 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1311 }
1312
1313 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1314 tmp = I915_READ(GEN8_GT_IIR(1));
1315 if (tmp) {
1316 I915_WRITE(GEN8_GT_IIR(1), tmp);
1317 ret = IRQ_HANDLED;
1318
1319 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1320 ring = &dev_priv->ring[VCS];
1321 if (vcs & GT_RENDER_USER_INTERRUPT)
1322 notify_ring(dev, ring);
1323 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1324 intel_lrc_irq_handler(ring);
1325
1326 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1327 ring = &dev_priv->ring[VCS2];
1328 if (vcs & GT_RENDER_USER_INTERRUPT)
1329 notify_ring(dev, ring);
1330 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1331 intel_lrc_irq_handler(ring);
1332 } else
1333 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1334 }
1335
1336 if (master_ctl & GEN8_GT_PM_IRQ) {
1337 tmp = I915_READ(GEN8_GT_IIR(2));
1338 if (tmp & dev_priv->pm_rps_events) {
1339 I915_WRITE(GEN8_GT_IIR(2),
1340 tmp & dev_priv->pm_rps_events);
1341 ret = IRQ_HANDLED;
1342 gen6_rps_irq_handler(dev_priv, tmp);
1343 } else
1344 DRM_ERROR("The master control interrupt lied (PM)!\n");
1345 }
1346
1347 if (master_ctl & GEN8_GT_VECS_IRQ) {
1348 tmp = I915_READ(GEN8_GT_IIR(3));
1349 if (tmp) {
1350 I915_WRITE(GEN8_GT_IIR(3), tmp);
1351 ret = IRQ_HANDLED;
1352
1353 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1354 ring = &dev_priv->ring[VECS];
1355 if (vcs & GT_RENDER_USER_INTERRUPT)
1356 notify_ring(dev, ring);
1357 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1358 intel_lrc_irq_handler(ring);
1359 } else
1360 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1361 }
1362
1363 return ret;
1364}
1365
1366#define HPD_STORM_DETECT_PERIOD 1000
1367#define HPD_STORM_THRESHOLD 5
1368
1369static int pch_port_to_hotplug_shift(enum port port)
1370{
1371 switch (port) {
1372 case PORT_A:
1373 case PORT_E:
1374 default:
1375 return -1;
1376 case PORT_B:
1377 return 0;
1378 case PORT_C:
1379 return 8;
1380 case PORT_D:
1381 return 16;
1382 }
1383}
1384
1385static int i915_port_to_hotplug_shift(enum port port)
1386{
1387 switch (port) {
1388 case PORT_A:
1389 case PORT_E:
1390 default:
1391 return -1;
1392 case PORT_B:
1393 return 17;
1394 case PORT_C:
1395 return 19;
1396 case PORT_D:
1397 return 21;
1398 }
1399}
1400
1401static inline enum port get_port_from_pin(enum hpd_pin pin)
1402{
1403 switch (pin) {
1404 case HPD_PORT_B:
1405 return PORT_B;
1406 case HPD_PORT_C:
1407 return PORT_C;
1408 case HPD_PORT_D:
1409 return PORT_D;
1410 default:
1411 return PORT_A;
1412 }
1413}
1414
/*
 * Shared hotplug interrupt handler: demultiplex the triggered HPD
 * pins, classify digital-port events as long/short pulses, run
 * interrupt-storm detection and schedule the deferred work items.
 *
 * @hotplug_trigger: triggered HPD bits, encoded per the @hpd table
 * @dig_hotplug_reg: PCH digital hotplug register snapshot (callers on
 *                   the gmch path pass 0 and the long/short bit is
 *                   taken from @hotplug_trigger instead)
 * @hpd:             per-pin table mapping hpd_pin index -> trigger bit
 *
 * Runs in hard-irq context; dev_priv->irq_lock is taken internally.
 */
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	/* First pass: digital ports only.  Index starts at 1, skipping
	 * pin 0 (presumably HPD_NONE/unused — confirm against enum). */
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		/* get_port_from_pin() returns PORT_A (== 0) for
		 * non-digital pins, so the check below skips them. */
		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			/* Long vs short pulse lives in a per-port field:
			 * in dig_hotplug_reg on PCH, in the trigger bits
			 * themselves on gmch platforms. */
			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");

			/* For long HPDs we also let the hotplug path see the
			 * event (via dig_port_mask); short HPDs are removed
			 * from the trigger so only the dig-port work runs. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {

				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	/* Second pass: per-pin event bookkeeping and storm detection. */
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * An interrupt on a disabled pin is unexpected on
			 * most platforms; warn except on pre-gen5 and VLV
			 * where this apparently can happen legitimately.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		/* Long-pulse digital-port events were routed to the
		 * dig-port work above; don't double-report them here. */
		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		/* Storm detection: more than HPD_STORM_THRESHOLD events
		 * within HPD_STORM_DETECT_PERIOD ms disables the pin. */
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	/* Reprogram the hotplug interrupts to drop any stormy pin. */
	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Queue the deferred work outside the lock: the work items take
	 * their own locks and may poke at hardware.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
1526
1527static void gmbus_irq_handler(struct drm_device *dev)
1528{
1529 struct drm_i915_private *dev_priv = dev->dev_private;
1530
1531 wake_up_all(&dev_priv->gmbus_wait_queue);
1532}
1533
1534static void dp_aux_irq_handler(struct drm_device *dev)
1535{
1536 struct drm_i915_private *dev_priv = dev->dev_private;
1537
1538 wake_up_all(&dev_priv->gmbus_wait_queue);
1539}
1540
#if defined(CONFIG_DEBUG_FS)
/*
 * Push one pipe-CRC result into the per-pipe circular buffer consumed
 * by the debugfs interface, tagging it with the current frame counter,
 * and wake any reader.  Drops the sample (with a debug/error message)
 * if CRC capture isn't armed or the buffer is full.
 */
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	/* NULL entries means CRC capture was never armed for this pipe. */
	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	/* Buffer full: the debugfs reader isn't keeping up. */
	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	/* Advance head; the entry count is a power of two, so mask wraps. */
	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
/* Without debugfs there is no CRC consumer; compile to a no-op. */
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
1592
1593
1594static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1595{
1596 struct drm_i915_private *dev_priv = dev->dev_private;
1597
1598 display_pipe_crc_irq_handler(dev, pipe,
1599 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1600 0, 0, 0, 0);
1601}
1602
1603static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1604{
1605 struct drm_i915_private *dev_priv = dev->dev_private;
1606
1607 display_pipe_crc_irq_handler(dev, pipe,
1608 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1609 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1610 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1611 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1612 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1613}
1614
1615static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1616{
1617 struct drm_i915_private *dev_priv = dev->dev_private;
1618 uint32_t res1, res2;
1619
1620 if (INTEL_INFO(dev)->gen >= 3)
1621 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1622 else
1623 res1 = 0;
1624
1625 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1626 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1627 else
1628 res2 = 0;
1629
1630 display_pipe_crc_irq_handler(dev, pipe,
1631 I915_READ(PIPE_CRC_RES_RED(pipe)),
1632 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1633 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1634 res1, res2);
1635}
1636
1637
1638
1639
/*
 * Handle PM interrupts: stash RPS events for the rps work item (masking
 * them until the worker runs), and on pre-gen8 hardware with a VEBOX
 * also dispatch the VEBOX user interrupt / command parser error.
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		/* Mask the rps interrupts until the work item runs;
		 * it accumulates into rps.pm_iir under the same lock. */
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	/* On gen8+ the VECS interrupts are handled elsewhere (GT IIR 3). */
	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}
1663
1664static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1665{
1666 if (!drm_handle_vblank(dev, pipe))
1667 return false;
1668
1669 return true;
1670}
1671
/*
 * Handle the PIPESTAT-based display events (vblank, flip done, CRC
 * done, FIFO underrun, GMBUS) for VLV/CHV.  First pass snapshots and
 * acks the relevant status bits under irq_lock; second pass dispatches
 * them without the lock held.
 */
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits
		 * (like the underrun bit) do not generate interrupts at
		 * all, so only consider a pipe's enabled bits plus the
		 * always-interesting underrun status.
		 */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		/* Only look at the pipe's enabled status bits when its
		 * event interrupt actually fired. */
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR — but only when
		 * there is actually something to ack, since writing the
		 * register also clears the enable bits we read back.
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	/* Dispatch outside the lock. */
	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* GMBUS status is only reported on pipe A's PIPESTAT. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
1744
1745static void i9xx_hpd_irq_handler(struct drm_device *dev)
1746{
1747 struct drm_i915_private *dev_priv = dev->dev_private;
1748 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1749
1750 if (hotplug_status) {
1751 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1752
1753
1754
1755
1756 POSTING_READ(PORT_HOTPLUG_STAT);
1757
1758 if (IS_G4X(dev)) {
1759 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1760
1761 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
1762 } else {
1763 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1764
1765 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
1766 }
1767
1768 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1769 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1770 dp_aux_irq_handler(dev);
1771 }
1772}
1773
/*
 * Top-level interrupt handler for Valleyview.  Loops reading and
 * acking GTIIR, GEN6_PMIIR and VLV_IIR until all three are quiescent,
 * dispatching each source after the ack.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt. */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port events before clearing IIR. */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		/* Called unconditionally: PIPESTAT bits may be set even
		 * without the corresponding IIR bit (see that handler). */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}
1820
/*
 * Top-level interrupt handler for Cherryview: gen8-style GT banks
 * combined with VLV-style display interrupts.  Master interrupt
 * control is disabled around each iteration.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Disable further interrupts while servicing this one. */
		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt. */

		if (iir) {
			/* Consume port events before clearing IIR. */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Called unconditionally: PIPESTAT bits may be set even
		 * without the corresponding IIR bit (see that handler). */
		valleyview_pipestat_irq_handler(dev, iir);

		/* Re-enable the master interrupt and flush. */
		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
1863
1864static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1865{
1866 struct drm_i915_private *dev_priv = dev->dev_private;
1867 int pipe;
1868 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1869 u32 dig_hotplug_reg;
1870
1871 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1872 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1873
1874 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1875
1876 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1877 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1878 SDE_AUDIO_POWER_SHIFT);
1879 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1880 port_name(port));
1881 }
1882
1883 if (pch_iir & SDE_AUX_MASK)
1884 dp_aux_irq_handler(dev);
1885
1886 if (pch_iir & SDE_GMBUS)
1887 gmbus_irq_handler(dev);
1888
1889 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1890 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1891
1892 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1893 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1894
1895 if (pch_iir & SDE_POISON)
1896 DRM_ERROR("PCH poison interrupt\n");
1897
1898 if (pch_iir & SDE_FDI_MASK)
1899 for_each_pipe(dev_priv, pipe)
1900 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1901 pipe_name(pipe),
1902 I915_READ(FDI_RX_IIR(pipe)));
1903
1904 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1905 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1906
1907 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1908 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1909
1910 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1911 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1912
1913 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1914 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1915}
1916
1917static void ivb_err_int_handler(struct drm_device *dev)
1918{
1919 struct drm_i915_private *dev_priv = dev->dev_private;
1920 u32 err_int = I915_READ(GEN7_ERR_INT);
1921 enum pipe pipe;
1922
1923 if (err_int & ERR_INT_POISON)
1924 DRM_ERROR("Poison interrupt\n");
1925
1926 for_each_pipe(dev_priv, pipe) {
1927 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1928 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1929
1930 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1931 if (IS_IVYBRIDGE(dev))
1932 ivb_pipe_crc_irq_handler(dev, pipe);
1933 else
1934 hsw_pipe_crc_irq_handler(dev, pipe);
1935 }
1936 }
1937
1938 I915_WRITE(GEN7_ERR_INT, err_int);
1939}
1940
1941static void cpt_serr_int_handler(struct drm_device *dev)
1942{
1943 struct drm_i915_private *dev_priv = dev->dev_private;
1944 u32 serr_int = I915_READ(SERR_INT);
1945
1946 if (serr_int & SERR_INT_POISON)
1947 DRM_ERROR("PCH poison interrupt\n");
1948
1949 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1950 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1951
1952 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1953 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1954
1955 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1956 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1957
1958 I915_WRITE(SERR_INT, serr_int);
1959}
1960
/*
 * Service CPT/PPT (south display) interrupts: hotplug, audio power,
 * AUX, GMBUS, audio content-protection, FDI and the south error
 * register.
 */
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	/* Snapshot and ack the digital hotplug status before dispatch. */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
2001
/*
 * Handle ILK/SNB display-engine interrupts: AUX, GSE/opregion, poison,
 * per-pipe vblank/underrun/CRC/flip-done, chained PCH events and the
 * ILK-only PCU (rps) event.
 */
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* Plane flip-done: complete any pending page flip. */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* Check for PCH events chained through the DE interrupt. */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* Ack SDEIIR only after the handlers consumed it. */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
2050
/*
 * Handle IVB/HSW display-engine interrupts: error register, AUX,
 * GSE/opregion, per-pipe vblank/flip-done, and chained PCH events.
 */
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* Plane flip-done: complete any pending page flip. */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* Check for PCH events chained through the DE interrupt. */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* Ack SDEIIR only after the handler consumed it. */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
2087
2088
2089
2090
2091
2092
2093
2094
2095
/*
 * Top-level interrupt handler for ILK through HSW.  Masks the display
 * master interrupt (and SDEIER, to avoid missing PCH edges while DEIIR
 * is being serviced), dispatches GT, display and PM interrupts, then
 * restores both enables.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* Check for unclaimed-register errors before touching anything. */
	intel_uncore_check_errors(dev);

	/* Disable the display master interrupt while we service this one. */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/*
	 * Also disable the south (PCH) interrupt enable so that new PCH
	 * events latch into SDEIIR instead of being lost while the DE
	 * bits are processed; restored at the end.  Skipped when the
	 * platform has no PCH at all.
	 */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt. */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	/* Restore the master and PCH interrupt enables. */
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
2166
2167static irqreturn_t gen8_irq_handler(int irq, void *arg)
2168{
2169 struct drm_device *dev = arg;
2170 struct drm_i915_private *dev_priv = dev->dev_private;
2171 u32 master_ctl;
2172 irqreturn_t ret = IRQ_NONE;
2173 uint32_t tmp = 0;
2174 enum pipe pipe;
2175 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2176
2177 if (!intel_irqs_enabled(dev_priv))
2178 return IRQ_NONE;
2179
2180 if (IS_GEN9(dev))
2181 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2182 GEN9_AUX_CHANNEL_D;
2183
2184 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2185 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2186 if (!master_ctl)
2187 return IRQ_NONE;
2188
2189 I915_WRITE(GEN8_MASTER_IRQ, 0);
2190 POSTING_READ(GEN8_MASTER_IRQ);
2191
2192
2193
2194 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2195
2196 if (master_ctl & GEN8_DE_MISC_IRQ) {
2197 tmp = I915_READ(GEN8_DE_MISC_IIR);
2198 if (tmp) {
2199 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2200 ret = IRQ_HANDLED;
2201 if (tmp & GEN8_DE_MISC_GSE)
2202 intel_opregion_asle_intr(dev);
2203 else
2204 DRM_ERROR("Unexpected DE Misc interrupt\n");
2205 }
2206 else
2207 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2208 }
2209
2210 if (master_ctl & GEN8_DE_PORT_IRQ) {
2211 tmp = I915_READ(GEN8_DE_PORT_IIR);
2212 if (tmp) {
2213 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2214 ret = IRQ_HANDLED;
2215
2216 if (tmp & aux_mask)
2217 dp_aux_irq_handler(dev);
2218 else
2219 DRM_ERROR("Unexpected DE Port interrupt\n");
2220 }
2221 else
2222 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2223 }
2224
2225 for_each_pipe(dev_priv, pipe) {
2226 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2227
2228 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2229 continue;
2230
2231 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2232 if (pipe_iir) {
2233 ret = IRQ_HANDLED;
2234 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2235
2236 if (pipe_iir & GEN8_PIPE_VBLANK &&
2237 intel_pipe_handle_vblank(dev, pipe))
2238 intel_check_page_flip(dev, pipe);
2239
2240 if (IS_GEN9(dev))
2241 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2242 else
2243 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2244
2245 if (flip_done) {
2246 intel_prepare_page_flip(dev, pipe);
2247 intel_finish_page_flip_plane(dev, pipe);
2248 }
2249
2250 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2251 hsw_pipe_crc_irq_handler(dev, pipe);
2252
2253 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2254 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2255 pipe);
2256
2257
2258 if (IS_GEN9(dev))
2259 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2260 else
2261 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2262
2263 if (fault_errors)
2264 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2265 pipe_name(pipe),
2266 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2267 } else
2268 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2269 }
2270
2271 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2272
2273
2274
2275
2276
2277 u32 pch_iir = I915_READ(SDEIIR);
2278 if (pch_iir) {
2279 I915_WRITE(SDEIIR, pch_iir);
2280 ret = IRQ_HANDLED;
2281 cpt_irq_handler(dev, pch_iir);
2282 } else
2283 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2284
2285 }
2286
2287 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2288 POSTING_READ(GEN8_MASTER_IRQ);
2289
2290 return ret;
2291}
2292
/*
 * Wake every sleeper that might be waiting on the GPU after an error:
 * per-ring waiters, pending-flip waiters and — once a reset has
 * actually completed — anyone blocked on the reset queue.
 *
 * @reset_completed: also wake gpu_error.reset_queue waiters
 */
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Wake all ring waiters so they notice the wedged/reset state
	 * rather than sleeping until their requests complete (which may
	 * never happen after a hang).
	 */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Also wake anybody waiting on a pending page flip. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Only wake the reset queue when the reset is really done;
	 * waking it earlier would let waiters proceed against a GPU
	 * that is still being reset.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}
2320
2321
2322
2323
2324
2325
2326
/*
 * Process a GPU error by notifying userspace via uevents and, when a
 * reset is pending (and the device isn't terminally wedged), actually
 * performing the reset and waking all error waiters afterwards.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	/* Uevent payloads: error detected, reset started, reset done. */
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Only attempt the reset when one is actually in progress and
	 * the device hasn't already been declared terminally wedged.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * Hold a runtime-pm reference across the whole reset so
		 * the device can't suspend midway through.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ happen inside i915_reset();
		 * prepare/finish only bracket it.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * Bump the reset counter to an even value to mark
			 * the reset as complete; the barrier orders this
			 * against the preceding state restoration so
			 * waiters observe a fully reset device.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			/* Reset failed: mark the device terminally wedged. */
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Whether the reset succeeded or wedged, wake everyone
		 * waiting on the error state so they can re-examine it.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
2403
/*
 * Dump the error-identity register (EIR) and its associated detail
 * registers to the kernel log, then ack them.  If EIR refuses to
 * clear, mask the stuck bits in EMR so they can't re-trigger the
 * error interrupt forever.
 */
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	/* G4x-specific error sources. */
	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			/* Write back to ack the error. */
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	/* Gen3+ page-table error (bit not defined on gen2). */
	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		/* The IPEIR/IPEHR register layout changed at gen4. */
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	/* Ack EIR; re-read to check whether any bits are stuck. */
	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * Some bits won't clear; mask them in EMR so the error
		 * interrupt doesn't fire endlessly, and ack the command
		 * parser error bit in IIR.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
/**
 * i915_handle_error - central GPU error entry point
 * @dev: drm device
 * @wedged: true if the GPU hung and a reset should be performed
 * @fmt: printf-style reason message recorded in the error state
 *
 * Captures the error state, dumps/clears EIR and, when @wedged, flags
 * a reset in progress, wakes all waiters so they notice it, and runs
 * the reset/uevent sequence via i915_reset_and_wakeup().
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		/* Odd counter value == reset in progress. */
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wake up everybody blocked on requests or flips so they
		 * observe the reset-in-progress flag and back off,
		 * instead of sleeping on work the hung GPU will never
		 * finish.  The reset queue is deliberately NOT woken
		 * here (reset_completed == false) — that only happens
		 * once the reset has actually completed.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}
2542
2543
2544
2545
2546static int i915_enable_vblank(struct drm_device *dev, int pipe)
2547{
2548 struct drm_i915_private *dev_priv = dev->dev_private;
2549 unsigned long irqflags;
2550
2551 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2552 if (INTEL_INFO(dev)->gen >= 4)
2553 i915_enable_pipestat(dev_priv, pipe,
2554 PIPE_START_VBLANK_INTERRUPT_STATUS);
2555 else
2556 i915_enable_pipestat(dev_priv, pipe,
2557 PIPE_VBLANK_INTERRUPT_STATUS);
2558 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2559
2560 return 0;
2561}
2562
2563static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2564{
2565 struct drm_i915_private *dev_priv = dev->dev_private;
2566 unsigned long irqflags;
2567 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2568 DE_PIPE_VBLANK(pipe);
2569
2570 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2571 ironlake_enable_display_irq(dev_priv, bit);
2572 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2573
2574 return 0;
2575}
2576
/* drm vblank hook: enable the start-of-vblank pipestat interrupt for
 * @pipe on VLV/CHV. */
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* pipestat enable bits are protected by irq_lock */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2589
/* drm vblank hook: enable the vblank interrupt for @pipe on gen8+ by
 * clearing the bit in the cached DE pipe interrupt mask. */
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	/* posting read so the unmask has landed before we return */
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	return 0;
}
2602
2603
2604
2605
/* drm vblank hook: disable both pipestat vblank interrupt flavours for
 * @pipe (covers both the gen4+ "start" bit and the older plain bit
 * that i915_enable_vblank() may have set). */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2617
2618static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2619{
2620 struct drm_i915_private *dev_priv = dev->dev_private;
2621 unsigned long irqflags;
2622 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2623 DE_PIPE_VBLANK(pipe);
2624
2625 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2626 ironlake_disable_display_irq(dev_priv, bit);
2627 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2628}
2629
/* drm vblank hook: disable the start-of-vblank pipestat interrupt for
 * @pipe on VLV/CHV. */
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2640
/* drm vblank hook: disable the vblank interrupt for @pipe on gen8+ by
 * setting the bit in the cached DE pipe interrupt mask. */
static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	/* posting read so the mask has landed before we return */
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2652
/* Return the most recently submitted request on @ring (tail of the
 * request list). Only meaningful when the list is non-empty. */
static struct drm_i915_gem_request *
ring_last_request(struct intel_engine_cs *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list);
}
2659
2660static bool
2661ring_idle(struct intel_engine_cs *ring)
2662{
2663 return (list_empty(&ring->request_list) ||
2664 i915_gem_request_completed(ring_last_request(ring), false));
2665}
2666
2667static bool
2668ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2669{
2670 if (INTEL_INFO(dev)->gen >= 8) {
2671 return (ipehr >> 23) == 0x1c;
2672 } else {
2673 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2674 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2675 MI_SEMAPHORE_REGISTER);
2676 }
2677}
2678
2679static struct intel_engine_cs *
2680semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2681{
2682 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2683 struct intel_engine_cs *signaller;
2684 int i;
2685
2686 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2687 for_each_ring(signaller, dev_priv, i) {
2688 if (ring == signaller)
2689 continue;
2690
2691 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2692 return signaller;
2693 }
2694 } else {
2695 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2696
2697 for_each_ring(signaller, dev_priv, i) {
2698 if(ring == signaller)
2699 continue;
2700
2701 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2702 return signaller;
2703 }
2704 }
2705
2706 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2707 ring->id, ipehr, offset);
2708
2709 return NULL;
2710}
2711
2712static struct intel_engine_cs *
2713semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2714{
2715 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2716 u32 cmd, ipehr, head;
2717 u64 offset = 0;
2718 int i, backwards;
2719
2720 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2721 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2722 return NULL;
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2733 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2734
2735 for (i = backwards; i; --i) {
2736
2737
2738
2739
2740
2741 head &= ring->buffer->size - 1;
2742
2743
2744 cmd = ioread32(ring->buffer->virtual_start + head);
2745 if (cmd == ipehr)
2746 break;
2747
2748 head -= 4;
2749 }
2750
2751 if (!i)
2752 return NULL;
2753
2754 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2755 if (INTEL_INFO(ring->dev)->gen >= 8) {
2756 offset = ioread32(ring->buffer->virtual_start + head + 12);
2757 offset <<= 32;
2758 offset = ioread32(ring->buffer->virtual_start + head + 8);
2759 }
2760 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2761}
2762
/*
 * Check whether the semaphore @ring is waiting on has already been
 * signalled. Returns 1 if passed (the ring can be kicked), 0 if still
 * pending, and -1 on deadlock or when the signaller cannot be found.
 */
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent unbounded recursion: a wait cycle among the rings will
	 * push some deadlock counter past the number of rings. */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock: the signaller is
	 * itself blocked on a semaphore that has not passed */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}
2789
/* Reset every engine's deadlock-detection counter (used by
 * semaphore_passed()) before a new hangcheck sample. */
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}
2798
/*
 * Classify why @ring's ACTHD has (or has not) advanced since the last
 * hangcheck sample. May kick a stuck wait or passed semaphore by
 * rewriting the ring CTL register.
 */
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		/* ACTHD moved but not past its previous maximum: the ring
		 * appears to be looping over the same commands. */
		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			/* semaphore already signalled: kick the wait */
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
2849
2850
2851
2852
2853
2854
2855
2856
2857
/*
 * Periodic hangcheck work. We track per-ring seqno progress; when a
 * ring makes no progress its hangcheck score is increased, and ACTHD
 * is inspected to see whether the ring is stuck (and possibly kicked).
 * Once a ring's score crosses HANGCHECK_SCORE_RING_HUNG we declare the
 * GPU hung and trigger a reset via i915_handle_error().
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
/* score increments for the different hangcheck outcomes */
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			/* no seqno progress since the last sample */
			if (ring_idle(ring)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring, is if
				 * this ring is in a legitimate wait for
				 * another ring. In that case the waiting
				 * ring is a victim and we want to be sure we
				 * catch the right culprit. Then every time we
				 * do kick the ring, add a small increment to
				 * the score so that we can catch a batch that
				 * is being repeatedly kicked and so
				 * responsible for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset timer in case GPU hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}
2974
/* Arm (or re-arm) the delayed hangcheck work on its dedicated
 * workqueue, if hangchecking is enabled via the module parameter. */
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;

	if (!i915.enable_hangcheck)
		return;

	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */
	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}
2990
/* Mask and clear all south display engine (PCH) interrupts; also clear
 * the south error interrupt register on CPT/LPT. */
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}
3003
3004
3005
3006
3007
3008
3009
3010
3011
/*
 * Unconditionally enable all PCH interrupt sources in SDEIER here;
 * individual sources are then only unmasked as needed via SDEIMR.
 * NOTE(review): SDEIER appears to be deliberately left untouched after
 * this point — presumably the interrupt handler also writes it, so it
 * cannot be safely updated once interrupts are live; confirm against
 * the PCH interrupt handler.
 *
 * This function must be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* SDEIER is expected to still be zero from the preceding reset. */
	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
3023
/* Reset the GT interrupt registers, plus the PM interrupt registers on
 * gen6 and newer. */
static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}
3032
3033
3034
/* Mask/clear display engine, GT and PCH interrupts for ILK-style
 * platforms (also clears the gen7 error interrupt register). */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}
3049
/* Clear hotplug enable/status and all pipestat bits, then reset the
 * VLV display interrupt registers. */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	/* write back the status register to clear latched bits */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}
3062
/* Disable all VLV interrupt sources before the irq handler is
 * installed. */
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* VLV magic: zero the master and per-ring interrupt masks first */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	gen5_gt_irq_reset(dev);

	/* ack any pending invalid-PTE error status */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	vlv_display_irq_reset(dev_priv);
}
3079
/* Reset all four banks of gen8 GT interrupt registers. */
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}
3087
/* Full gen8 interrupt reset: master, GT banks, powered-up display
 * pipes, port/misc/PCU registers and the PCH. */
static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	/* only touch pipe registers whose power domain is enabled */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	ibx_irq_reset(dev);
}
3109
3110void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3111 unsigned int pipe_mask)
3112{
3113 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3114
3115 spin_lock_irq(&dev_priv->irq_lock);
3116 if (pipe_mask & 1 << PIPE_A)
3117 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3118 dev_priv->de_irq_mask[PIPE_A],
3119 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3120 if (pipe_mask & 1 << PIPE_B)
3121 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3122 dev_priv->de_irq_mask[PIPE_B],
3123 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3124 if (pipe_mask & 1 << PIPE_C)
3125 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3126 dev_priv->de_irq_mask[PIPE_C],
3127 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3128 spin_unlock_irq(&dev_priv->irq_lock);
3129}
3130
/* Disable all CHV interrupt sources before the irq handler is
 * installed. */
static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	/* ack any pending invalid-PTE error status (CHV layout) */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	vlv_display_irq_reset(dev_priv);
}
3146
/*
 * Enable PCH hotplug detection: unmask the HPD interrupt of every
 * encoder whose pin is marked HPD_ENABLED, then program the per-port
 * detect-enable and pulse-duration bits.
 */
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		/* CPT-style PCH uses a different HPD bit layout */
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 *
	 * NOTE(review): looks like this register layout is shared across the
	 * PCH generations handled above — confirm against the PCH docs.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3180
3181static void ibx_irq_postinstall(struct drm_device *dev)
3182{
3183 struct drm_i915_private *dev_priv = dev->dev_private;
3184 u32 mask;
3185
3186 if (HAS_PCH_NOP(dev))
3187 return;
3188
3189 if (HAS_PCH_IBX(dev))
3190 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3191 else
3192 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3193
3194 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3195 I915_WRITE(SDEIMR, ~mask);
3196}
3197
/* Program the GT (and, on gen6+, the PM) interrupt enable/mask
 * registers for the render/BSD/blitter user interrupts. */
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled, so no RPS bits are set here.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}
3234
/* Program the display-engine, GT and PCH interrupt registers for
 * ILK-style platforms. Always returns 0. */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		/* IVB+ bit layout */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	/* display_mask bits stay unmasked; extra_mask bits are enabled in
	 * IER but masked in IMR until explicitly unmasked. */
	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since
		 * interrupt setup is guaranteed to run in single-threaded
		 * context. But we need it to make the assert_spin_locked
		 * happy.
		 */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
3282
/* Enable the VLV/CHV display interrupts (pipestat events, pipe events,
 * display port) and unmask them in VLV_IMR/IER. Called with irq_lock
 * held by both call sites in this file. */
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	/* clear any stale pipestat status bits first */
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	/* NOTE(review): IIR is deliberately written twice here (the
	 * uninstall path does the same) — presumably to flush re-latched
	 * status bits; confirm against VLV IIR behaviour. */
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}
3316
/* Mirror of valleyview_display_irqs_install(): mask the display
 * interrupts in VLV_IMR/IER and disable/clear the pipestat bits.
 * Called with irq_lock held by both call sites in this file. */
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	/* NOTE(review): IIR written twice on purpose, matching the
	 * install path — presumably to flush re-latched status bits. */
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	/* finally clear any remaining pipestat status bits */
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}
3350
3351void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3352{
3353 assert_spin_locked(&dev_priv->irq_lock);
3354
3355 if (dev_priv->display_irqs_enabled)
3356 return;
3357
3358 dev_priv->display_irqs_enabled = true;
3359
3360 if (intel_irqs_enabled(dev_priv))
3361 valleyview_display_irqs_install(dev_priv);
3362}
3363
3364void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3365{
3366 assert_spin_locked(&dev_priv->irq_lock);
3367
3368 if (!dev_priv->display_irqs_enabled)
3369 return;
3370
3371 dev_priv->display_irqs_enabled = false;
3372
3373 if (intel_irqs_enabled(dev_priv))
3374 valleyview_display_irqs_uninstall(dev_priv);
3375}
3376
/* Program the VLV display interrupt registers: start fully masked,
 * then install the display irqs if they have already been requested. */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	/* clear IIR twice, then program IER/IMR from the all-masked state */
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3397
/* Enable display, GT and master interrupts on VLV. Always returns 0. */
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts — currently compiled
	 * out; the irq handler does not check these bits yet. */
#if 0
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
3416
/* Program the four gen8 GT interrupt banks: render/blitter (0),
 * video decode (1), PM (2, left masked) and video enhance (3). */
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled, so bank 2 stays fully masked here.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
3445
/* Program the gen8/gen9 display-engine pipe and port interrupt
 * registers; only pipes with an enabled power domain are touched. */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;
	u32 aux_en = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev_priv)) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		/* gen9 adds AUX channels B-D */
		aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;
	} else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	/* vblank/underrun are enabled in IER but stay masked in IMR */
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
}
3478
/* Enable GT, display-engine, PCH and master interrupts on gen8+.
 * Always returns 0. */
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
3495
/* Enable display, GT and master interrupts on CHV (VLV-style display
 * with gen8-style GT). Always returns 0. */
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
3509
3510static void gen8_irq_uninstall(struct drm_device *dev)
3511{
3512 struct drm_i915_private *dev_priv = dev->dev_private;
3513
3514 if (!dev_priv)
3515 return;
3516
3517 gen8_irq_reset(dev);
3518}
3519
/* Tear down the VLV display interrupts and leave everything masked. */
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = ~0;
}
3533
/* drm irq_uninstall hook for VLV: disable master/GT interrupts and
 * tear down the display interrupt state. */
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}
3549
/* drm irq_uninstall hook for CHV: disable master/GT/PCU interrupts and
 * tear down the display interrupt state. */
static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}
3566
3567static void ironlake_irq_uninstall(struct drm_device *dev)
3568{
3569 struct drm_i915_private *dev_priv = dev->dev_private;
3570
3571 if (!dev_priv)
3572 return;
3573
3574 ironlake_irq_reset(dev);
3575}
3576
/* Disable all gen2 interrupt sources before the handler is installed
 * (gen2 uses 16-bit IMR/IER registers). */
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
3588
/* Program the gen2 (16-bit) interrupt registers. Always returns 0. */
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
3619
3620
3621
3622
/*
 * Handle a gen2 vblank event: forward the vblank, and finish a pending
 * page flip if one has completed. Returns true when a page flip has
 * completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted,
	 * hence the flip is completed (no longer pending). Since this
	 * doesn't raise an interrupt per se, we watch for the change at
	 * vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
3652
/* gen2 interrupt handler: loops on the 16-bit IIR, dispatching user,
 * vblank, CRC and FIFO-underrun events per pipe. */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			/* on FBC platforms plane/pipe assignment is swapped */
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
3721
/* Disable and acknowledge all gen2 interrupt sources on teardown. */
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
3736
/* Disable all gen3 interrupt sources (and clear hotplug state where
 * supported) before the handler is installed. */
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		/* write back the status register to clear latched bits */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
3754
/* Enable the gen3 interrupts the driver relies on: user/pipe-event/ASLE,
 * plus display-port hotplug where supported. Returns 0 (always succeeds). */
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
3801
3802
3803
3804
/*
 * Returns true when a page flip has completed on @plane/@pipe, false
 * otherwise (including when no flip was pending in @iir).
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
3834
/* Gen3 top-level interrupt handler: loops acking IIR and dispatching
 * render, hotplug, vblank/flip, CRC, underrun and ASLE events until no
 * unmasked bits remain. */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	/* Flip-pending bits are handled via the vblank path, so they alone
	 * don't count as "received"; cleared per-plane once a flip finishes. */
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			/* With FBC the plane<->pipe mapping is swapped. */
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate a
		 * spurious interrupt.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}
3932
/* Tear down gen3 interrupt state: disable hotplug, pipe status, and
 * mask/ack everything in IMR/IER/IIR. */
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	/* NOTE(review): 16-bit HWSTAM write on the gen3 path — matches the
	 * preinstall above, but confirm against register width docs. */
	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
3954
/* Quiesce gen4 interrupt sources before the handler is installed. */
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	/* Writing the read-back value acks pending hotplug status. */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
3970
/* Enable the gen4 interrupts the driver relies on (user, BSD on G4X,
 * display port, pipe events, GMBUS/CRC pipestats) and program error
 * detection. Returns 0 (always succeeds). */
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	/* Flip-pending bits stay out of IER; they are polled via vblank. */
	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
4028
/* Program PORT_HOTPLUG_EN from the per-pin hpd_stats enable state.
 * Must be called with dev_priv->irq_lock held. */
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	for_each_intel_encoder(dev, intel_encoder)
		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
4056
/* Gen4 top-level interrupt handler: loops acking IIR and dispatching
 * render/BSD, hotplug, vblank/flip, CRC, underrun, ASLE and GMBUS
 * events until no unmasked bits remain. */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	/* Flip-pending bits are handled via the vblank path, so they alone
	 * don't count as "received"; cleared per-pipe once a flip finishes. */
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* GMBUS status is only enabled on pipe A (see postinstall). */
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate a
		 * spurious interrupt.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
4158
/* Tear down gen4 interrupt state: disable hotplug, pipe status, and
 * mask/ack everything in IMR/IER/IIR. */
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Ack any remaining pipe status bits, then ack IIR. */
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
4181
/* Delayed work item that re-enables hotplug pins previously marked
 * HPD_DISABLED (e.g. by storm detection), restoring each affected
 * connector's polled mode and reprogramming the HPD registers. */
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	/* Hold a wakeref so the hardware stays powered while we touch it. */
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				/* Connectors with no intrinsic poll mode go
				 * back to hardware hotplug. */
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}
4221
4222
4223
4224
4225
4226
4227
4228
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* Pick the vblank counter implementation and its wrap range. */
	if (IS_GEN2(dev_priv)) {
		/* Gen2: no usable hardware counter (max_vblank_count = 0). */
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank seqencing.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	/* Platform dispatch: install the matching irq/vblank/hpd vtables. */
	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support: it resets the per-pin hpd
 * state, sets up the connector polling modes, and programs the hotplug
 * registers via the platform hpd_irq_setup hook.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		/* Connectors with an HPD pin and no explicit poll mode use
		 * hardware hotplug; MST ports always poll via HPD. */
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling. It is called after
 * intel_irq_init(). Returns the result of drm_irq_install().
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
4392
4393
4394
4395
4396
4397
4398
4399
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and marks interrupts as
 * disabled. Counterpart to intel_irq_install().
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}
4406
4407
4408
4409
4410
4411
4412
4413
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime. It runs the
 * driver's irq_uninstall hook, marks interrupts disabled, and waits for
 * any in-flight handler to finish via synchronize_irq().
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}
4420
4421
4422
4423
4424
4425
4426
4427
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime. The enabled flag
 * is set before running the hooks since the postinstall hook may itself
 * enable interrupt sources (see intel_irq_install()).
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
4434