#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
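
/* For display hotplug interrupt */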
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
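
/*
 * IIR can queue up two identical events, which is why the reset macros above
 * clear it twice. At install time IIR is expected to already be zero; the
 * check below warns (and clears) if it is not.
 */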
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)

static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
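
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */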
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
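
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */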
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}
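
/**
 * bdw_update_pm_irq - update the PM interrupt mask in GEN8_GT_IMR(2)
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Same logic as snb_update_pm_irq, with the GEN8 register offsets.
 */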
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
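
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */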
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}
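
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */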
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}
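
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */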
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;
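
	/*
	 * Note: pre-LPT PCHs have a fixed cpu pipe -> pch transcoder mapping,
	 * which is why indexing pipe_to_crtc_mapping[] with the pch transcoder
	 * above is valid here.
	 */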
	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return old;
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;

	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
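
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */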
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
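
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the pipe is running before touching them.
 */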
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
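
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */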
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}
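
	/* Convert to pixel count */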
	vbl_start *= htotal;
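
	/* Start of vblank event occurs at start of hsync */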
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);
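
	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */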
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;
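
	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */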
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
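
/* raw reads, only for fast reads of display block, no need for forcewake etc. */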
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		if (position >= vtotal)
			position = vtotal - 1;

		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	unsigned long irqflags;
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		schedule_work(&dev_priv->hotplug_work);
	}
}
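
/*
 * Handle hotplug events outside the interrupt handler proper.
 */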
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
			 / elapsed_time);
	}

	return residency;
}
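
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on memory
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */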
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	u8 new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
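
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */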
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int ilk_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int g4x_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (IS_G4X(dev)) {
				dig_shift = g4x_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = ilk_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	struct intel_crtc *crtc;

	if (!drm_handle_vblank(dev, pipe))
		return false;

	crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	wake_up(&crtc->vbl_wait);

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(pipe) {
		int reg;
		u32 mask, iir_bit = 0;
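
		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */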
		mask = 0;
		if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
			mask |= PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_pipe_handle_vblank(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
		    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure the hotplug status is cleared before we proceed,
		 * or else we may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
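			/* Consume port before clearing IIR or we'll miss events */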
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
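		/* Call regardless, as some status bits might not be
		 * signalled in iir */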
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
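			/* Consume port before clearing IIR or we'll miss events */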
2166 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2167 i9xx_hpd_irq_handler(dev);
2168 I915_WRITE(VLV_IIR, iir);
2169 }
2170
2171 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2172
2173
2174
2175 valleyview_pipestat_irq_handler(dev, iir);
2176
2177 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2178 POSTING_READ(GEN8_MASTER_IRQ);
2179 }
2180
2181 return ret;
2182}
2183
2184static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2185{
2186 struct drm_i915_private *dev_priv = dev->dev_private;
2187 int pipe;
2188 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2189 u32 dig_hotplug_reg;
2190
2191 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2192 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2193
2194 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
2195
2196 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2197 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2198 SDE_AUDIO_POWER_SHIFT);
2199 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2200 port_name(port));
2201 }
2202
2203 if (pch_iir & SDE_AUX_MASK)
2204 dp_aux_irq_handler(dev);
2205
2206 if (pch_iir & SDE_GMBUS)
2207 gmbus_irq_handler(dev);
2208
2209 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2210 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2211
2212 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2213 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2214
2215 if (pch_iir & SDE_POISON)
2216 DRM_ERROR("PCH poison interrupt\n");
2217
2218 if (pch_iir & SDE_FDI_MASK)
2219 for_each_pipe(pipe)
2220 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2221 pipe_name(pipe),
2222 I915_READ(FDI_RX_IIR(pipe)));
2223
2224 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2225 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2226
2227 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2228 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2229
2230 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2231 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2232 false))
2233 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2234
2235 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2236 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2237 false))
2238 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2239}
2240
2241static void ivb_err_int_handler(struct drm_device *dev)
2242{
2243 struct drm_i915_private *dev_priv = dev->dev_private;
2244 u32 err_int = I915_READ(GEN7_ERR_INT);
2245 enum pipe pipe;
2246
2247 if (err_int & ERR_INT_POISON)
2248 DRM_ERROR("Poison interrupt\n");
2249
2250 for_each_pipe(pipe) {
2251 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
2252 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2253 false))
2254 DRM_ERROR("Pipe %c FIFO underrun\n",
2255 pipe_name(pipe));
2256 }
2257
2258 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2259 if (IS_IVYBRIDGE(dev))
2260 ivb_pipe_crc_irq_handler(dev, pipe);
2261 else
2262 hsw_pipe_crc_irq_handler(dev, pipe);
2263 }
2264 }
2265
2266 I915_WRITE(GEN7_ERR_INT, err_int);
2267}
2268
2269static void cpt_serr_int_handler(struct drm_device *dev)
2270{
2271 struct drm_i915_private *dev_priv = dev->dev_private;
2272 u32 serr_int = I915_READ(SERR_INT);
2273
2274 if (serr_int & SERR_INT_POISON)
2275 DRM_ERROR("PCH poison interrupt\n");
2276
2277 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2278 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2279 false))
2280 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2281
2282 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2283 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2284 false))
2285 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2286
2287 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2288 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
2289 false))
2290 DRM_ERROR("PCH transcoder C FIFO underrun\n");
2291
2292 I915_WRITE(SERR_INT, serr_int);
2293}
2294
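/*
 * CPT/PPT/LPT variant of the south display engine interrupt handler;
 * same structure as the IBX version but with the CPT bit layout and an
 * extra SERR path for transcoder FIFO underruns.
 */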
2295static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2296{
2297 struct drm_i915_private *dev_priv = dev->dev_private;
2298 int pipe;
2299 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2300 u32 dig_hotplug_reg;
2301
2302 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2303 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2304
2305 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2306
2307 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2308 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2309 SDE_AUDIO_POWER_SHIFT_CPT);
2310 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2311 port_name(port));
2312 }
2313
2314 if (pch_iir & SDE_AUX_MASK_CPT)
2315 dp_aux_irq_handler(dev);
2316
2317 if (pch_iir & SDE_GMBUS_CPT)
2318 gmbus_irq_handler(dev);
2319
2320 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2321 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2322
2323 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2324 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2325
2326 if (pch_iir & SDE_FDI_MASK_CPT)
2327 for_each_pipe(pipe)
2328 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2329 pipe_name(pipe),
2330 I915_READ(FDI_RX_IIR(pipe)));
2331
2332 if (pch_iir & SDE_ERROR_CPT)
2333 cpt_serr_int_handler(dev);
2334}
2335
2336static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2337{
2338 struct drm_i915_private *dev_priv = dev->dev_private;
2339 enum pipe pipe;
2340
2341 if (de_iir & DE_AUX_CHANNEL_A)
2342 dp_aux_irq_handler(dev);
2343
2344 if (de_iir & DE_GSE)
2345 intel_opregion_asle_intr(dev);
2346
2347 if (de_iir & DE_POISON)
2348 DRM_ERROR("Poison interrupt\n");
2349
2350 for_each_pipe(pipe) {
2351 if (de_iir & DE_PIPE_VBLANK(pipe))
2352 intel_pipe_handle_vblank(dev, pipe);
2353
2354 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2355 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2356 DRM_ERROR("Pipe %c FIFO underrun\n",
2357 pipe_name(pipe));
2358
2359 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2360 i9xx_pipe_crc_irq_handler(dev, pipe);
2361
		/* plane/pipes map 1:1 on ilk+ */
2363 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2364 intel_prepare_page_flip(dev, pipe);
2365 intel_finish_page_flip_plane(dev, pipe);
2366 }
2367 }
2368
	/* check event from PCH */
2370 if (de_iir & DE_PCH_EVENT) {
2371 u32 pch_iir = I915_READ(SDEIIR);
2372
2373 if (HAS_PCH_CPT(dev))
2374 cpt_irq_handler(dev, pch_iir);
2375 else
2376 ibx_irq_handler(dev, pch_iir);
2377
		/* should clear PCH hotplug event before clear CPU irq */
2379 I915_WRITE(SDEIIR, pch_iir);
2380 }
2381
2382 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2383 ironlake_rps_change_irq_handler(dev);
2384}
2385
2386static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2387{
2388 struct drm_i915_private *dev_priv = dev->dev_private;
2389 enum pipe pipe;
2390
2391 if (de_iir & DE_ERR_INT_IVB)
2392 ivb_err_int_handler(dev);
2393
2394 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2395 dp_aux_irq_handler(dev);
2396
2397 if (de_iir & DE_GSE_IVB)
2398 intel_opregion_asle_intr(dev);
2399
2400 for_each_pipe(pipe) {
2401 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2402 intel_pipe_handle_vblank(dev, pipe);
2403
		/* plane/pipes map 1:1 on ilk+ */
2405 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2406 intel_prepare_page_flip(dev, pipe);
2407 intel_finish_page_flip_plane(dev, pipe);
2408 }
2409 }
2410
	/* check event from PCH */
2412 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2413 u32 pch_iir = I915_READ(SDEIIR);
2414
2415 cpt_irq_handler(dev, pch_iir);
2416
		/* clear PCH hotplug event before clear CPU irq */
2418 I915_WRITE(SDEIIR, pch_iir);
2419 }
2420}
2421
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2430static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2431{
2432 struct drm_device *dev = arg;
2433 struct drm_i915_private *dev_priv = dev->dev_private;
2434 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2435 irqreturn_t ret = IRQ_NONE;
2436
	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
2439 intel_uncore_check_errors(dev);
2440
	/* disable master interrupt before clearing iir */
2442 de_ier = I915_READ(DEIER);
2443 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2444 POSTING_READ(DEIER);
2445
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
2451 if (!HAS_PCH_NOP(dev)) {
2452 sde_ier = I915_READ(SDEIER);
2453 I915_WRITE(SDEIER, 0);
2454 POSTING_READ(SDEIER);
2455 }
2456
	/* Find, clear, then process each source of interrupt */
2458
2459 gt_iir = I915_READ(GTIIR);
2460 if (gt_iir) {
2461 I915_WRITE(GTIIR, gt_iir);
2462 ret = IRQ_HANDLED;
2463 if (INTEL_INFO(dev)->gen >= 6)
2464 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2465 else
2466 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2467 }
2468
2469 de_iir = I915_READ(DEIIR);
2470 if (de_iir) {
2471 I915_WRITE(DEIIR, de_iir);
2472 ret = IRQ_HANDLED;
2473 if (INTEL_INFO(dev)->gen >= 7)
2474 ivb_display_irq_handler(dev, de_iir);
2475 else
2476 ilk_display_irq_handler(dev, de_iir);
2477 }
2478
2479 if (INTEL_INFO(dev)->gen >= 6) {
2480 u32 pm_iir = I915_READ(GEN6_PMIIR);
2481 if (pm_iir) {
2482 I915_WRITE(GEN6_PMIIR, pm_iir);
2483 ret = IRQ_HANDLED;
2484 gen6_rps_irq_handler(dev_priv, pm_iir);
2485 }
2486 }
2487
2488 I915_WRITE(DEIER, de_ier);
2489 POSTING_READ(DEIER);
2490 if (!HAS_PCH_NOP(dev)) {
2491 I915_WRITE(SDEIER, sde_ier);
2492 POSTING_READ(SDEIER);
2493 }
2494
2495 return ret;
2496}
2497
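/*
 * Gen8 (Broadwell) interrupt handler: a single master control register
 * gates the per-domain IIRs (GT, DE misc, DE port, per-pipe, PCH).
 * Master control is disabled on entry and re-armed once every asserted
 * domain has been acked and processed.
 */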
2498static irqreturn_t gen8_irq_handler(int irq, void *arg)
2499{
2500 struct drm_device *dev = arg;
2501 struct drm_i915_private *dev_priv = dev->dev_private;
2502 u32 master_ctl;
2503 irqreturn_t ret = IRQ_NONE;
2504 uint32_t tmp = 0;
2505 enum pipe pipe;
2506
2507 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2508 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2509 if (!master_ctl)
2510 return IRQ_NONE;
2511
2512 I915_WRITE(GEN8_MASTER_IRQ, 0);
2513 POSTING_READ(GEN8_MASTER_IRQ);
2514
	/* Find, clear, then process each source of interrupt */
2517 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2518
2519 if (master_ctl & GEN8_DE_MISC_IRQ) {
2520 tmp = I915_READ(GEN8_DE_MISC_IIR);
2521 if (tmp) {
2522 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2523 ret = IRQ_HANDLED;
2524 if (tmp & GEN8_DE_MISC_GSE)
2525 intel_opregion_asle_intr(dev);
2526 else
2527 DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2531 }
2532
2533 if (master_ctl & GEN8_DE_PORT_IRQ) {
2534 tmp = I915_READ(GEN8_DE_PORT_IIR);
2535 if (tmp) {
2536 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2537 ret = IRQ_HANDLED;
2538 if (tmp & GEN8_AUX_CHANNEL_A)
2539 dp_aux_irq_handler(dev);
2540 else
2541 DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2545 }
2546
2547 for_each_pipe(pipe) {
2548 uint32_t pipe_iir;
2549
2550 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2551 continue;
2552
2553 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2554 if (pipe_iir) {
2555 ret = IRQ_HANDLED;
2556 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2557 if (pipe_iir & GEN8_PIPE_VBLANK)
2558 intel_pipe_handle_vblank(dev, pipe);
2559
2560 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2561 intel_prepare_page_flip(dev, pipe);
2562 intel_finish_page_flip_plane(dev, pipe);
2563 }
2564
2565 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2566 hsw_pipe_crc_irq_handler(dev, pipe);
2567
2568 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2569 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2570 false))
2571 DRM_ERROR("Pipe %c FIFO underrun\n",
2572 pipe_name(pipe));
2573 }
2574
2575 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2579 }
2580 } else
2581 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2582 }
2583
2584 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
2590 u32 pch_iir = I915_READ(SDEIIR);
2591 if (pch_iir) {
2592 I915_WRITE(SDEIIR, pch_iir);
2593 ret = IRQ_HANDLED;
2594 cpt_irq_handler(dev, pch_iir);
2595 } else
2596 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2597
2598 }
2599
2600 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2601 POSTING_READ(GEN8_MASTER_IRQ);
2602
2603 return ret;
2604}
2605
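/*
 * Wake every waiter that might be stuck on GPU activity so they can
 * notice the pending (or completed) reset and back off or restart.
 */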
2606static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2607 bool reset_completed)
2608{
2609 struct intel_engine_cs *ring;
2610 int i;
2611
	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there
	 * is one).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2620 for_each_ring(ring, dev_priv, i)
2621 wake_up_all(&ring->irq_queue);
2622
	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2624 wake_up_all(&dev_priv->pending_flip_queue);
2625
	/*
	 * Signal tasks on the dev_priv->gpu_error.reset_queue, so that the
	 * reset state is cleared.
	 */
2630 if (reset_completed)
2631 wake_up_all(&dev_priv->gpu_error.reset_queue);
2632}
2633
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * occurred.
 */
2641static void i915_error_work_func(struct work_struct *work)
2642{
2643 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2644 work);
2645 struct drm_i915_private *dev_priv =
2646 container_of(error, struct drm_i915_private, gpu_error);
2647 struct drm_device *dev = dev_priv->dev;
2648 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2649 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2650 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2651 int ret;
2652
2653 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2654
	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and
	 * since the reset in-progress bit is only ever set by code outside of
	 * this work we don't need to worry about any other races.
	 */
2665 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2666 DRM_DEBUG_DRIVER("resetting chip\n");
2667 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2668 reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
2677 intel_runtime_pm_get(dev_priv);
2678
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
2684 ret = i915_reset(dev);
2685
2686 intel_display_handle_reset(dev);
2687
2688 intel_runtime_pm_put(dev_priv);
2689
2690 if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
2701 smp_mb__before_atomic();
2702 atomic_inc(&dev_priv->gpu_error.reset_counter);
2703
2704 kobject_uevent_env(&dev->primary->kdev->kobj,
2705 KOBJ_CHANGE, reset_done_event);
2706 } else {
2707 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2708 }
2709
		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
2714 i915_error_wake_up(dev_priv, true);
2715 }
2716}
2717
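/*
 * Dump the contents of the Error Identity Register (and related error
 * state) to the log, then write it back to clear the sticky bits; if EIR
 * still reads back non-zero the offending bits are masked via EMR instead.
 */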
2718static void i915_report_and_clear_eir(struct drm_device *dev)
2719{
2720 struct drm_i915_private *dev_priv = dev->dev_private;
2721 uint32_t instdone[I915_NUM_INSTDONE_REG];
2722 u32 eir = I915_READ(EIR);
2723 int pipe, i;
2724
2725 if (!eir)
2726 return;
2727
2728 pr_err("render error detected, EIR: 0x%08x\n", eir);
2729
2730 i915_get_extra_instdone(dev, instdone);
2731
2732 if (IS_G4X(dev)) {
2733 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2734 u32 ipeir = I915_READ(IPEIR_I965);
2735
2736 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2737 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2738 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2739 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2740 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2741 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2742 I915_WRITE(IPEIR_I965, ipeir);
2743 POSTING_READ(IPEIR_I965);
2744 }
2745 if (eir & GM45_ERROR_PAGE_TABLE) {
2746 u32 pgtbl_err = I915_READ(PGTBL_ER);
2747 pr_err("page table error\n");
2748 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2749 I915_WRITE(PGTBL_ER, pgtbl_err);
2750 POSTING_READ(PGTBL_ER);
2751 }
2752 }
2753
2754 if (!IS_GEN2(dev)) {
2755 if (eir & I915_ERROR_PAGE_TABLE) {
2756 u32 pgtbl_err = I915_READ(PGTBL_ER);
2757 pr_err("page table error\n");
2758 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2759 I915_WRITE(PGTBL_ER, pgtbl_err);
2760 POSTING_READ(PGTBL_ER);
2761 }
2762 }
2763
2764 if (eir & I915_ERROR_MEMORY_REFRESH) {
2765 pr_err("memory refresh error:\n");
2766 for_each_pipe(pipe)
2767 pr_err("pipe %c stat: 0x%08x\n",
2768 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2769
2770 }
2771 if (eir & I915_ERROR_INSTRUCTION) {
2772 pr_err("instruction error\n");
2773 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2774 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2775 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2776 if (INTEL_INFO(dev)->gen < 4) {
2777 u32 ipeir = I915_READ(IPEIR);
2778
2779 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2780 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2781 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2782 I915_WRITE(IPEIR, ipeir);
2783 POSTING_READ(IPEIR);
2784 } else {
2785 u32 ipeir = I915_READ(IPEIR_I965);
2786
2787 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2788 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2789 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2790 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2791 I915_WRITE(IPEIR_I965, ipeir);
2792 POSTING_READ(IPEIR_I965);
2793 }
2794 }
2795
2796 I915_WRITE(EIR, eir);
2797 POSTING_READ(EIR);
2798 eir = I915_READ(EIR);
2799 if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
2804 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2805 I915_WRITE(EMR, I915_READ(EMR) | eir);
2806 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2807 }
2808}
2809
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should have caught it in
 * testing).
 */
2820void i915_handle_error(struct drm_device *dev, bool wedged,
2821 const char *fmt, ...)
2822{
2823 struct drm_i915_private *dev_priv = dev->dev_private;
2824 va_list args;
2825 char error_msg[80];
2826
2827 va_start(args, fmt);
2828 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2829 va_end(args);
2830
2831 i915_capture_error_state(dev, wedged, error_msg);
2832 i915_report_and_clear_eir(dev);
2833
2834 if (wedged) {
2835 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2836 &dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the
		 * woken processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset
		 * completion. We must do this for _all_ gpu waiters that
		 * might hold locks that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
2851 i915_error_wake_up(dev_priv, false);
2852 }
2853
	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
2860 schedule_work(&dev_priv->gpu_error.work);
2861}
2862
2863static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2864{
2865 struct drm_i915_private *dev_priv = dev->dev_private;
2866 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2867 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2868 struct drm_i915_gem_object *obj;
2869 struct intel_unpin_work *work;
2870 unsigned long flags;
2871 bool stall_detected;
2872
	/* Ignore early vblank irqs */
2874 if (intel_crtc == NULL)
2875 return;
2876
2877 spin_lock_irqsave(&dev->event_lock, flags);
2878 work = intel_crtc->unpin_work;
2879
2880 if (work == NULL ||
2881 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2882 !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
2884 spin_unlock_irqrestore(&dev->event_lock, flags);
2885 return;
2886 }
2887
	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2889 obj = work->pending_flip_obj;
2890 if (INTEL_INFO(dev)->gen >= 4) {
2891 int dspsurf = DSPSURF(intel_crtc->plane);
2892 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2893 i915_gem_obj_ggtt_offset(obj);
2894 } else {
2895 int dspaddr = DSPADDR(intel_crtc->plane);
2896 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2897 crtc->y * crtc->primary->fb->pitches[0] +
2898 crtc->x * crtc->primary->fb->bits_per_pixel/8);
2899 }
2900
2901 spin_unlock_irqrestore(&dev->event_lock, flags);
2902
2903 if (stall_detected) {
2904 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2905 intel_prepare_page_flip(dev, intel_crtc->plane);
2906 }
2907}
2908
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2912static int i915_enable_vblank(struct drm_device *dev, int pipe)
2913{
2914 struct drm_i915_private *dev_priv = dev->dev_private;
2915 unsigned long irqflags;
2916
2917 if (!i915_pipe_enabled(dev, pipe))
2918 return -EINVAL;
2919
2920 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2921 if (INTEL_INFO(dev)->gen >= 4)
2922 i915_enable_pipestat(dev_priv, pipe,
2923 PIPE_START_VBLANK_INTERRUPT_STATUS);
2924 else
2925 i915_enable_pipestat(dev_priv, pipe,
2926 PIPE_VBLANK_INTERRUPT_STATUS);
2927 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2928
2929 return 0;
2930}
2931
2932static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2933{
2934 struct drm_i915_private *dev_priv = dev->dev_private;
2935 unsigned long irqflags;
2936 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2937 DE_PIPE_VBLANK(pipe);
2938
2939 if (!i915_pipe_enabled(dev, pipe))
2940 return -EINVAL;
2941
2942 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2943 ironlake_enable_display_irq(dev_priv, bit);
2944 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2945
2946 return 0;
2947}
2948
2949static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2950{
2951 struct drm_i915_private *dev_priv = dev->dev_private;
2952 unsigned long irqflags;
2953
2954 if (!i915_pipe_enabled(dev, pipe))
2955 return -EINVAL;
2956
2957 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2958 i915_enable_pipestat(dev_priv, pipe,
2959 PIPE_START_VBLANK_INTERRUPT_STATUS);
2960 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2961
2962 return 0;
2963}
2964
2965static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2966{
2967 struct drm_i915_private *dev_priv = dev->dev_private;
2968 unsigned long irqflags;
2969
2970 if (!i915_pipe_enabled(dev, pipe))
2971 return -EINVAL;
2972
2973 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2974 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2975 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2976 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2977 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2978 return 0;
2979}
2980
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2984static void i915_disable_vblank(struct drm_device *dev, int pipe)
2985{
2986 struct drm_i915_private *dev_priv = dev->dev_private;
2987 unsigned long irqflags;
2988
2989 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2990 i915_disable_pipestat(dev_priv, pipe,
2991 PIPE_VBLANK_INTERRUPT_STATUS |
2992 PIPE_START_VBLANK_INTERRUPT_STATUS);
2993 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2994}
2995
2996static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2997{
2998 struct drm_i915_private *dev_priv = dev->dev_private;
2999 unsigned long irqflags;
3000 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
3001 DE_PIPE_VBLANK(pipe);
3002
3003 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3004 ironlake_disable_display_irq(dev_priv, bit);
3005 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3006}
3007
3008static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
3009{
3010 struct drm_i915_private *dev_priv = dev->dev_private;
3011 unsigned long irqflags;
3012
3013 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3014 i915_disable_pipestat(dev_priv, pipe,
3015 PIPE_START_VBLANK_INTERRUPT_STATUS);
3016 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3017}
3018
3019static void gen8_disable_vblank(struct drm_device *dev, int pipe)
3020{
3021 struct drm_i915_private *dev_priv = dev->dev_private;
3022 unsigned long irqflags;
3023
3024 if (!i915_pipe_enabled(dev, pipe))
3025 return;
3026
3027 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3028 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
3029 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3030 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
3031 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3032}
3033
3034static u32
3035ring_last_seqno(struct intel_engine_cs *ring)
3036{
3037 return list_entry(ring->request_list.prev,
3038 struct drm_i915_gem_request, list)->seqno;
3039}
3040
3041static bool
3042ring_idle(struct intel_engine_cs *ring, u32 seqno)
3043{
3044 return (list_empty(&ring->request_list) ||
3045 i915_seqno_passed(seqno, ring_last_seqno(ring)));
3046}
3047
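/*
 * Hangcheck helpers: decode a ring's last-fetched command (IPEHR) to
 * decide whether it is blocked on a semaphore wait, and if so which
 * ring is expected to signal it.
 */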
3048static bool
3049ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
3050{
3051 if (INTEL_INFO(dev)->gen >= 8) {
3052 return (ipehr >> 23) == 0x1c;
3053 } else {
3054 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
3055 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
3056 MI_SEMAPHORE_REGISTER);
3057 }
3058}
3059
3060static struct intel_engine_cs *
3061semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
3062{
3063 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3064 struct intel_engine_cs *signaller;
3065 int i;
3066
3067 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
3068 for_each_ring(signaller, dev_priv, i) {
3069 if (ring == signaller)
3070 continue;
3071
3072 if (offset == signaller->semaphore.signal_ggtt[ring->id])
3073 return signaller;
3074 }
3075 } else {
3076 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
3077
3078 for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
3080 continue;
3081
3082 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
3083 return signaller;
3084 }
3085 }
3086
3087 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
3088 ring->id, ipehr, offset);
3089
3090 return NULL;
3091}
3092
3093static struct intel_engine_cs *
3094semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
3095{
3096 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3097 u32 cmd, ipehr, head;
3098 u64 offset = 0;
3099 int i, backwards;
3100
3101 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
3102 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
3103 return NULL;
3104
	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
3113 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3114 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
3115
3116 for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
3122 head &= ring->buffer->size - 1;
3123
		/* Read the dword at HEAD and compare against the wait command */
3125 cmd = ioread32(ring->buffer->virtual_start + head);
3126 if (cmd == ipehr)
3127 break;
3128
3129 head -= 4;
3130 }
3131
3132 if (!i)
3133 return NULL;
3134
3135 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
3136 if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
3140 }
3141 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
3142}
3143
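/*
 * Returns 1 if the semaphore this ring is waiting on has already been
 * signalled, 0 if it is still outstanding, and -1 when no signaller can
 * be determined or a semaphore deadlock is suspected.
 */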
3144static int semaphore_passed(struct intel_engine_cs *ring)
3145{
3146 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3147 struct intel_engine_cs *signaller;
3148 u32 seqno;
3149
3150 ring->hangcheck.deadlock++;
3151
3152 signaller = semaphore_waits_for(ring, &seqno);
3153 if (signaller == NULL)
3154 return -1;
3155
	/* Prevent pathological recursion due to driver bugs */
3157 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
3158 return -1;
3159
3160 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
3161 return 1;
3162
	/* cursory check for an unkickable deadlock */
3164 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
3165 semaphore_passed(signaller) < 0)
3166 return -1;
3167
3168 return 0;
3169}
3170
3171static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3172{
3173 struct intel_engine_cs *ring;
3174 int i;
3175
3176 for_each_ring(ring, dev_priv, i)
3177 ring->hangcheck.deadlock = 0;
3178}
3179
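/*
 * Classify a ring whose seqno has not advanced: still making forward
 * progress on ACTHD, stuck on a kickable WAIT_FOR_EVENT or semaphore,
 * or genuinely hung.
 */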
3180static enum intel_ring_hangcheck_action
3181ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3182{
3183 struct drm_device *dev = ring->dev;
3184 struct drm_i915_private *dev_priv = dev->dev_private;
3185 u32 tmp;
3186
3187 if (acthd != ring->hangcheck.acthd) {
3188 if (acthd > ring->hangcheck.max_acthd) {
3189 ring->hangcheck.max_acthd = acthd;
3190 return HANGCHECK_ACTIVE;
3191 }
3192
3193 return HANGCHECK_ACTIVE_LOOP;
3194 }
3195
3196 if (IS_GEN2(dev))
3197 return HANGCHECK_HUNG;
3198
	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
3204 tmp = I915_READ_CTL(ring);
3205 if (tmp & RING_WAIT) {
3206 i915_handle_error(dev, false,
3207 "Kicking stuck wait on %s",
3208 ring->name);
3209 I915_WRITE_CTL(ring, tmp);
3210 return HANGCHECK_KICK;
3211 }
3212
3213 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3214 switch (semaphore_passed(ring)) {
3215 default:
3216 return HANGCHECK_HUNG;
3217 case 1:
3218 i915_handle_error(dev, false,
3219 "Kicking stuck semaphore on %s",
3220 ring->name);
3221 I915_WRITE_CTL(ring, tmp);
3222 return HANGCHECK_KICK;
3223 case 0:
3224 return HANGCHECK_WAIT;
3225 }
3226 }
3227
3228 return HANGCHECK_HUNG;
3229}
3230
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress
 * and if there is no progress, the hangcheck score for that ring is
 * increased. Further, acthd is inspected to see if the ring is stuck. In
 * the stuck case we kick the ring. If we see no progress on three
 * subsequent calls we assume the chip is wedged and try to fix it by
 * resetting the chip.
 */
3239static void i915_hangcheck_elapsed(unsigned long data)
3240{
3241 struct drm_device *dev = (struct drm_device *)data;
3242 struct drm_i915_private *dev_priv = dev->dev_private;
3243 struct intel_engine_cs *ring;
3244 int i;
3245 int busy_count = 0, rings_hung = 0;
3246 bool stuck[I915_NUM_RINGS] = { 0 };
3247#define BUSY 1
3248#define KICK 5
3249#define HUNG 20
3250
3251 if (!i915.enable_hangcheck)
3252 return;
3253
3254 for_each_ring(ring, dev_priv, i) {
3255 u64 acthd;
3256 u32 seqno;
3257 bool busy = true;
3258
3259 semaphore_clear_deadlocks(dev_priv);
3260
3261 seqno = ring->get_seqno(ring, false);
3262 acthd = intel_ring_get_active_head(ring);
3263
3264 if (ring->hangcheck.seqno == seqno) {
3265 if (ring_idle(ring, seqno)) {
3266 ring->hangcheck.action = HANGCHECK_IDLE;
3267
3268 if (waitqueue_active(&ring->irq_queue)) {
				/* Issue a wake-up to catch stuck h/w. */
3270 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3271 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3272 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3273 ring->name);
3274 else
3275 DRM_INFO("Fake missed irq on %s\n",
3276 ring->name);
3277 wake_up_all(&ring->irq_queue);
3278 }
3279
3280 ring->hangcheck.score += BUSY;
3281 } else
3282 busy = false;
3283 } else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when it
				 * is in a legitimate wait for another ring.
				 * In that case the waiting ring is a victim
				 * and we want to be sure we catch the right
				 * culprit. Then every time we do kick the
				 * ring, add a small increment to the score so
				 * that we can catch a batch that is being
				 * repeatedly kicked and so responsible for
				 * stalling the machine.
				 */
3299 ring->hangcheck.action = ring_stuck(ring,
3300 acthd);
3301
3302 switch (ring->hangcheck.action) {
3303 case HANGCHECK_IDLE:
3304 case HANGCHECK_WAIT:
3305 case HANGCHECK_ACTIVE:
3306 break;
3307 case HANGCHECK_ACTIVE_LOOP:
3308 ring->hangcheck.score += BUSY;
3309 break;
3310 case HANGCHECK_KICK:
3311 ring->hangcheck.score += KICK;
3312 break;
3313 case HANGCHECK_HUNG:
3314 ring->hangcheck.score += HUNG;
3315 stuck[i] = true;
3316 break;
3317 }
3318 }
3319 } else {
3320 ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
3325 if (ring->hangcheck.score > 0)
3326 ring->hangcheck.score--;
3327
3328 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3329 }
3330
3331 ring->hangcheck.seqno = seqno;
3332 ring->hangcheck.acthd = acthd;
3333 busy_count += busy;
3334 }
3335
3336 for_each_ring(ring, dev_priv, i) {
3337 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3338 DRM_INFO("%s on %s\n",
3339 stuck[i] ? "stuck" : "no progress",
3340 ring->name);
3341 rings_hung++;
3342 }
3343 }
3344
3345 if (rings_hung)
3346 return i915_handle_error(dev, true, "Ring hung");
3347
3348 if (busy_count)
		/* Reset timer in case chip hangs without another request
		 * being added */
3351 i915_queue_hangcheck(dev);
3352}
3353
3354void i915_queue_hangcheck(struct drm_device *dev)
3355{
3356 struct drm_i915_private *dev_priv = dev->dev_private;
3357 if (!i915.enable_hangcheck)
3358 return;
3359
3360 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3361 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3362}
3363
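/*
 * IRQ reset and (pre/post)install helpers.  The reset routines mask and
 * ack every source; the postinstall routines then unmask only what each
 * platform actually needs.
 */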
3364static void ibx_irq_reset(struct drm_device *dev)
3365{
3366 struct drm_i915_private *dev_priv = dev->dev_private;
3367
3368 if (HAS_PCH_NOP(dev))
3369 return;
3370
3371 GEN5_IRQ_RESET(SDE);
3372
3373 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3374 I915_WRITE(SERR_INT, 0xffffffff);
3375}
3376
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is
 * enabled - instead we unconditionally enable all PCH interrupt sources
 * here, but then only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
3385static void ibx_irq_pre_postinstall(struct drm_device *dev)
3386{
3387 struct drm_i915_private *dev_priv = dev->dev_private;
3388
3389 if (HAS_PCH_NOP(dev))
3390 return;
3391
3392 WARN_ON(I915_READ(SDEIER) != 0);
3393 I915_WRITE(SDEIER, 0xffffffff);
3394 POSTING_READ(SDEIER);
3395}
3396
3397static void gen5_gt_irq_reset(struct drm_device *dev)
3398{
3399 struct drm_i915_private *dev_priv = dev->dev_private;
3400
3401 GEN5_IRQ_RESET(GT);
3402 if (INTEL_INFO(dev)->gen >= 6)
3403 GEN5_IRQ_RESET(GEN6_PM);
3404}
3405
/* drm_dma.h hooks */
3408static void ironlake_irq_reset(struct drm_device *dev)
3409{
3410 struct drm_i915_private *dev_priv = dev->dev_private;
3411
3412 I915_WRITE(HWSTAM, 0xffffffff);
3413
3414 GEN5_IRQ_RESET(DE);
3415 if (IS_GEN7(dev))
3416 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3417
3418 gen5_gt_irq_reset(dev);
3419
3420 ibx_irq_reset(dev);
3421}
3422
3423static void valleyview_irq_preinstall(struct drm_device *dev)
3424{
3425 struct drm_i915_private *dev_priv = dev->dev_private;
3426 int pipe;
3427
	/* VLV magic */
3429 I915_WRITE(VLV_IMR, 0);
3430 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3431 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3432 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3433
	/* and GT */
3435 I915_WRITE(GTIIR, I915_READ(GTIIR));
3436 I915_WRITE(GTIIR, I915_READ(GTIIR));
3437
3438 gen5_gt_irq_reset(dev);
3439
3440 I915_WRITE(DPINVGTT, 0xff);
3441
3442 I915_WRITE(PORT_HOTPLUG_EN, 0);
3443 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3444 for_each_pipe(pipe)
3445 I915_WRITE(PIPESTAT(pipe), 0xffff);
3446 I915_WRITE(VLV_IIR, 0xffffffff);
3447 I915_WRITE(VLV_IMR, 0xffffffff);
3448 I915_WRITE(VLV_IER, 0x0);
3449 POSTING_READ(VLV_IER);
3450}
3451
3452static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3453{
3454 GEN8_IRQ_RESET_NDX(GT, 0);
3455 GEN8_IRQ_RESET_NDX(GT, 1);
3456 GEN8_IRQ_RESET_NDX(GT, 2);
3457 GEN8_IRQ_RESET_NDX(GT, 3);
3458}
3459
3460static void gen8_irq_reset(struct drm_device *dev)
3461{
3462 struct drm_i915_private *dev_priv = dev->dev_private;
3463 int pipe;
3464
3465 I915_WRITE(GEN8_MASTER_IRQ, 0);
3466 POSTING_READ(GEN8_MASTER_IRQ);
3467
3468 gen8_gt_irq_reset(dev_priv);
3469
3470 for_each_pipe(pipe)
3471 if (intel_display_power_enabled(dev_priv,
3472 POWER_DOMAIN_PIPE(pipe)))
3473 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3474
3475 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3476 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3477 GEN5_IRQ_RESET(GEN8_PCU_);
3478
3479 ibx_irq_reset(dev);
3480}
3481
3482void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3483{
3484 unsigned long irqflags;
3485
3486 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3487 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3488 ~dev_priv->de_irq_mask[PIPE_B]);
3489 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3490 ~dev_priv->de_irq_mask[PIPE_C]);
3491 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3492}
3493
3494static void cherryview_irq_preinstall(struct drm_device *dev)
3495{
3496 struct drm_i915_private *dev_priv = dev->dev_private;
3497 int pipe;
3498
3499 I915_WRITE(GEN8_MASTER_IRQ, 0);
3500 POSTING_READ(GEN8_MASTER_IRQ);
3501
3502 gen8_gt_irq_reset(dev_priv);
3503
3504 GEN5_IRQ_RESET(GEN8_PCU_);
3505
3506 POSTING_READ(GEN8_PCU_IIR);
3507
3508 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3509
3510 I915_WRITE(PORT_HOTPLUG_EN, 0);
3511 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3512
3513 for_each_pipe(pipe)
3514 I915_WRITE(PIPESTAT(pipe), 0xffff);
3515
3516 I915_WRITE(VLV_IMR, 0xffffffff);
3517 I915_WRITE(VLV_IER, 0x0);
3518 I915_WRITE(VLV_IIR, 0xffffffff);
3519 POSTING_READ(VLV_IIR);
3520}
3521
3522static void ibx_hpd_irq_setup(struct drm_device *dev)
3523{
3524 struct drm_i915_private *dev_priv = dev->dev_private;
3525 struct drm_mode_config *mode_config = &dev->mode_config;
3526 struct intel_encoder *intel_encoder;
3527 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3528
3529 if (HAS_PCH_IBX(dev)) {
3530 hotplug_irqs = SDE_HOTPLUG_MASK;
3531 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3532 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3533 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3534 } else {
3535 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3536 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3537 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3538 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3539 }
3540
3541 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3542
	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
3549 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3550 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3551 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3552 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3553 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3554 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3555}
3556
3557static void ibx_irq_postinstall(struct drm_device *dev)
3558{
3559 struct drm_i915_private *dev_priv = dev->dev_private;
3560 u32 mask;
3561
3562 if (HAS_PCH_NOP(dev))
3563 return;
3564
3565 if (HAS_PCH_IBX(dev))
3566 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3567 else
3568 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3569
3570 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3571 I915_WRITE(SDEIMR, ~mask);
3572}
3573
3574static void gen5_gt_irq_postinstall(struct drm_device *dev)
3575{
3576 struct drm_i915_private *dev_priv = dev->dev_private;
3577 u32 pm_irqs, gt_irqs;
3578
3579 pm_irqs = gt_irqs = 0;
3580
3581 dev_priv->gt_irq_mask = ~0;
3582 if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
3584 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3585 gt_irqs |= GT_PARITY_ERROR(dev);
3586 }
3587
3588 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3589 if (IS_GEN5(dev)) {
3590 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3591 ILK_BSD_USER_INTERRUPT;
3592 } else {
3593 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3594 }
3595
3596 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3597
3598 if (INTEL_INFO(dev)->gen >= 6) {
3599 pm_irqs |= dev_priv->pm_rps_events;
3600
3601 if (HAS_VEBOX(dev))
3602 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3603
3604 dev_priv->pm_irq_mask = 0xffffffff;
3605 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3606 }
3607}
3608
3609static int ironlake_irq_postinstall(struct drm_device *dev)
3610{
3611 unsigned long irqflags;
3612 struct drm_i915_private *dev_priv = dev->dev_private;
3613 u32 display_mask, extra_mask;
3614
3615 if (INTEL_INFO(dev)->gen >= 7) {
3616 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3617 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3618 DE_PLANEB_FLIP_DONE_IVB |
3619 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3620 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3621 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3622 } else {
3623 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3624 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3625 DE_AUX_CHANNEL_A |
3626 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3627 DE_POISON);
3628 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3629 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3630 }
3631
3632 dev_priv->irq_mask = ~display_mask;
3633
3634 I915_WRITE(HWSTAM, 0xeffe);
3635
3636 ibx_irq_pre_postinstall(dev);
3637
3638 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3639
3640 gen5_gt_irq_postinstall(dev);
3641
3642 ibx_irq_postinstall(dev);
3643
3644 if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
3650 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3651 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3652 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3653 }
3654
3655 return 0;
3656}
3657
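/*
 * VLV display interrupts are enabled in stages: ack any stale PIPESTAT
 * bits, enable the per-pipe status sources, then unmask the summary bits
 * in VLV_IMR/VLV_IER.  The uninstall path below undoes this in reverse.
 */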
3658static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3659{
3660 u32 pipestat_mask;
3661 u32 iir_mask;
3662
3663 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3664 PIPE_FIFO_UNDERRUN_STATUS;
3665
3666 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3667 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3668 POSTING_READ(PIPESTAT(PIPE_A));
3669
3670 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3671 PIPE_CRC_DONE_INTERRUPT_STATUS;
3672
3673 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3674 PIPE_GMBUS_INTERRUPT_STATUS);
3675 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3676
3677 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3678 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3679 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3680 dev_priv->irq_mask &= ~iir_mask;
3681
3682 I915_WRITE(VLV_IIR, iir_mask);
3683 I915_WRITE(VLV_IIR, iir_mask);
3684 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3685 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3686 POSTING_READ(VLV_IER);
3687}
3688
3689static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3690{
3691 u32 pipestat_mask;
3692 u32 iir_mask;
3693
3694 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3695 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3696 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3697
3698 dev_priv->irq_mask |= iir_mask;
3699 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3700 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3701 I915_WRITE(VLV_IIR, iir_mask);
3702 I915_WRITE(VLV_IIR, iir_mask);
3703 POSTING_READ(VLV_IIR);
3704
3705 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3706 PIPE_CRC_DONE_INTERRUPT_STATUS;
3707
3708 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3709 PIPE_GMBUS_INTERRUPT_STATUS);
3710 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3711
3712 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3713 PIPE_FIFO_UNDERRUN_STATUS;
3714 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3715 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3716 POSTING_READ(PIPESTAT(PIPE_A));
3717}
3718
3719void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3720{
3721 assert_spin_locked(&dev_priv->irq_lock);
3722
3723 if (dev_priv->display_irqs_enabled)
3724 return;
3725
3726 dev_priv->display_irqs_enabled = true;
3727
3728 if (dev_priv->dev->irq_enabled)
3729 valleyview_display_irqs_install(dev_priv);
3730}
3731
3732void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3733{
3734 assert_spin_locked(&dev_priv->irq_lock);
3735
3736 if (!dev_priv->display_irqs_enabled)
3737 return;
3738
3739 dev_priv->display_irqs_enabled = false;
3740
3741 if (dev_priv->dev->irq_enabled)
3742 valleyview_display_irqs_uninstall(dev_priv);
3743}
3744
3745static int valleyview_irq_postinstall(struct drm_device *dev)
3746{
3747 struct drm_i915_private *dev_priv = dev->dev_private;
3748 unsigned long irqflags;
3749
3750 dev_priv->irq_mask = ~0;
3751
3752 I915_WRITE(PORT_HOTPLUG_EN, 0);
3753 POSTING_READ(PORT_HOTPLUG_EN);
3754
3755 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3756 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3757 I915_WRITE(VLV_IIR, 0xffffffff);
3758 POSTING_READ(VLV_IER);
3759
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
3762 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3763 if (dev_priv->display_irqs_enabled)
3764 valleyview_display_irqs_install(dev_priv);
3765 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3766
3767 I915_WRITE(VLV_IIR, 0xffffffff);
3768 I915_WRITE(VLV_IIR, 0xffffffff);
3769
3770 gen5_gt_irq_postinstall(dev);
3771
	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
3774 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3775 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3776#endif
3777
3778 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3779
3780 return 0;
3781}
3782
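/*
 * GT interrupt enables span four GEN8_GT_IER banks.  Bank 2 is left
 * empty here and pm_irq_mask is set to all-masked; presumably the RPS
 * code unmasks the PM interrupts it wants later.
 */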
3783static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3784{
3785 int i;
3786
	/* These are interrupts we'll toggle with the ring mask register */
3788 uint32_t gt_interrupts[] = {
3789 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3790 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3791 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3792 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3793 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3794 0,
3795 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3796 };
3797
3798 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
3799 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
3800
3801 dev_priv->pm_irq_mask = 0xffffffff;
3802}
3803
3804static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3805{
3806 struct drm_device *dev = dev_priv->dev;
3807 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3808 GEN8_PIPE_CDCLK_CRC_DONE |
3809 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3810 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3811 GEN8_PIPE_FIFO_UNDERRUN;
3812 int pipe;
3813 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3814 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3815 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3816
3817 for_each_pipe(pipe)
3818 if (intel_display_power_enabled(dev_priv,
3819 POWER_DOMAIN_PIPE(pipe)))
3820 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3821 dev_priv->de_irq_mask[pipe],
3822 de_pipe_enables);
3823
3824 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3825}
3826
3827static int gen8_irq_postinstall(struct drm_device *dev)
3828{
3829 struct drm_i915_private *dev_priv = dev->dev_private;
3830
3831 ibx_irq_pre_postinstall(dev);
3832
3833 gen8_gt_irq_postinstall(dev_priv);
3834 gen8_de_irq_postinstall(dev_priv);
3835
3836 ibx_irq_postinstall(dev);
3837
3838 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3839 POSTING_READ(GEN8_MASTER_IRQ);
3840
3841 return 0;
3842}
3843
3844static int cherryview_irq_postinstall(struct drm_device *dev)
3845{
3846 struct drm_i915_private *dev_priv = dev->dev_private;
3847 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3848 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3849 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3850 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3851 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3852 PIPE_CRC_DONE_INTERRUPT_STATUS;
3853 unsigned long irqflags;
3854 int pipe;
3855
	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
3860 dev_priv->irq_mask = ~enable_mask;
3861
3862 for_each_pipe(pipe)
3863 I915_WRITE(PIPESTAT(pipe), 0xffff);
3864
3865 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3866 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3867 for_each_pipe(pipe)
3868 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3869 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3870
3871 I915_WRITE(VLV_IIR, 0xffffffff);
3872 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3873 I915_WRITE(VLV_IER, enable_mask);
3874
3875 gen8_gt_irq_postinstall(dev_priv);
3876
3877 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3878 POSTING_READ(GEN8_MASTER_IRQ);
3879
3880 return 0;
3881}
3882
3883static void gen8_irq_uninstall(struct drm_device *dev)
3884{
3885 struct drm_i915_private *dev_priv = dev->dev_private;
3886
3887 if (!dev_priv)
3888 return;
3889
3890 gen8_irq_reset(dev);
3891}
3892
3893static void valleyview_irq_uninstall(struct drm_device *dev)
3894{
3895 struct drm_i915_private *dev_priv = dev->dev_private;
3896 unsigned long irqflags;
3897 int pipe;
3898
3899 if (!dev_priv)
3900 return;
3901
3902 I915_WRITE(VLV_MASTER_IER, 0);
3903
3904 for_each_pipe(pipe)
3905 I915_WRITE(PIPESTAT(pipe), 0xffff);
3906
3907 I915_WRITE(HWSTAM, 0xffffffff);
3908 I915_WRITE(PORT_HOTPLUG_EN, 0);
3909 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3910
3911 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3912 if (dev_priv->display_irqs_enabled)
3913 valleyview_display_irqs_uninstall(dev_priv);
3914 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3915
3916 dev_priv->irq_mask = 0;
3917
3918 I915_WRITE(VLV_IIR, 0xffffffff);
3919 I915_WRITE(VLV_IMR, 0xffffffff);
3920 I915_WRITE(VLV_IER, 0x0);
3921 POSTING_READ(VLV_IER);
3922}
3923
3924static void cherryview_irq_uninstall(struct drm_device *dev)
3925{
3926 struct drm_i915_private *dev_priv = dev->dev_private;
3927 int pipe;
3928
3929 if (!dev_priv)
3930 return;
3931
3932 I915_WRITE(GEN8_MASTER_IRQ, 0);
3933 POSTING_READ(GEN8_MASTER_IRQ);
3934
3935#define GEN8_IRQ_FINI_NDX(type, which) \
3936do { \
3937 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3938 I915_WRITE(GEN8_##type##_IER(which), 0); \
3939 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3940 POSTING_READ(GEN8_##type##_IIR(which)); \
3941 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3942} while (0)
3943
3944#define GEN8_IRQ_FINI(type) \
3945do { \
3946 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3947 I915_WRITE(GEN8_##type##_IER, 0); \
3948 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3949 POSTING_READ(GEN8_##type##_IIR); \
3950 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3951} while (0)
3952
3953 GEN8_IRQ_FINI_NDX(GT, 0);
3954 GEN8_IRQ_FINI_NDX(GT, 1);
3955 GEN8_IRQ_FINI_NDX(GT, 2);
3956 GEN8_IRQ_FINI_NDX(GT, 3);
3957
3958 GEN8_IRQ_FINI(PCU);
3959
3960#undef GEN8_IRQ_FINI
3961#undef GEN8_IRQ_FINI_NDX
3962
3963 I915_WRITE(PORT_HOTPLUG_EN, 0);
3964 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3965
3966 for_each_pipe(pipe)
3967 I915_WRITE(PIPESTAT(pipe), 0xffff);
3968
3969 I915_WRITE(VLV_IMR, 0xffffffff);
3970 I915_WRITE(VLV_IER, 0x0);
3971 I915_WRITE(VLV_IIR, 0xffffffff);
3972 POSTING_READ(VLV_IIR);
3973}
3974
3975static void ironlake_irq_uninstall(struct drm_device *dev)
3976{
3977 struct drm_i915_private *dev_priv = dev->dev_private;
3978
3979 if (!dev_priv)
3980 return;
3981
3982 ironlake_irq_reset(dev);
3983}
3984
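/*
 * Gen2 (i8xx) exposes 16-bit interrupt registers, hence the
 * I915_WRITE16/I915_READ16 accessors used throughout the i8xx paths
 * below.
 */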
3985static void i8xx_irq_preinstall(struct drm_device * dev)
3986{
3987 struct drm_i915_private *dev_priv = dev->dev_private;
3988 int pipe;
3989
3990 for_each_pipe(pipe)
3991 I915_WRITE(PIPESTAT(pipe), 0);
3992 I915_WRITE16(IMR, 0xffff);
3993 I915_WRITE16(IER, 0x0);
3994 POSTING_READ16(IER);
3995}
3996
3997static int i8xx_irq_postinstall(struct drm_device *dev)
3998{
3999 struct drm_i915_private *dev_priv = dev->dev_private;
4000 unsigned long irqflags;
4001
4002 I915_WRITE16(EMR,
4003 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4004
	/* Unmask the interrupts that we always want on. */
4006 dev_priv->irq_mask =
4007 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4008 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4009 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4010 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4011 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4012 I915_WRITE16(IMR, dev_priv->irq_mask);
4013
4014 I915_WRITE16(IER,
4015 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4016 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4017 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4018 I915_USER_INTERRUPT);
4019 POSTING_READ16(IER);
4020
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
4023 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4024 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4025 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4026 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4027
4028 return 0;
4029}
4030
/*
 * Returns true when a page flip has completed.
 */
4034static bool i8xx_handle_vblank(struct drm_device *dev,
4035 int plane, int pipe, u32 iir)
4036{
4037 struct drm_i915_private *dev_priv = dev->dev_private;
4038 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4039
4040 if (!intel_pipe_handle_vblank(dev, pipe))
4041 return false;
4042
4043 if ((iir & flip_pending) == 0)
4044 return false;
4045
4046 intel_prepare_page_flip(dev, plane);
4047
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
4054 if (I915_READ16(ISR) & flip_pending)
4055 return false;
4056
4057 intel_finish_page_flip(dev, pipe);
4058
4059 return true;
4060}
4061
4062static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4063{
4064 struct drm_device *dev = arg;
4065 struct drm_i915_private *dev_priv = dev->dev_private;
4066 u16 iir, new_iir;
4067 u32 pipe_stats[2];
4068 unsigned long irqflags;
4069 int pipe;
4070 u16 flip_mask =
4071 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4072 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4073
4074 iir = I915_READ16(IIR);
4075 if (iir == 0)
4076 return IRQ_NONE;
4077
4078 while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
4084 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4085 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4086 i915_handle_error(dev, false,
4087 "Command parser error, iir 0x%08x",
4088 iir);
4089
4090 for_each_pipe(pipe) {
4091 int reg = PIPESTAT(pipe);
4092 pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
4097 if (pipe_stats[pipe] & 0x8000ffff)
4098 I915_WRITE(reg, pipe_stats[pipe]);
4099 }
4100 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4101
4102 I915_WRITE16(IIR, iir & ~flip_mask);
4103 new_iir = I915_READ16(IIR);
4104
4105 i915_update_dri1_breadcrumb(dev);
4106
4107 if (iir & I915_USER_INTERRUPT)
4108 notify_ring(dev, &dev_priv->ring[RCS]);
4109
4110 for_each_pipe(pipe) {
4111 int plane = pipe;
4112 if (HAS_FBC(dev))
4113 plane = !plane;
4114
4115 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4116 i8xx_handle_vblank(dev, plane, pipe, iir))
4117 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4118
4119 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4120 i9xx_pipe_crc_irq_handler(dev, pipe);
4121
4122 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4123 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4124 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4125 }
4126
4127 iir = new_iir;
4128 }
4129
4130 return IRQ_HANDLED;
4131}
4132
4133static void i8xx_irq_uninstall(struct drm_device * dev)
4134{
4135 struct drm_i915_private *dev_priv = dev->dev_private;
4136 int pipe;
4137
4138 for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
4140 I915_WRITE(PIPESTAT(pipe), 0);
4141 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4142 }
4143 I915_WRITE16(IMR, 0xffff);
4144 I915_WRITE16(IER, 0x0);
4145 I915_WRITE16(IIR, I915_READ16(IIR));
4146}
4147
4148static void i915_irq_preinstall(struct drm_device * dev)
4149{
4150 struct drm_i915_private *dev_priv = dev->dev_private;
4151 int pipe;
4152
4153 if (I915_HAS_HOTPLUG(dev)) {
4154 I915_WRITE(PORT_HOTPLUG_EN, 0);
4155 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4156 }
4157
4158 I915_WRITE16(HWSTAM, 0xeffe);
4159 for_each_pipe(pipe)
4160 I915_WRITE(PIPESTAT(pipe), 0);
4161 I915_WRITE(IMR, 0xffffffff);
4162 I915_WRITE(IER, 0x0);
4163 POSTING_READ(IER);
4164}
4165
4166static int i915_irq_postinstall(struct drm_device *dev)
4167{
4168 struct drm_i915_private *dev_priv = dev->dev_private;
4169 u32 enable_mask;
4170 unsigned long irqflags;
4171
4172 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4173
	/* Unmask the interrupts that we always want on. */
4175 dev_priv->irq_mask =
4176 ~(I915_ASLE_INTERRUPT |
4177 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4178 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4179 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4180 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4181 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4182
4183 enable_mask =
4184 I915_ASLE_INTERRUPT |
4185 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4186 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4187 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4188 I915_USER_INTERRUPT;
4189
4190 if (I915_HAS_HOTPLUG(dev)) {
4191 I915_WRITE(PORT_HOTPLUG_EN, 0);
4192 POSTING_READ(PORT_HOTPLUG_EN);
4193
		/* Enable in IER... */
4195 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
4197 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4198 }
4199
4200 I915_WRITE(IMR, dev_priv->irq_mask);
4201 I915_WRITE(IER, enable_mask);
4202 POSTING_READ(IER);
4203
4204 i915_enable_asle_pipestat(dev);
4205
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
4208 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4209 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4210 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4211 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4212
4213 return 0;
4214}
4215
/*
 * Returns true when a page flip has completed.
 */
4219static bool i915_handle_vblank(struct drm_device *dev,
4220 int plane, int pipe, u32 iir)
4221{
4222 struct drm_i915_private *dev_priv = dev->dev_private;
4223 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4224
4225 if (!intel_pipe_handle_vblank(dev, pipe))
4226 return false;
4227
4228 if ((iir & flip_pending) == 0)
4229 return false;
4230
4231 intel_prepare_page_flip(dev, plane);
4232
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
4239 if (I915_READ(ISR) & flip_pending)
4240 return false;
4241
4242 intel_finish_page_flip(dev, pipe);
4243
4244 return true;
4245}
4246
4247static irqreturn_t i915_irq_handler(int irq, void *arg)
4248{
4249 struct drm_device *dev = arg;
4250 struct drm_i915_private *dev_priv = dev->dev_private;
4251 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4252 unsigned long irqflags;
4253 u32 flip_mask =
4254 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4255 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4256 int pipe, ret = IRQ_NONE;
4257
4258 iir = I915_READ(IIR);
4259 do {
4260 bool irq_received = (iir & ~flip_mask) != 0;
4261 bool blc_event = false;
4262
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
4268 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4269 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4270 i915_handle_error(dev, false,
4271 "Command parser error, iir 0x%08x",
4272 iir);
4273
4274 for_each_pipe(pipe) {
4275 int reg = PIPESTAT(pipe);
4276 pipe_stats[pipe] = I915_READ(reg);
4277
			/* Clear the PIPE*STAT regs before the IIR */
4279 if (pipe_stats[pipe] & 0x8000ffff) {
4280 I915_WRITE(reg, pipe_stats[pipe]);
4281 irq_received = true;
4282 }
4283 }
4284 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4285
4286 if (!irq_received)
4287 break;
4288
		/* Consume port.  Then clear IIR or we'll miss events */
4290 if (I915_HAS_HOTPLUG(dev) &&
4291 iir & I915_DISPLAY_PORT_INTERRUPT)
4292 i9xx_hpd_irq_handler(dev);
4293
4294 I915_WRITE(IIR, iir & ~flip_mask);
4295 new_iir = I915_READ(IIR);
4296
4297 if (iir & I915_USER_INTERRUPT)
4298 notify_ring(dev, &dev_priv->ring[RCS]);
4299
4300 for_each_pipe(pipe) {
4301 int plane = pipe;
4302 if (HAS_FBC(dev))
4303 plane = !plane;
4304
4305 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4306 i915_handle_vblank(dev, plane, pipe, iir))
4307 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4308
4309 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4310 blc_event = true;
4311
4312 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4313 i9xx_pipe_crc_irq_handler(dev, pipe);
4314
4315 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4316 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4317 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4318 }
4319
4320 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4321 intel_opregion_asle_intr(dev);
4322
		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate a
		 * spurious new interrupt.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
4338 ret = IRQ_HANDLED;
4339 iir = new_iir;
4340 } while (iir & ~flip_mask);
4341
4342 i915_update_dri1_breadcrumb(dev);
4343
4344 return ret;
4345}
4346
4347static void i915_irq_uninstall(struct drm_device * dev)
4348{
4349 struct drm_i915_private *dev_priv = dev->dev_private;
4350 int pipe;
4351
4352 if (I915_HAS_HOTPLUG(dev)) {
4353 I915_WRITE(PORT_HOTPLUG_EN, 0);
4354 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4355 }
4356
4357 I915_WRITE16(HWSTAM, 0xffff);
4358 for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
4360 I915_WRITE(PIPESTAT(pipe), 0);
4361 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4362 }
4363 I915_WRITE(IMR, 0xffffffff);
4364 I915_WRITE(IER, 0x0);
4365
4366 I915_WRITE(IIR, I915_READ(IIR));
4367}
4368
4369static void i965_irq_preinstall(struct drm_device * dev)
4370{
4371 struct drm_i915_private *dev_priv = dev->dev_private;
4372 int pipe;
4373
4374 I915_WRITE(PORT_HOTPLUG_EN, 0);
4375 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4376
4377 I915_WRITE(HWSTAM, 0xeffe);
4378 for_each_pipe(pipe)
4379 I915_WRITE(PIPESTAT(pipe), 0);
4380 I915_WRITE(IMR, 0xffffffff);
4381 I915_WRITE(IER, 0x0);
4382 POSTING_READ(IER);
4383}
4384
4385static int i965_irq_postinstall(struct drm_device *dev)
4386{
4387 struct drm_i915_private *dev_priv = dev->dev_private;
4388 u32 enable_mask;
4389 u32 error_mask;
4390 unsigned long irqflags;
4391
	/* Unmask the interrupts that we always want on. */
4393 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4394 I915_DISPLAY_PORT_INTERRUPT |
4395 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4396 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4397 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4398 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4399 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4400
4401 enable_mask = ~dev_priv->irq_mask;
4402 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4403 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4404 enable_mask |= I915_USER_INTERRUPT;
4405
4406 if (IS_G4X(dev))
4407 enable_mask |= I915_BSD_USER_INTERRUPT;
4408
4409
4410
4411 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4412 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4413 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4414 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4415 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4416
4417
4418
4419
4420
4421 if (IS_G4X(dev)) {
4422 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4423 GM45_ERROR_MEM_PRIV |
4424 GM45_ERROR_CP_PRIV |
4425 I915_ERROR_MEMORY_REFRESH);
4426 } else {
4427 error_mask = ~(I915_ERROR_PAGE_TABLE |
4428 I915_ERROR_MEMORY_REFRESH);
4429 }
4430 I915_WRITE(EMR, error_mask);
4431
4432 I915_WRITE(IMR, dev_priv->irq_mask);
4433 I915_WRITE(IER, enable_mask);
4434 POSTING_READ(IER);
4435
4436 I915_WRITE(PORT_HOTPLUG_EN, 0);
4437 POSTING_READ(PORT_HOTPLUG_EN);
4438
4439 i915_enable_asle_pipestat(dev);
4440
4441 return 0;
4442}
4443
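/*
 * Program PORT_HOTPLUG_EN from the per-pin hpd_stats state. Called via
 * the display.hpd_irq_setup vfunc with dev_priv->irq_lock held.
 */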
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later.  So just do it once. */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

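/*
 * Gen4 (i965/G4X) interrupt handler: loops until IIR (minus the flip
 * bits) reads back zero, clearing PIPESTAT before IIR so that chained
 * events are not lost while the current bits are being serviced.
 */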
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on the pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

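/*
 * Gen4 teardown: mask and disable all interrupt sources, then clear the
 * sticky PIPESTAT status bits and ack whatever is left in IIR.
 */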
static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

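/*
 * Delayed work that re-enables hotplug detection on pins that were
 * marked HPD_DISABLED (e.g. after an hpd interrupt storm) and restores
 * the polling mode of the affected connectors.
 */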
static void intel_hpd_irq_reenable(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

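/**
 * intel_irq_init - initialize irq support
 * @dev: drm device
 *
 * Sets up the work items, hangcheck timer, rps event mask, vblank hooks
 * and the per-platform irq vtables. It does not install the interrupt
 * handler itself.
 */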
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* Haven't installed the IRQ handler yet */
	dev_priv->pm._irqs_disabled = true;

	if (IS_GEN2(dev)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

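/**
 * intel_hpd_init - initialize and enable hotplug support
 * @dev: drm device
 *
 * Marks all hpd pins as enabled, picks the polling mode for each
 * connector and then programs the hotplug registers through the
 * platform hpd_irq_setup hook.
 */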
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm._irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm._irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}