1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/circ_buf.h>
32#include <linux/cpuidle.h>
33#include <linux/slab.h>
34#include <linux/sysrq.h>
35
36#include <drm/drm_drv.h>
37#include <drm/drm_irq.h>
38#include <drm/i915_drm.h>
39
40#include "display/intel_fifo_underrun.h"
41#include "display/intel_hotplug.h"
42#include "display/intel_lpe_audio.h"
43#include "display/intel_psr.h"
44
45#include "i915_drv.h"
46#include "i915_irq.h"
47#include "i915_trace.h"
48#include "intel_drv.h"
49#include "intel_pm.h"
50
51
52
53
54
55
56
57
58
/*
 * Hotplug pin -> hardware interrupt/status bit lookup tables, one table
 * per platform/register layout. Each is indexed by enum hpd_pin.
 */

/* ILK: only DP port A hotplug lives in the DE interrupt register. */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

/* IVB: port A hotplug uses a different DE bit than ILK. */
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

/* BDW: port A hotplug moved to the DE port interrupt register. */
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

/* IBX/south display: CRT, SDVO-B and digital ports B-D. */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

/* CPT/PPT PCH: same pins as IBX but with CPT-specific bit layout. */
static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

/* SPT PCH: adds ports A and E on top of the CPT layout for B-D. */
static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

/* i915-class: hotplug *enable* bits in PORT_HOTPLUG_EN. */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

/* G4x: hotplug *status* bits (SDVO status bits differ from i915). */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* i915-class: hotplug *status* bits in PORT_HOTPLUG_STAT. */
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT: DDI A-C hotplug in the DE port interrupt register. */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* GEN11: each Type-C port has both a TC and a Thunderbolt hotplug bit. */
static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

/* ICP PCH: DDI A/B plus four Type-C ports. */
static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* MCC PCH: reduced ICP layout — DDI A/B and a single Type-C port. */
static const u32 hpd_mcc[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
};
150
/*
 * Quiesce one IMR/IIR/IER interrupt register triplet: mask everything,
 * disable all enables, then flush any already-latched events.
 * The write order matters — IMR first so nothing new is delivered while
 * we tear the rest down.
 */
static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
			   i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/*
	 * IIR is double-buffered and can hold a second queued event, so
	 * clear it twice, with a posting read in between, to be sure.
	 */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}
165
/*
 * Gen2 variant of gen3_irq_reset(): the interrupt registers are 16 bits
 * wide and live at fixed GEN2_IMR/IER/IIR offsets. Same ordering rules:
 * mask first, then disable, then double-clear the (double-buffered) IIR.
 */
static void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can queue a second event — clear twice to be safe. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
179
/*
 * Convenience wrappers that expand a register-name prefix (and, for
 * GEN8, an engine/pipe index) into the IMR/IIR/IER triplet expected by
 * gen3_irq_reset(). 'which' is evaluated once into which_ to avoid
 * multiple-evaluation of a side-effecting argument.
 */
#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
({ \
	unsigned int which_ = which; \
	gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
		       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
})

#define GEN3_IRQ_RESET(uncore, type) \
	gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)

#define GEN2_IRQ_RESET(uncore) \
	gen2_irq_reset(uncore)
192
193
194
195
/*
 * Sanity check used before (re)enabling an interrupt source: IIR should
 * already be clear. If it isn't, warn loudly and force-clear it (twice,
 * since IIR can hold a queued second event) so we start from a known
 * state instead of immediately taking a stale interrupt.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
210
/*
 * Gen2 (16-bit register) counterpart of gen3_assert_iir_is_zero():
 * warn and force-clear GEN2_IIR if any stale event bits are set.
 */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(GEN2_IIR), val);
	/* Double-clear: IIR can queue a second event. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
225
/*
 * Program one IMR/IER interrupt register pair after verifying the
 * corresponding IIR holds no stale events. IER before IMR so that
 * unmasking (the IMR write) is the final enabling step; the posting
 * read flushes the writes to hardware.
 */
static void gen3_irq_init(struct intel_uncore *uncore,
			  i915_reg_t imr, u32 imr_val,
			  i915_reg_t ier, u32 ier_val,
			  i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}
237
/*
 * Gen2 (16-bit) variant of gen3_irq_init(): verify GEN2_IIR is clear,
 * then program IER followed by IMR, flushing with a posting read.
 */
static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
247
/*
 * Init-side counterparts of the *_IRQ_RESET macros: expand a register
 * prefix (plus index for GEN8) into the triplet for gen3_irq_init().
 * 'which' is evaluated once to avoid double evaluation.
 */
#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
({ \
	unsigned int which_ = which; \
	gen3_irq_init((uncore), \
		      GEN8_##type##_IMR(which_), imr_val, \
		      GEN8_##type##_IER(which_), ier_val, \
		      GEN8_##type##_IIR(which_)); \
})

#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
	gen3_irq_init((uncore), \
		      type##IMR, imr_val, \
		      type##IER, ier_val, \
		      type##IIR)

#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
	gen2_irq_init((uncore), imr_val, ier_val)
265
266static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
267static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
268
269
270static inline void
271i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
272 u32 mask,
273 u32 bits)
274{
275 u32 val;
276
277 lockdep_assert_held(&dev_priv->irq_lock);
278 WARN_ON(bits & ~mask);
279
280 val = I915_READ(PORT_HOTPLUG_EN);
281 val &= ~mask;
282 val |= bits;
283 I915_WRITE(PORT_HOTPLUG_EN, val);
284}
285
286
287
288
289
290
291
292
293
294
295
296
297
/*
 * Locked wrapper around i915_hotplug_interrupt_update_locked() for
 * callers that don't already hold irq_lock. Updates the @mask-selected
 * bits of PORT_HOTPLUG_EN to @bits.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
306
307static u32
308gen11_gt_engine_identity(struct drm_i915_private * const i915,
309 const unsigned int bank, const unsigned int bit);
310
/*
 * Clear one pending bit in a GEN11 GT_INTR_DW bank, if set.
 * Returns true if an event was pending and has been cleared,
 * false if the bit was already clear. Caller holds irq_lock.
 */
static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = i915->uncore.regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * The DW bit cannot be cleared until the selector/shared
		 * IIR registers behind it have been serviced, so read the
		 * engine identity (result deliberately discarded) first.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * Reading GT_INTR_DW locks it for us; clear our bit so
		 * the register isn't left locked for everybody else.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}
341
342
343
344
345
346
347
348void ilk_update_display_irq(struct drm_i915_private *dev_priv,
349 u32 interrupt_mask,
350 u32 enabled_irq_mask)
351{
352 u32 new_val;
353
354 lockdep_assert_held(&dev_priv->irq_lock);
355
356 WARN_ON(enabled_irq_mask & ~interrupt_mask);
357
358 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
359 return;
360
361 new_val = dev_priv->irq_mask;
362 new_val &= ~interrupt_mask;
363 new_val |= (~enabled_irq_mask & interrupt_mask);
364
365 if (new_val != dev_priv->irq_mask) {
366 dev_priv->irq_mask = new_val;
367 I915_WRITE(DEIMR, dev_priv->irq_mask);
368 POSTING_READ(DEIMR);
369 }
370}
371
372
373
374
375
376
377
378static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
379 u32 interrupt_mask,
380 u32 enabled_irq_mask)
381{
382 lockdep_assert_held(&dev_priv->irq_lock);
383
384 WARN_ON(enabled_irq_mask & ~interrupt_mask);
385
386 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
387 return;
388
389 dev_priv->gt_irq_mask &= ~interrupt_mask;
390 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
391 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
392}
393
/* Unmask @mask in GTIMR; the posting read flushes the write. */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
}
399
/* Mask @mask in GTIMR (no posting read — masking need not be flushed). */
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
404
405static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
406{
407 WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
408
409 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
410}
411
/*
 * Flush the cached PM interrupt mask (dev_priv->pm_imr) to whichever
 * register holds it on this platform, with a posting read.
 */
static void write_pm_imr(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask = dev_priv->pm_imr;

	if (INTEL_GEN(dev_priv) >= 11) {
		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
		/* The PM bits occupy the upper half of this register. */
		mask = mask << 16;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		reg = GEN8_GT_IMR(2);
	} else {
		reg = GEN6_PMIMR;
	}

	I915_WRITE(reg, mask);
	POSTING_READ(reg);
}
430
/*
 * Flush the cached PM interrupt enable (dev_priv->pm_ier) to the
 * platform's PM IER register. No posting read — unlike write_pm_imr(),
 * enabling is not ordering-critical here.
 */
static void write_pm_ier(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask = dev_priv->pm_ier;

	if (INTEL_GEN(dev_priv) >= 11) {
		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
		/* The PM bits occupy the upper half of this register. */
		mask = mask << 16;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		reg = GEN8_GT_IER(2);
	} else {
		reg = GEN6_PMIER;
	}

	I915_WRITE(reg, mask);
}
448
449
450
451
452
453
454
455static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
456 u32 interrupt_mask,
457 u32 enabled_irq_mask)
458{
459 u32 new_val;
460
461 WARN_ON(enabled_irq_mask & ~interrupt_mask);
462
463 lockdep_assert_held(&dev_priv->irq_lock);
464
465 new_val = dev_priv->pm_imr;
466 new_val &= ~interrupt_mask;
467 new_val |= (~enabled_irq_mask & interrupt_mask);
468
469 if (new_val != dev_priv->pm_imr) {
470 dev_priv->pm_imr = new_val;
471 write_pm_imr(dev_priv);
472 }
473}
474
/* Unmask @mask PM interrupts; requires driver interrupts enabled. */
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}
482
/* Mask @mask PM interrupts without the intel_irqs_enabled() check —
 * used on teardown paths where interrupts may already be disabled. */
static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
487
/* Mask @mask PM interrupts; requires driver interrupts enabled. */
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}
495
/*
 * Ack (clear) @reset_mask in the PM IIR. The double write is
 * deliberate: IIR is double-buffered and can hold a queued second
 * event. Caller holds irq_lock.
 */
static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}
506
/*
 * Enable then unmask @enable_mask PM interrupts: IER first so the
 * events are deliverable the moment IMR is opened. Caller holds
 * irq_lock.
 */
static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	write_pm_ier(dev_priv);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
516
/*
 * Mask then disable @disable_mask PM interrupts — mirror image of
 * gen6_enable_pm_irq(): IMR closed before IER is cleared. Caller
 * holds irq_lock.
 */
static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	write_pm_ier(dev_priv);
	/* though a barrier is missing here, but don't really need a one */
}
526
/*
 * Drain all pending GEN11 GTPM interrupt events (looping because a new
 * event may be latched while the previous one is cleared) and drop any
 * RPS work already captured in rps.pm_iir.
 */
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}
538
/* Ack all pending RPS events in the PM IIR and clear the cached copy. */
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}
546
/*
 * Enable delivery of RPS up/down-threshold interrupts. Idempotent:
 * bails early if already enabled. Warns if stale events are pending
 * at enable time, since that indicates the reset path was skipped.
 */
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	/* Verify no events are still latched in the hardware IIR. */
	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
567
/*
 * Disable RPS interrupt delivery and tear down any in-flight work.
 * The ordering matters: mark disabled and mask under the lock, wait
 * for the IRQ handler to finish, then cancel the worker, and only
 * then reset the IIR — so nothing can requeue behind our back.
 */
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	/* Mask everything at the GEN6_PMINTRMSK level as well. */
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}
596
/* Ack any pending GuC events in the PM IIR; needs an RPM wakeref. */
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
605
/*
 * Enable GuC-to-host interrupt delivery (gen9 path, via the shared PM
 * interrupt registers). Idempotent; warns if stale GuC events are
 * already latched when enabling. Needs an RPM wakeref.
 */
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts.enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts.enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}
619
/*
 * Disable GuC interrupt delivery (gen9 path): mask under the lock,
 * wait out the IRQ handler, then ack anything still pending. Needs an
 * RPM wakeref.
 */
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts.enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}
634
/* Ack a pending GEN11 GuC event in GT_INTR_DW bank 0, if any. */
void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
{
	spin_lock_irq(&i915->irq_lock);
	gen11_reset_one_iir(i915, 0, GEN11_GUC);
	spin_unlock_irq(&i915->irq_lock);
}
641
/*
 * Enable GuC-to-host interrupts on gen11+ via the dedicated GuC
 * selector registers. Idempotent; warns if an event was already
 * latched (and clears it) before enabling.
 */
void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts.enabled) {
		u32 events = REG_FIELD_PREP(ENGINE1_MASK,
					    GEN11_GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC));
		I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
		I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
		dev_priv->guc.interrupts.enabled = true;
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}
656
/*
 * Disable gen11 GuC interrupts: mask and disable under the lock, wait
 * for the IRQ handler to drain, then ack anything still latched.
 */
void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts.enabled = false;

	I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen11_reset_guc_interrupts(dev_priv);
}
670
671
672
673
674
675
676
677static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
678 u32 interrupt_mask,
679 u32 enabled_irq_mask)
680{
681 u32 new_val;
682 u32 old_val;
683
684 lockdep_assert_held(&dev_priv->irq_lock);
685
686 WARN_ON(enabled_irq_mask & ~interrupt_mask);
687
688 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
689 return;
690
691 old_val = I915_READ(GEN8_DE_PORT_IMR);
692
693 new_val = old_val;
694 new_val &= ~interrupt_mask;
695 new_val |= (~enabled_irq_mask & interrupt_mask);
696
697 if (new_val != old_val) {
698 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
699 POSTING_READ(GEN8_DE_PORT_IMR);
700 }
701}
702
703
704
705
706
707
708
709
710void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
711 enum pipe pipe,
712 u32 interrupt_mask,
713 u32 enabled_irq_mask)
714{
715 u32 new_val;
716
717 lockdep_assert_held(&dev_priv->irq_lock);
718
719 WARN_ON(enabled_irq_mask & ~interrupt_mask);
720
721 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
722 return;
723
724 new_val = dev_priv->de_irq_mask[pipe];
725 new_val &= ~interrupt_mask;
726 new_val |= (~enabled_irq_mask & interrupt_mask);
727
728 if (new_val != dev_priv->de_irq_mask[pipe]) {
729 dev_priv->de_irq_mask[pipe] = new_val;
730 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
731 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
732 }
733}
734
735
736
737
738
739
740
741void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
742 u32 interrupt_mask,
743 u32 enabled_irq_mask)
744{
745 u32 sdeimr = I915_READ(SDEIMR);
746 sdeimr &= ~interrupt_mask;
747 sdeimr |= (~enabled_irq_mask & interrupt_mask);
748
749 WARN_ON(enabled_irq_mask & ~interrupt_mask);
750
751 lockdep_assert_held(&dev_priv->irq_lock);
752
753 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
754 return;
755
756 I915_WRITE(SDEIMR, sdeimr);
757 POSTING_READ(SDEIMR);
758}
759
/*
 * Derive the PIPESTAT enable bits (upper 16) corresponding to the
 * currently requested status bits (lower 16) for @pipe. On gen2-4 the
 * mapping is a plain <<16 shift; on VLV/CHV (the only >=gen5 users of
 * PIPESTAT) some enable bits don't line up with their status bits and
 * must be patched up individually. Caller holds irq_lock.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B we don't support the PSR interrupt yet,
	 * on pipe C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* These enable bits don't sit 16 above their status bits. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
800
/*
 * Enable the given PIPESTAT status interrupt bits on @pipe. No-op if
 * all requested bits are already enabled. The write sets the enable
 * bits and simultaneously acks any stale status bits (PIPESTAT status
 * bits are write-1-to-clear). Caller holds irq_lock.
 */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
823
/*
 * Disable the given PIPESTAT status interrupt bits on @pipe. No-op if
 * none of the requested bits are enabled. The write updates the enable
 * half and acks the corresponding status bits. Caller holds irq_lock.
 */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
846
847static bool i915_has_asle(struct drm_i915_private *dev_priv)
848{
849 if (!dev_priv->opregion.asle)
850 return false;
851
852 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
853}
854
855
856
857
858
/*
 * Enable the legacy backlight-change (ASLE) PIPESTAT event. Pipe B
 * carries the event on all supported parts; gen4+ also signals it on
 * pipe A. No-op when the platform has no ASLE support.
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
/*
 * Read the hardware frame counter for @pipe on platforms where the
 * counter is split across the PIPEFRAME (high bits) and
 * PIPEFRAMEPIXEL (low bits + pixel count) registers.
 * Returns 0 when no usable hardware counter exists for this pipe.
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	const struct drm_display_mode *mode = &vblank->hwmode;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * A zero max_vblank_count means the hardware counter cannot be
	 * trusted for this crtc (drm core then falls back to timestamp
	 * based counting), so report 0.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert vblank start to a pixel count ... */
	vbl_start *= htotal;

	/* ... measured from the start of hsync, where vblank begins. */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * The high and low register halves aren't synchronized, so read
	 * high/low/high and retry until the two high reads agree — that
	 * guarantees the low value belongs to the same frame.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at the beginning of active;
	 * cook up a vblank-aligned counter by also comparing the pixel
	 * counter against the computed vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
991
/* G4x+ have a single self-consistent frame counter register. */
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
998
999
1000
1001
1002
1003
1004
1005
1006
/*
 * Estimate the current scanline from hardware timestamps, for modes
 * where the scanline counter itself cannot be used: the elapsed time
 * since the last vblank start is converted to scanlines using the
 * pixel clock and htotal. Caller holds uncore.lock (uses _FW reads).
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * Re-read the frame timestamp around the current-time read so we
	 * never compute a delta that straddles a vblank: retry until the
	 * frame timestamp is stable across the TIMESTAMP_CTR read.
	 */
	do {
		/*
		 * PIPE_FRMTMSTMP is latched at every start of
		 * vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * IVB_TIMESTAMP_CTR holds the current (free running)
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	/* Timestamps appear to tick in microseconds (clock is in kHz). */
	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
1049
1050
/*
 * Return the current scanline of @crtc, adjusted so that 0 is the
 * first line of active video, or -1 if the crtc is inactive.
 * Caller holds uncore.lock (all register accesses use _FW variants).
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	/* Some modes can't use the DSL counter; derive from timestamps. */
	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW-class (DDI) hardware the DSL register appears to read
	 * back 0 if sampled just before the start of vblank. Re-read it
	 * for up to ~100us so we don't mistake that for scanline 0 and
	 * confuse callers that check for vblank crossings.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * Shift by the per-platform scanline_offset so that scanline 0
	 * is the first line of active video.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
1109
/*
 * drm_driver.get_scanout_position() implementation: report the current
 * scanout position of @pipe as (*vpos, *hpos) relative to the start of
 * vblank (negative while in vblank), with optional system timestamps
 * bracketing the hardware query. Returns false if the mode is invalid.
 */
static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	/*
	 * gen2 has no pixel counter, g4x/gen5+ have a reliable scanline
	 * counter; everything else falls back to the pixel counter
	 * unless the mode explicitly requests the scanline counter.
	 */
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Keep the whole query inside uncore.lock + irqs-off so the
	 * stime/etime timestamps bracket the register reads as tightly
	 * as possible.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Optional system timestamp before the hardware query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* Scanline counter: position is already in lines. */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/*
		 * Pixel counter: measured from the start of vblank,
		 * in pixels.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* Convert the line-based bounds to pixel counts too. */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * The pixel counter can read up to vtotal + a few
		 * pixels due to how it's sampled; clamp to the last
		 * valid position so the modular arithmetic below
		 * stays in range.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * The pixel counter starts at the beginning of hsync,
		 * not the beginning of the line; rotate it so position
		 * 0 matches the start of active, like the scanline
		 * counter convention.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Optional system timestamp after the hardware query. */
	if (etime)
		*etime = ktime_get();

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * Re-bias so that positions inside vblank come out negative
	 * (counting up towards 0 at the start of active).
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
1226
/*
 * Public wrapper around __intel_get_crtc_scanline(): takes uncore.lock
 * (required for the _FW register accesses) around the query.
 */
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
1239
1240static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1241{
1242 struct intel_uncore *uncore = &dev_priv->uncore;
1243 u32 busy_up, busy_down, max_avg, min_avg;
1244 u8 new_delay;
1245
1246 spin_lock(&mchdev_lock);
1247
1248 intel_uncore_write16(uncore,
1249 MEMINTRSTS,
1250 intel_uncore_read(uncore, MEMINTRSTS));
1251
1252 new_delay = dev_priv->ips.cur_delay;
1253
1254 intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
1255 busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
1256 busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
1257 max_avg = intel_uncore_read(uncore, RCBMAXAVG);
1258 min_avg = intel_uncore_read(uncore, RCBMINAVG);
1259
1260
1261 if (busy_up > max_avg) {
1262 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1263 new_delay = dev_priv->ips.cur_delay - 1;
1264 if (new_delay < dev_priv->ips.max_delay)
1265 new_delay = dev_priv->ips.max_delay;
1266 } else if (busy_down < min_avg) {
1267 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1268 new_delay = dev_priv->ips.cur_delay + 1;
1269 if (new_delay > dev_priv->ips.min_delay)
1270 new_delay = dev_priv->ips.min_delay;
1271 }
1272
1273 if (ironlake_set_drps(dev_priv, new_delay))
1274 dev_priv->ips.cur_delay = new_delay;
1275
1276 spin_unlock(&mchdev_lock);
1277
1278 return;
1279}
1280
/* Snapshot the VLV render/media C0 residency counters plus a raw
 * monotonic timestamp into @ei, for use by vlv_wa_c0_ei(). */
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
1288
1289void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1290{
1291 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
1292}
1293
/*
 * VLV/CHV workaround: the hardware up/down-threshold events are
 * unreliable, so on an EI-expired interrupt compute GPU busyness from
 * the C0 residency counters ourselves and synthesize the matching
 * UP/DOWN threshold event bits. Returns the synthesized event mask
 * (0 on the first sample, which only primes rps->ei).
 */
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	/* ktime == 0 means no prior sample (see gen6_rps_reset_ei()). */
	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/*
		 * Busyness is the busier of render and media. The units
		 * are scaled (us -> ns, thresholds in percent, counter
		 * in fixed point) so that c0 and time are directly
		 * comparable below.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and percent */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}
1333
/*
 * Deferred RPS worker: consume the accumulated PM interrupt events
 * (and any client boost request), decide the next GPU frequency step,
 * and apply it. Interrupt events are re-unmasked at the end so new
 * events can fire while we were busy.
 */
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	/* Only consume events while interrupts are officially enabled. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&rps->lock);

	/* VLV/CHV: synthesize threshold events from C0 residency. */
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		/* A waiter asked for a boost: jump straight there. */
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		/* Accelerate repeated up-steps (1, 2, 4, ...). */
		if (adj > 0)
			adj *= 2;
		else
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		/* Idle timeout: drop to the efficient/minimum frequency. */
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		/* Accelerate repeated down-steps (-1, -2, -4, ...). */
		if (adj < 0)
			adj *= 2;
		else
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else {
		adj = 0;
	}

	rps->last_adj = adj;

	/*
	 * Don't carry an adjustment momentum across a power-mode
	 * transition in the opposite direction — restart the ramp.
	 */
	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
	    (adj > 0 && rps->power.mode == LOW_POWER))
		rps->last_adj = 0;

	/* sysfs frequency limits may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&rps->lock);

out:
	/* Re-unmask our events unless interrupts were disabled meanwhile. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
/*
 * ivybridge_parity_work - deferred handler for L3 parity errors.
 *
 * For every slice flagged by the irq handler, read the error location
 * (row/bank/subbank) from the L3 error register, re-arm error
 * detection, and notify userspace via a KOBJ_CHANGE uevent.  Finally
 * re-enable the parity interrupt that the irq handler disabled.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/*
	 * We must turn off DOP level clock gating to access the L3
	 * registers.  Hold struct_mutex for the duration of the accesses
	 * rather than exposing a get/put style interface.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--; /* ffs() is 1-based */
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Clear the reported error and re-arm detection. */
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl); /* restore DOP clock gating */

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
1512
/*
 * Hard-irq half of L3 parity handling: mask further parity interrupts,
 * record which slice(s) reported an error, and defer the expensive
 * register accesses to ivybridge_parity_work().
 */
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	/* Disabled here; re-enabled by the work item once it has drained
	 * all pending slices. */
	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
1532
/*
 * GT interrupt dispatch for ILK-class hardware: signal breadcrumb
 * waiters on the render and BSD engines for their user interrupts.
 */
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
}
1541
/*
 * GT interrupt dispatch for gen6/7: signal breadcrumb waiters for each
 * engine's user interrupt, log command parser errors, and hand L3
 * parity errors off to the deferred handler.
 */
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
1560
1561static void
1562gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1563{
1564 bool tasklet = false;
1565
1566 if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
1567 tasklet = true;
1568
1569 if (iir & GT_RENDER_USER_INTERRUPT) {
1570 intel_engine_breadcrumbs_irq(engine);
1571 tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
1572 }
1573
1574 if (tasklet)
1575 tasklet_hi_schedule(&engine->execlists.tasklet);
1576}
1577
/*
 * Read and acknowledge (write-to-clear) the four GT IIR registers
 * selected by @master_ctl, returning the raw values in @gt_iir for
 * later processing by gen8_gt_irq_handler().  Uses raw register
 * accessors as this runs in the hard irq path.
 */
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
			    u32 master_ctl, u32 gt_iir[4])
{
	void __iomem * const regs = i915->uncore.regs;

	/* All GT-level bits of the gen8 master interrupt control register.
	 * NOTE(review): not referenced within this function — confirm it is
	 * used further down the file before considering removal. */
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
		      GEN8_GT_BCS_IRQ | \
		      GEN8_GT_VCS0_IRQ | \
		      GEN8_GT_VCS1_IRQ | \
		      GEN8_GT_VECS_IRQ | \
		      GEN8_GT_PM_IRQ | \
		      GEN8_GT_GUC_IRQ)

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(gt_iir[2]))
			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
	}
}
1615
/*
 * Dispatch the GT IIR values previously collected by gen8_gt_irq_ack():
 * per-engine command-streamer events, RPS (PM) events, and GuC-to-host
 * notifications.
 */
static void gen8_gt_irq_handler(struct drm_i915_private *i915,
				u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gen8_cs_irq_handler(i915->engine[RCS0],
				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[BCS0],
				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		gen8_cs_irq_handler(i915->engine[VCS0],
				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[VCS1],
				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gen8_cs_irq_handler(i915->engine[VECS0],
				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(i915, gt_iir[2]);
		gen9_guc_irq_handler(i915, gt_iir[2]);
	}
}
1643
1644static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1645{
1646 switch (pin) {
1647 case HPD_PORT_C:
1648 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1649 case HPD_PORT_D:
1650 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1651 case HPD_PORT_E:
1652 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1653 case HPD_PORT_F:
1654 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1655 default:
1656 return false;
1657 }
1658}
1659
1660static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1661{
1662 switch (pin) {
1663 case HPD_PORT_A:
1664 return val & PORTA_HOTPLUG_LONG_DETECT;
1665 case HPD_PORT_B:
1666 return val & PORTB_HOTPLUG_LONG_DETECT;
1667 case HPD_PORT_C:
1668 return val & PORTC_HOTPLUG_LONG_DETECT;
1669 default:
1670 return false;
1671 }
1672}
1673
1674static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1675{
1676 switch (pin) {
1677 case HPD_PORT_A:
1678 return val & ICP_DDIA_HPD_LONG_DETECT;
1679 case HPD_PORT_B:
1680 return val & ICP_DDIB_HPD_LONG_DETECT;
1681 default:
1682 return false;
1683 }
1684}
1685
1686static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1687{
1688 switch (pin) {
1689 case HPD_PORT_C:
1690 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1691 case HPD_PORT_D:
1692 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1693 case HPD_PORT_E:
1694 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1695 case HPD_PORT_F:
1696 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1697 default:
1698 return false;
1699 }
1700}
1701
1702static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1703{
1704 switch (pin) {
1705 case HPD_PORT_E:
1706 return val & PORTE_HOTPLUG_LONG_DETECT;
1707 default:
1708 return false;
1709 }
1710}
1711
1712static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1713{
1714 switch (pin) {
1715 case HPD_PORT_A:
1716 return val & PORTA_HOTPLUG_LONG_DETECT;
1717 case HPD_PORT_B:
1718 return val & PORTB_HOTPLUG_LONG_DETECT;
1719 case HPD_PORT_C:
1720 return val & PORTC_HOTPLUG_LONG_DETECT;
1721 case HPD_PORT_D:
1722 return val & PORTD_HOTPLUG_LONG_DETECT;
1723 default:
1724 return false;
1725 }
1726}
1727
1728static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1729{
1730 switch (pin) {
1731 case HPD_PORT_A:
1732 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1733 default:
1734 return false;
1735 }
1736}
1737
1738static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1739{
1740 switch (pin) {
1741 case HPD_PORT_B:
1742 return val & PORTB_HOTPLUG_LONG_DETECT;
1743 case HPD_PORT_C:
1744 return val & PORTC_HOTPLUG_LONG_DETECT;
1745 case HPD_PORT_D:
1746 return val & PORTD_HOTPLUG_LONG_DETECT;
1747 default:
1748 return false;
1749 }
1750}
1751
1752static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1753{
1754 switch (pin) {
1755 case HPD_PORT_B:
1756 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1757 case HPD_PORT_C:
1758 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1759 case HPD_PORT_D:
1760 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1761 default:
1762 return false;
1763 }
1764}
1765
1766
1767
1768
1769
1770
1771
1772
/**
 * intel_get_hpd_pins - map hardware hotplug trigger bits to HPD pins
 * @dev_priv: private driver data pointer
 * @pin_mask: mask of HPD pins which have triggered (out)
 * @long_mask: mask of HPD pins which are long HPD (out)
 * @hotplug_trigger: hotplug trigger bits to decode
 * @dig_hotplug_reg: digital port register value with the short/long bits
 * @hpd: per-pin table of hotplug trigger bits for this platform
 * @long_pulse_detect: platform callback deciding short vs. long pulse
 *
 * Decode the hotplug trigger into per-pin masks; matching bits are
 * OR'ed into the caller-provided *pin_mask and *long_mask, which the
 * caller is expected to have initialized.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);

}
1795
/* Wake anyone waiting on a GMBUS transfer completion. */
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1800
/*
 * DP AUX transfer completion; note this deliberately wakes the same
 * wait queue as GMBUS, which is what AUX waiters sleep on here.
 */
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1805
1806#if defined(CONFIG_DEBUG_FS)
/*
 * Feed one set of pipe CRC results into the DRM CRC capture machinery,
 * skipping the initial bogus sample(s) after CRC capture is enabled.
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * The first CRC after enabling capture is known to be bogus, so
	 * skip it and wait for the next vblank.  On gen8+ the second CRC
	 * can be bogus as well, so skip that one too.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
1840#else
/* No-op stub: pipe CRC capture is only built with CONFIG_DEBUG_FS. */
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
1847#endif
1848
1849
/* HSW+ exposes a single CRC result register; pad the rest with zeros. */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}
1857
/* IVB provides five CRC result registers; forward them all. */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
1868
1869static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1870 enum pipe pipe)
1871{
1872 u32 res1, res2;
1873
1874 if (INTEL_GEN(dev_priv) >= 3)
1875 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1876 else
1877 res1 = 0;
1878
1879 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1880 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1881 else
1882 res2 = 0;
1883
1884 display_pipe_crc_irq_handler(dev_priv, pipe,
1885 I915_READ(PIPE_CRC_RES_RED(pipe)),
1886 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1887 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1888 res1, res2);
1889}
1890
1891
1892
1893
/*
 * Gen11 RPS interrupt: mask the received events and punt the frequency
 * decision to gen6_pm_rps_work().  Caller must hold i915->irq_lock.
 */
static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
{
	struct intel_rps *rps = &i915->gt_pm.rps;
	const u32 events = i915->pm_rps_events & pm_iir;

	lockdep_assert_held(&i915->irq_lock);

	if (unlikely(!events))
		return;

	/* Masked here; re-unmasked by gen6_pm_rps_work() when done. */
	gen6_mask_pm_irq(i915, events);

	if (!rps->interrupts_enabled)
		return;

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}
1912
/*
 * Gen6+ PM interrupt: accumulate RPS events for the worker (masking
 * them until the worker has run), and on pre-gen8 also handle the VEBOX
 * user interrupt and command parser errors that share this IIR.
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		/* Masked here; re-unmasked by gen6_pm_rps_work() when done. */
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	/* The remaining bits only exist on pre-gen8 hardware. */
	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
1936
/* Gen9 GuC-to-host notification: forward to the GuC event handler. */
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
		intel_guc_to_host_event_handler(&dev_priv->guc);
}
1942
/* Gen11 GuC-to-host notification: forward to the GuC event handler. */
static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
{
	if (iir & GEN11_GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(&i915->guc);
}
1948
/*
 * Clear all PIPESTAT status bits (including FIFO underrun) on every
 * pipe and reset the software copy of the per-pipe enable masks.
 */
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}
1961
/*
 * Collect and acknowledge the per-pipe PIPESTAT events that correspond
 * to the IIR bits in @iir, returning them in @pipe_stats for the
 * platform pipestat handler to act on.
 */
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits
		 * do not generate interrupts at all (like the underrun
		 * bit).  Hence we need to be careful that we only handle
		 * what we want to handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR.
		 *
		 * Toggle the enable bits to make sure we get an edge in
		 * the ISR pipe event bit if we don't clear all the
		 * enabled status bits.  Otherwise the edge triggered IIR
		 * on i965/g4x wouldn't notice that an interrupt is still
		 * pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}
2026
/*
 * Act on the acked PIPESTAT events for i8xx: vblank, pipe CRC and CPU
 * FIFO underrun.
 */
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}
2043
/*
 * Act on the acked PIPESTAT events for i915-class hardware: vblank,
 * pipe CRC, FIFO underrun, plus backlight/ASLE OpRegion events.
 */
static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}
2067
/*
 * Act on the acked PIPESTAT events for i965-class hardware: vblank
 * (start-of-vblank variant), pipe CRC, FIFO underrun, ASLE and GMBUS.
 */
static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	/* The GMBUS status only lives in pipe A's PIPESTAT. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
2094
/*
 * Act on the acked PIPESTAT events for VLV/CHV: vblank, pipe CRC, FIFO
 * underrun and GMBUS (the latter reported via pipe A's PIPESTAT).
 */
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
2114
/*
 * Read and clear PORT_HOTPLUG_STAT, accumulating the status bits seen.
 * Returns the accumulated hotplug status (0 if none was pending).
 */
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt bits in
	 * PORT_HOTPLUG_STAT.  Otherwise the ISR port interrupt bit won't
	 * have an edge, and the i965/g4x edge triggered IIR will not
	 * notice that an interrupt is still pending.  We can't use
	 * PORT_HOTPLUG_EN to guarantee the edge as the act of toggling
	 * the enable bits can itself generate a new hotplug interrupt,
	 * hence the re-read loop (bounded to avoid spinning forever on a
	 * stuck bit).
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}
2152
/*
 * Decode the acked hotplug status into HPD pin events and dispatch
 * them; on G4X/VLV/CHV also forward DP AUX completion events.
 */
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}
2185
/*
 * Top-level interrupt handler for Valleyview.  Acks everything first
 * (with interrupt generation disabled), then runs the individual
 * handlers; the ack ordering below is load-bearing.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * NOTE(review): disabling the master interrupt and zeroing
		 * VLV_IER around the ack appears to be required so that any
		 * event arriving while we process produces a fresh 0->1
		 * edge when interrupts are re-enabled below — confirm
		 * against the VLV interrupt generation documentation.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level from
		 * PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Re-enable interrupt generation. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
2271
/*
 * Top-level interrupt handler for Cherryview.  Same ack-then-handle
 * structure as valleyview_irq_handler(), but with the gen8-style master
 * control register and GT IIR banks.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 gt_iir[4];
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * NOTE(review): disabling the master interrupt and zeroing
		 * VLV_IER around the ack appears to be required so that any
		 * event arriving while we process produces a fresh 0->1
		 * edge when interrupts are re-enabled below — confirm
		 * against the CHV interrupt generation documentation.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level from
		 * PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Re-enable interrupt generation. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
2352
/*
 * Ack and decode PCH (IBX/CPT-style) hotplug events into HPD pin
 * events and dispatch them.
 */
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the
	 * CPU unless we touch the hotplug register, even if
	 * hotplug_trigger is zero.  Not acking leads to "The master
	 * control interrupt lied (SDE)!" messages.  When there is no
	 * trigger, avoid clearing status bits by masking them out of the
	 * write-back value.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2384
2385static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2386{
2387 int pipe;
2388 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2389
2390 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2391
2392 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2393 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2394 SDE_AUDIO_POWER_SHIFT);
2395 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2396 port_name(port));
2397 }
2398
2399 if (pch_iir & SDE_AUX_MASK)
2400 dp_aux_irq_handler(dev_priv);
2401
2402 if (pch_iir & SDE_GMBUS)
2403 gmbus_irq_handler(dev_priv);
2404
2405 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2406 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2407
2408 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2409 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2410
2411 if (pch_iir & SDE_POISON)
2412 DRM_ERROR("PCH poison interrupt\n");
2413
2414 if (pch_iir & SDE_FDI_MASK)
2415 for_each_pipe(dev_priv, pipe)
2416 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2417 pipe_name(pipe),
2418 I915_READ(FDI_RX_IIR(pipe)));
2419
2420 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2421 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2422
2423 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2424 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2425
2426 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2427 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2428
2429 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2430 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2431}
2432
/*
 * Handle GEN7_ERR_INT: poison, per-pipe FIFO underruns and pipe CRC
 * completion (routed to the IVB or HSW CRC handler as appropriate).
 */
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	/* Write-to-clear what we handled. */
	I915_WRITE(GEN7_ERR_INT, err_int);
}
2455
/*
 * Handle the CPT south error interrupt register: PCH poison and
 * per-transcoder FIFO underruns.
 */
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	/* Write-to-clear what we handled. */
	I915_WRITE(SERR_INT, serr_int);
}
2470
/*
 * South display engine (CPT PCH) interrupt handler: hotplug, audio
 * power, AUX, GMBUS, content-protection, FDI and south error events.
 */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
2506
/*
 * South display engine (ICP PCH) interrupt handler: DDI and Type-C
 * hotplug (each with its own control register) plus GMBUS.
 */
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
			    const u32 *pins)
{
	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
	u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
	u32 pin_mask = 0, long_mask = 0;

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read-modify-write back to ack the DDI hotplug status. */
		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read-modify-write back to ack the TC hotplug status. */
		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}
2544
/*
 * South display engine (SPT PCH) interrupt handler: hotplug split
 * across two registers (ports A-D vs. port E) plus GMBUS.
 */
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read-modify-write back to ack the hotplug status. */
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		/* Port E lives in the second hotplug register. */
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}
2580
/*
 * ILK/IVB/BDW CPU-side digital port (DP A) hotplug handler.
 * @hpd: platform HPD bit table (hpd_ilk, hpd_ivb or hpd_bdw).
 */
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/* Read the long/short pulse status and write it back to clear. */
	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2596
/*
 * Ironlake/Sandybridge (gen5/6) display interrupt handler: HPD, AUX,
 * OpRegion ASLE, per-pipe vblank/underrun/CRC, chained PCH events and
 * the gen5-only RPS (PCU) event.
 */
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* ack the PCH interrupt sources only after they were handled */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}
2642
/*
 * Ivybridge/Haswell/Broadwell (gen7+) display interrupt handler.
 * Mirrors ilk_display_irq_handler() but uses the IVB bit layout and
 * additionally services the error-interrupt and eDP PSR events.
 */
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		/* Handle first, then ack the PSR IIR bits. */
		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
2683
2684
2685
2686
2687
2688
2689
2690
2691
/*
 * Top-level interrupt handler for gen5-7 (Ironlake through Haswell).
 * Disables the master and south interrupt enables, then finds, clears
 * and processes each interrupt source (GT, display, PM) in turn.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/*
	 * Disable south interrupts. We'll only write to SDEIIR once, so
	 * further interrupts will be stored on its back queue, and then
	 * we'll be able to process them after we restore SDEIER (as soon
	 * as we restore it, we'll get an interrupt if SDEIIR still has
	 * something to process due to its back queue).
	 */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			snb_gt_irq_handler(dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	/* Re-enable master and south interrupts now that all IIRs are acked. */
	I915_WRITE(DEIER, de_ier);
	if (!HAS_PCH_NOP(dev_priv))
		I915_WRITE(SDEIER, sde_ier);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
2759
/*
 * Broxton/Geminilake hotplug handler: ports live behind PCH_PORT_HOTPLUG
 * even though there is no PCH on these platforms.
 * @hpd: platform HPD bit table (hpd_bxt).
 */
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/* Read the long/short pulse status, write back to clear. */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2775
/*
 * Gen11 north display hotplug handler: Type-C and Thunderbolt pins have
 * separate trigger masks and hotplug-control registers but share the
 * same pin table and long-pulse decoder.
 */
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		/* Read status and write it back to clear the sticky bits. */
		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
				   dig_hotplug_reg, hpd_gen11,
				   gen11_port_hotplug_long_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
				   dig_hotplug_reg, hpd_gen11,
				   gen11_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}
2809
2810static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2811{
2812 u32 mask = GEN8_AUX_CHANNEL_A;
2813
2814 if (INTEL_GEN(dev_priv) >= 9)
2815 mask |= GEN9_AUX_CHANNEL_B |
2816 GEN9_AUX_CHANNEL_C |
2817 GEN9_AUX_CHANNEL_D;
2818
2819 if (IS_CNL_WITH_PORT_F(dev_priv))
2820 mask |= CNL_AUX_CHANNEL_F;
2821
2822 if (INTEL_GEN(dev_priv) >= 11)
2823 mask |= ICL_AUX_CHANNEL_E |
2824 CNL_AUX_CHANNEL_F;
2825
2826 return mask;
2827}
2828
/*
 * Gen8+ display-engine interrupt handler. The master control register
 * tells us which second-level IIRs fired; each one is read, acked by
 * writing the value back, then decoded: MISC (GSE/PSR), gen11 HPD,
 * PORT (AUX/HPD/GMBUS), per-pipe events, and finally chained PCH irqs.
 * Returns IRQ_HANDLED if any second-level IIR had bits set.
 */
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			bool found = false;

			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & GEN8_DE_MISC_GSE) {
				intel_opregion_asle_intr(dev_priv);
				found = true;
			}

			if (iir & GEN8_DE_EDP_PSR) {
				u32 psr_iir = I915_READ(EDP_PSR_IIR);

				intel_psr_irq_handler(dev_priv, psr_iir);
				I915_WRITE(EDP_PSR_IIR, psr_iir);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		/* Fault-error bit layout differs between gen8 and gen9+. */
		fault_errors = iir;
		if (INTEL_GEN(dev_priv) >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've
		 * seen on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
				icp_irq_handler(dev_priv, iir, hpd_mcc);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir, hpd_icp);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}
2984
/*
 * Disable the gen8 master interrupt and return a snapshot of the
 * currently pending top-level indications.
 */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
2997
/* Re-enable the gen8 top-level (master) interrupt. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
3002
/*
 * Top-level gen8/gen9 interrupt handler: disable master, ack GT irqs
 * early, handle display irqs under a runtime-PM wakeref disable window,
 * re-enable master, then process the GT irqs acked earlier.
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = to_i915(arg);
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;
	u32 gt_iir[4];

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt */
	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);

	return IRQ_HANDLED;
}
3035
/*
 * Select the given (bank, bit) interrupt source and read back its
 * identity dword. Returns 0 if the hardware never reports the identity
 * as valid within the timeout. Caller must hold i915->irq_lock.
 */
static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = i915->uncore.regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&i915->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 * local_clock() is in ns; >> 10 approximates a us conversion.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	/* Ack the identity so the hardware can latch the next event. */
	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}
3069
3070static void
3071gen11_other_irq_handler(struct drm_i915_private * const i915,
3072 const u8 instance, const u16 iir)
3073{
3074 if (instance == OTHER_GUC_INSTANCE)
3075 return gen11_guc_irq_handler(i915, iir);
3076
3077 if (instance == OTHER_GTPM_INSTANCE)
3078 return gen11_rps_irq_handler(i915, iir);
3079
3080 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3081 instance, iir);
3082}
3083
3084static void
3085gen11_engine_irq_handler(struct drm_i915_private * const i915,
3086 const u8 class, const u8 instance, const u16 iir)
3087{
3088 struct intel_engine_cs *engine;
3089
3090 if (instance <= MAX_ENGINE_INSTANCE)
3091 engine = i915->engine_class[class][instance];
3092 else
3093 engine = NULL;
3094
3095 if (likely(engine))
3096 return gen8_cs_irq_handler(engine, iir);
3097
3098 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3099 class, instance);
3100}
3101
/*
 * Decode an interrupt identity dword into (class, instance, intr bits)
 * and dispatch to the engine or "other" handlers.
 */
static void
gen11_gt_identity_handler(struct drm_i915_private * const i915,
			  const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	/* An identity with no interrupt bits set carries nothing to handle. */
	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
		return gen11_engine_irq_handler(i915, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(i915, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}
3122
/*
 * Service every pending interrupt bit in one GT interrupt bank.
 * Caller must hold i915->irq_lock.
 */
static void
gen11_gt_bank_handler(struct drm_i915_private * const i915,
		      const unsigned int bank)
{
	void __iomem * const regs = i915->uncore.regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&i915->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(i915, bank, bit);

		gen11_gt_identity_handler(i915, ident);
	}

	/* Clear must be after shared has been served for engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}
3144
/*
 * Walk both gen11 GT interrupt banks indicated by the master control
 * register, serving each under the irq_lock.
 */
static void
gen11_gt_irq_handler(struct drm_i915_private * const i915,
		     const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&i915->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(i915, bank);
	}

	spin_unlock(&i915->irq_lock);
}
3160
/*
 * If the GU MISC interrupt fired, read its IIR and ack it by writing
 * the value back. Returns the IIR value (0 if the irq didn't fire).
 */
static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}
3176
/* Handle a previously-acked GU MISC IIR: only the GSE (ASLE) event. */
static void
gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(dev_priv);
}
3183
/*
 * Disable the gen11 master interrupt and return a snapshot of the
 * currently pending top-level indications.
 */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
3196
/* Re-enable the gen11 top-level (master) interrupt. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
3201
/*
 * Top-level gen11 interrupt handler: disable master, service GT banks,
 * then display (under a runtime-PM wakeref disable window), ack GU MISC,
 * re-enable master, and finally handle the GU MISC event.
 */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = to_i915(arg);
	void __iomem * const regs = i915->uncore.regs;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt. */
	gen11_gt_irq_handler(i915, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ) {
		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

		disable_rpm_wakeref_asserts(&i915->runtime_pm);
		/*
		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
		 * for the display related bits.
		 */
		gen8_de_irq_handler(i915, disp_ctl);
		enable_rpm_wakeref_asserts(&i915->runtime_pm);
	}

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	return IRQ_HANDLED;
}
3242
3243
3244
3245
3246static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
3247{
3248 struct drm_i915_private *dev_priv = to_i915(dev);
3249 unsigned long irqflags;
3250
3251 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3252 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3253 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3254
3255 return 0;
3256}
3257
/*
 * i945GM vblank enable: on the 0 -> 1 transition of the enable count,
 * kick the worker that raises a PM QoS request to keep the CPU out of
 * C3 (see i945gm_vblank_work_func).
 */
static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Post-increment: schedule only when this is the first enable. */
	if (dev_priv->i945gm_vblank.enabled++ == 0)
		schedule_work(&dev_priv->i945gm_vblank.work);

	return i8xx_enable_vblank(dev, pipe);
}
3267
3268static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
3269{
3270 struct drm_i915_private *dev_priv = to_i915(dev);
3271 unsigned long irqflags;
3272
3273 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3274 i915_enable_pipestat(dev_priv, pipe,
3275 PIPE_START_VBLANK_INTERRUPT_STATUS);
3276 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3277
3278 return 0;
3279}
3280
/*
 * gen5-7 vblank enable: unmask the per-pipe vblank bit in the display
 * IMR (gen7 uses a different bit layout than gen5/6).
 */
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_vblank_restore(dev, pipe);

	return 0;
}
3300
/* gen8+ vblank enable: unmask the pipe's vblank bit in the DE pipe IMR. */
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_vblank_restore(dev, pipe);

	return 0;
}
3318
3319
3320
3321
3322static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
3323{
3324 struct drm_i915_private *dev_priv = to_i915(dev);
3325 unsigned long irqflags;
3326
3327 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3328 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3329 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3330}
3331
/*
 * i945GM vblank disable: on the 1 -> 0 transition of the enable count,
 * kick the worker so it drops the C3-blocking PM QoS request.
 */
static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i8xx_disable_vblank(dev, pipe);

	/* Pre-decrement: schedule only when the last user goes away. */
	if (--dev_priv->i945gm_vblank.enabled == 0)
		schedule_work(&dev_priv->i945gm_vblank.work);
}
3341
3342static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
3343{
3344 struct drm_i915_private *dev_priv = to_i915(dev);
3345 unsigned long irqflags;
3346
3347 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3348 i915_disable_pipestat(dev_priv, pipe,
3349 PIPE_START_VBLANK_INTERRUPT_STATUS);
3350 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3351}
3352
/* gen5-7 vblank disable: mask the per-pipe vblank bit in the display IMR. */
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	/* gen7 uses the IVB bit layout, gen5/6 the original one. */
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
3364
3365static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3366{
3367 struct drm_i915_private *dev_priv = to_i915(dev);
3368 unsigned long irqflags;
3369
3370 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3371 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3372 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3373}
3374
/*
 * Deferred worker toggling a PM QoS request: while any vblank interrupt
 * is enabled, cap CPU wakeup latency below the C3 exit latency so the
 * CPU stays out of C3 (vblank irqs otherwise risk being missed there).
 */
static void i945gm_vblank_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, i945gm_vblank.work);

	/*
	 * READ_ONCE: the counter is modified outside this worker by the
	 * enable/disable hooks; sample it once for a consistent decision.
	 */
	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
			      dev_priv->i945gm_vblank.c3_disable_latency :
			      PM_QOS_DEFAULT_VALUE);
}
3390
3391static int cstate_disable_latency(const char *name)
3392{
3393 const struct cpuidle_driver *drv;
3394 int i;
3395
3396 drv = cpuidle_get_driver();
3397 if (!drv)
3398 return 0;
3399
3400 for (i = 0; i < drv->state_count; i++) {
3401 const struct cpuidle_state *state = &drv->states[i];
3402
3403 if (!strcmp(state->name, name))
3404 return state->exit_latency ?
3405 state->exit_latency - 1 : 0;
3406 }
3407
3408 return 0;
3409}
3410
/*
 * Set up the i945GM anti-C3 vblank machinery: the worker, the cached C3
 * disable latency, and an initially-inactive PM QoS request.
 */
static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
{
	INIT_WORK(&dev_priv->i945gm_vblank.work,
		  i945gm_vblank_work_func);

	dev_priv->i945gm_vblank.c3_disable_latency =
		cstate_disable_latency("C3");
	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
			   PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}
3422
/* Tear down the i945GM vblank worker and remove its PM QoS request. */
static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
{
	cancel_work_sync(&dev_priv->i945gm_vblank.work);
	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
}
3428
/* Reset the south display engine (PCH) interrupt registers. */
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	/* CPT/LPT additionally latch south errors in SERR_INT; clear them. */
	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}
3441
3442
3443
3444
3445
3446
3447
3448
3449
/*
 * SDEIER is also touched by the interrupt handler to work around missed
 * PCH interrupts. Hence we can't update it after the interrupt handler
 * is enabled - instead we unconditionally enable all PCH interrupt
 * sources here, but then only unmask them as needed with SDEIMR.
 *
 * Must be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev_priv))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
3461
/* Reset the GT interrupt registers, plus the gen6+ PM interrupt set. */
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (INTEL_GEN(dev_priv) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}
3470
/*
 * Reset all VLV/CHV display interrupt state: GT/display invalidation
 * status, hotplug enables and status, pipestats, and the VLV_ interrupt
 * register set. Leaves irq_mask fully masked (~0u).
 */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	/* Disable all hotplug detection, then clear latched hotplug status. */
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}
3488
/*
 * Enable the VLV/CHV display interrupts: per-pipe CRC pipestats, GMBUS
 * on pipe A, and the display port / pipe-event / LPE audio interrupts
 * (plus pipe C variants on CHV). Expects the reset state from
 * vlv_display_irq_reset() (irq_mask == ~0u).
 */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS status is reported via pipe A's pipestat. */
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* Catch double postinstall / missing reset. */
	WARN_ON(dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
3519
3520
3521
/*
 * drm_irq uninstall/reset hook for gen5-7: reset display, error (gen7),
 * PSR (HSW), GT and PCH interrupt registers.
 */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	if (IS_GEN(dev_priv, 7))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		/* Mask and clear any pending eDP PSR interrupts. */
		I915_WRITE(EDP_PSR_IMR, 0xffffffff);
		I915_WRITE(EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(dev_priv);

	ibx_irq_reset(dev_priv);
}
3540
/*
 * VLV reset hook: kill the master enable first, then GT, then the
 * display block (only if display irqs are currently enabled).
 */
static void valleyview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3555
/* Reset all four gen8 GT interrupt register banks. */
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
3565
/*
 * gen8/gen9 reset hook: disable the master interrupt, then reset GT,
 * PSR, powered-on pipes, port/misc/PCU banks and (where present) the PCH.
 */
static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	int pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(dev_priv);

	/* Mask and clear any pending eDP PSR interrupts. */
	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
	I915_WRITE(EDP_PSR_IIR, 0xffffffff);

	/* Only touch pipe registers whose power well is currently on. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}
3591
/* Disable and mask every gen11 GT interrupt source. */
static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	/* Disable RCS, BCS, VCS and VECS class engines. */
	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);

	/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);

	/* Same for the GT PM and GuC interrupt blocks. */
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
}
3610
3611static void gen11_irq_reset(struct drm_device *dev)
3612{
3613 struct drm_i915_private *dev_priv = dev->dev_private;
3614 struct intel_uncore *uncore = &dev_priv->uncore;
3615 int pipe;
3616
3617 gen11_master_intr_disable(dev_priv->uncore.regs);
3618
3619 gen11_gt_irq_reset(dev_priv);
3620
3621 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3622
3623 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3624 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3625
3626 for_each_pipe(dev_priv, pipe)
3627 if (intel_display_power_is_enabled(dev_priv,
3628 POWER_DOMAIN_PIPE(pipe)))
3629 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3630
3631 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3632 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3633 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3634 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3635 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3636
3637 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3638 GEN3_IRQ_RESET(uncore, SDE);
3639}
3640
/*
 * Re-initialize DE pipe interrupt registers for pipes whose power well
 * just came back on, restoring the saved per-pipe mask plus the
 * always-wanted vblank and FIFO-underrun bits.
 * @pipe_mask: bitmask of pipes to (re)enable.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	/* Nothing to restore if interrupts are globally disabled. */
	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}
3663
/*
 * Reset DE pipe interrupt registers for pipes whose power well is about
 * to go down, then wait for any in-flight handler to finish so it cannot
 * touch the soon-to-be-powered-off registers.
 * @pipe_mask: bitmask of pipes to disable.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	/* Nothing to tear down if interrupts are globally disabled. */
	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}
3685
/*
 * CHV reset hook: disable the gen8-style master interrupt, reset the GT
 * banks and PCU, then the display block if display irqs are enabled.
 */
static void cherryview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3703
3704static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3705 const u32 hpd[HPD_NUM_PINS])
3706{
3707 struct intel_encoder *encoder;
3708 u32 enabled_irqs = 0;
3709
3710 for_each_intel_encoder(&dev_priv->drm, encoder)
3711 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3712 enabled_irqs |= hpd[encoder->hpd_pin];
3713
3714 return enabled_irqs;
3715}
3716
/*
 * Enable digital hotplug detection on the PCH and configure the DP
 * short-pulse duration to 2ms for ports B-D. Port A hotplug is enabled
 * only on LPT-LP (where eDP hotplug goes through the PCH).
 */
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short
	 * pulse duration to 2ms (which is the minimum in the Display Port
	 * spec).
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;

	/*
	 * Port A only on LPT-LP.
	 * NOTE(review): presumably because eDP HPD is routed via the PCH
	 * there - confirm against platform docs.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3741
3742static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3743{
3744 u32 hotplug_irqs, enabled_irqs;
3745
3746 if (HAS_PCH_IBX(dev_priv)) {
3747 hotplug_irqs = SDE_HOTPLUG_MASK;
3748 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3749 } else {
3750 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3751 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3752 }
3753
3754 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3755
3756 ibx_hpd_detection_setup(dev_priv);
3757}
3758
/* Enable hotplug detection on the ICP DDI (A/B) and Type-C (1-4) ports. */
static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ICP_DDIA_HPD_ENABLE |
		   ICP_DDIB_HPD_ENABLE;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	hotplug = I915_READ(SHOTPLUG_CTL_TC);
	hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
		   ICP_TC_HPD_ENABLE(PORT_TC2) |
		   ICP_TC_HPD_ENABLE(PORT_TC3) |
		   ICP_TC_HPD_ENABLE(PORT_TC4);
	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
}
3775
3776static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3777{
3778 u32 hotplug_irqs, enabled_irqs;
3779
3780 hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
3781 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
3782
3783 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3784
3785 icp_hpd_detection_setup(dev_priv);
3786}
3787
/*
 * Enable gen11 north display hotplug detection for Type-C ports 1-4, both
 * for native TC connections and for Thunderbolt.
 */
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}
3806
/*
 * Unmask the gen11 north display TC/TBT hotplug interrupts for the enabled
 * pins, enable the detect logic, and chain to the ICP PCH setup when an
 * ICP-or-newer PCH is present.
 */
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	/* Clear the mask bits; the posting read flushes the write. */
	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv);
}
3825
/*
 * Enable hotplug detection on the SPT/KBP/CNP PCH for ports A-D
 * (PCH_PORT_HOTPLUG) and port E (PCH_PORT_HOTPLUG2).
 */
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/*
	 * CNP-only chicken bit: program the chassis clock request
	 * duration to its maximum (0xf) before enabling detection.
	 * NOTE(review): exact purpose of this duration value comes from
	 * hw workaround docs — confirm against Bspec if touched.
	 */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on ports A-D. */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* Port E lives in the second hotplug control register. */
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
3850
3851static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3852{
3853 u32 hotplug_irqs, enabled_irqs;
3854
3855 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3856 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3857
3858 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3859
3860 spt_hpd_detection_setup(dev_priv);
3861}
3862
/*
 * Enable the north-display (CPU-side) digital port A hotplug detection
 * with a 2 ms short-pulse duration. Only port A is handled here; the
 * remaining ports are routed through the PCH.
 */
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Clear the stale pulse-duration field before ORing in the new
	 * 2 ms setting together with the enable bit.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
3878
/*
 * Unmask the CPU-side DP-A hotplug interrupt using the register layout of
 * the running platform (BDW+, IVB/HSW, or ILK/SNB), enable the detect
 * logic, then chain to the PCH hotplug setup for the remaining ports.
 */
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		/* BDW: DP-A hotplug is in the DE port interrupt registers. */
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		/* IVB/HSW use a different DE bit than ILK/SNB. */
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}
3904
/*
 * Enable BXT/GLK hotplug detection on DDI A/B/C and program the per-port
 * HPD invert bits. @enabled_irqs limits inversion to ports whose hotplug
 * interrupt is actually enabled.
 */
static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * Invert the HPD sense for a port only when the VBT says the
	 * board wired it inverted and that port's interrupt is enabled.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3935
/* Enable BXT hotplug detection assuming all DDI hotplug irqs are enabled. */
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}
3940
3941static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3942{
3943 u32 hotplug_irqs, enabled_irqs;
3944
3945 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3946 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3947
3948 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3949
3950 __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3951}
3952
/*
 * Postinstall for the PCH (south display) interrupts: unmask GMBUS, AUX
 * and (on IBX) poison interrupts, then enable hotplug detection using the
 * variant matching the PCH generation. No-op on PCH_NOP systems.
 */
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	/* IIR must be clear before unmasking, or we'd miss the edge. */
	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}
3977
3978static void gen5_gt_irq_postinstall(struct drm_device *dev)
3979{
3980 struct drm_i915_private *dev_priv = to_i915(dev);
3981 struct intel_uncore *uncore = &dev_priv->uncore;
3982 u32 pm_irqs, gt_irqs;
3983
3984 pm_irqs = gt_irqs = 0;
3985
3986 dev_priv->gt_irq_mask = ~0;
3987 if (HAS_L3_DPF(dev_priv)) {
3988
3989 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3990 gt_irqs |= GT_PARITY_ERROR(dev_priv);
3991 }
3992
3993 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3994 if (IS_GEN(dev_priv, 5)) {
3995 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3996 } else {
3997 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3998 }
3999
4000 GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
4001
4002 if (INTEL_GEN(dev_priv) >= 6) {
4003
4004
4005
4006
4007 if (HAS_ENGINE(dev_priv, VECS0)) {
4008 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
4009 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
4010 }
4011
4012 dev_priv->pm_imr = 0xffffffff;
4013 GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
4014 }
4015}
4016
/*
 * Full interrupt postinstall for ILK through HSW: build the display
 * interrupt masks for the platform variant, initialize the DE registers,
 * then the GT and PCH blocks. Ordering matters: the PCH pre-postinstall
 * runs before DEIER is armed, and the PCH postinstall after.
 *
 * Returns: 0 (the postinstall hook cannot fail on these platforms).
 */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		/* IVB/HSW bit layout. */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		/* ILK/SNB bit layout. */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		/* HSW adds the eDP PSR interrupt to the DE block. */
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev);

	/* extra_mask bits are enabled (IER) but left masked (IMR). */
	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/*
		 * PCU event interrupt is unmasked on mobile ILK;
		 * presumably needed for rps/ips handling — confirm
		 * against the ironlake rps code if touched.
		 */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
4070
4071void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4072{
4073 lockdep_assert_held(&dev_priv->irq_lock);
4074
4075 if (dev_priv->display_irqs_enabled)
4076 return;
4077
4078 dev_priv->display_irqs_enabled = true;
4079
4080 if (intel_irqs_enabled(dev_priv)) {
4081 vlv_display_irq_reset(dev_priv);
4082 vlv_display_irq_postinstall(dev_priv);
4083 }
4084}
4085
4086void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4087{
4088 lockdep_assert_held(&dev_priv->irq_lock);
4089
4090 if (!dev_priv->display_irqs_enabled)
4091 return;
4092
4093 dev_priv->display_irqs_enabled = false;
4094
4095 if (intel_irqs_enabled(dev_priv))
4096 vlv_display_irq_reset(dev_priv);
4097}
4098
4099
/*
 * Full interrupt postinstall for VLV: GT interrupts first, then (if the
 * display side is enabled) the display block under irq_lock, and finally
 * the master interrupt enable with a posting read to flush it.
 *
 * Returns: 0 (cannot fail).
 */
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}
4116
/*
 * Postinstall for the gen8+ GT interrupts. gt_interrupts[] is indexed by
 * GT interrupt bank: 0 = RCS/BCS, 1 = VCS0/VCS1, 2 = PM (programmed from
 * pm_imr/pm_ier instead), 3 = VECS.
 */
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* User and context-switch interrupts for each engine shift. */
	u32 gt_interrupts[] = {
		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),

		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),

		0,

		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
	};

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);

	/*
	 * Bank 2 (PM/RPS) starts fully masked and disabled; the RPS
	 * code unmasks the bits it wants later.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
4150
/*
 * Postinstall for the gen8+ display engine interrupts: build per-pipe,
 * DE-port and DE-misc masks appropriate to the platform generation,
 * program the registers (pipes only when their power domain is up), and
 * enable hotplug detection for the north-display variants.
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	/* GSE moved out of DE_MISC after gen10. */
	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	/* VBLANK and FIFO underrun are enabled but kept masked. */
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		/* Only touch pipe registers whose power well is on. */
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		/* TC/TBT hotplug: enabled but initially masked. */
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}
4220
/*
 * Full interrupt postinstall for gen8-10: PCH pre-setup, GT and DE
 * blocks, PCH postinstall, then the master interrupt enable last so no
 * sub-block fires before it is programmed.
 *
 * Returns: 0 (cannot fail).
 */
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	gen8_master_intr_enable(dev_priv->uncore.regs);

	return 0;
}
4238
/*
 * Postinstall for the gen11 GT interrupts. Each *_INTR_ENABLE/_MASK
 * register packs two engines: one in the upper 16 bits, one in the
 * lower — hence the "irqs << 16 | irqs" duplication.
 */
static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;

	/* The shifted copy must not collide with the unshifted one. */
	BUILD_BUG_ON(irqs & 0xffff0000);

	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  irqs << 16 | irqs);

	/* Unmask per engine; RCS/BCS each occupy the upper half only. */
	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~(irqs << 16));
	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~(irqs << 16));
	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~(irqs | irqs << 16));
	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~(irqs | irqs << 16));
	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~(irqs | irqs << 16));

	/*
	 * PM (RPS) interrupts start fully masked/disabled; the RPS code
	 * enables the bits it needs later.
	 */
	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);

	/* GuC SG interrupts likewise start disabled and masked. */
	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GUC_SG_INTR_MASK,  ~0);
}
4269
/*
 * Postinstall for the ICP PCH: enable all south interrupts in SDEIER,
 * unmask GMBUS, and turn on hotplug detection. SDEIER is expected to be
 * zero from the preceding reset (WARNed otherwise).
 */
static void icp_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask = SDE_GMBUS_ICP;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	/* IIR must be clear before unmasking, or we'd miss the edge. */
	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	icp_hpd_detection_setup(dev_priv);
}
4284
4285static int gen11_irq_postinstall(struct drm_device *dev)
4286{
4287 struct drm_i915_private *dev_priv = dev->dev_private;
4288 struct intel_uncore *uncore = &dev_priv->uncore;
4289 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
4290
4291 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4292 icp_irq_postinstall(dev);
4293
4294 gen11_gt_irq_postinstall(dev_priv);
4295 gen8_de_irq_postinstall(dev_priv);
4296
4297 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4298
4299 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4300
4301 gen11_master_intr_enable(dev_priv->uncore.regs);
4302 POSTING_READ(GEN11_GFX_MSTR_IRQ);
4303
4304 return 0;
4305}
4306
/*
 * Full interrupt postinstall for CHV: gen8-style GT interrupts, VLV-style
 * display interrupts (under irq_lock, only if enabled), then the gen8
 * master enable flushed with a posting read.
 *
 * Returns: 0 (cannot fail).
 */
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
4323
/* Reset all gen2 interrupt state: pipestat bookkeeping plus IMR/IER/IIR. */
static void i8xx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
}
4333
/*
 * Interrupt postinstall for gen2: program the (16-bit) error mask,
 * unmask pipe-event/master-error interrupts, enable the user interrupt,
 * and turn on CRC-done pipestat reporting on both pipes.
 *
 * Returns: 0 (cannot fail).
 */
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	/* Page-table and memory-refresh errors stay unmasked in EMR. */
	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unused insert/dequeue bits etc. remain masked. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Pipestat changes require irq_lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
4368
/*
 * Acknowledge gen2 master-error interrupts. Returns the raw EIR value in
 * @eir and the bits that remained set after the write-to-clear (i.e.
 * "stuck" error conditions) in @eir_stuck; stuck bits are then masked in
 * EMR so they stop re-raising the interrupt.
 */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	/* EIR is write-to-clear. */
	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle EMR to all-ones and back so the masked-out stuck bits
	 * produce a fresh edge on the master-error line; simply ORing
	 * the stuck bits into EMR would presumably leave the level
	 * asserted — confirm against the ISR edge behavior if changing.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
4398
4399static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
4400 u16 eir, u16 eir_stuck)
4401{
4402 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4403
4404 if (eir_stuck)
4405 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
4406}
4407
/*
 * Acknowledge gen3+ master-error interrupts (32-bit EIR/EMR variant of
 * i8xx_error_irq_ack): clear EIR, report stuck bits via @eir_stuck, and
 * mask them in EMR with a toggle so the master error line re-edges.
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	/* EIR is write-to-clear. */
	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle EMR to all-ones and back so the masked-out stuck bits
	 * produce a fresh edge on the master-error line; see the gen2
	 * variant above for the same trick.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}
4435
4436static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4437 u32 eir, u32 eir_stuck)
4438{
4439 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
4440
4441 if (eir_stuck)
4442 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
4443}
4444
/*
 * Top-level interrupt handler for gen2. All status is ack'ed before any
 * event is handled so a new edge isn't lost, then the individual events
 * (user interrupt, master error, pipestat) are processed.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime suspend, no wakeref needed. */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Ack pipestat unconditionally: some status bits may be
		 * pending without being signalled in iir.
		 */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
4490
/*
 * Reset all gen3 interrupt state: hotplug enables/status (when the
 * platform has hotplug), pipestat bookkeeping, and IMR/IER/IIR.
 */
static void i915_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* Hotplug status bits are write-to-clear. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}
4505
/*
 * Interrupt postinstall for gen3: program EMR, unmask ASLE/pipe-event/
 * master-error (plus display-port when the platform has hotplug), and
 * enable CRC-done and ASLE pipestat reporting.
 *
 * Returns: 0 (cannot fail).
 */
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	/* Page-table and memory-refresh errors stay unmasked in EMR. */
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable and unmask the display-port (hotplug) interrupt. */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;

		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Pipestat changes require irq_lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}
4549
/*
 * Top-level interrupt handler for gen3. Like the gen2 variant, every
 * source is ack'ed (hotplug, pipestat, EIR) before any event handling,
 * so a new edge isn't lost while we process the current batch.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime suspend, no wakeref needed. */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/*
		 * Ack pipestat unconditionally: some status bits may be
		 * pending without being signalled in iir.
		 */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
4603
/*
 * Reset all gen4 interrupt state: hotplug enables/status, pipestat
 * bookkeeping, and IMR/IER/IIR.
 */
static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* Hotplug status bits are write-to-clear. */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}
4616
/*
 * Interrupt postinstall for gen4 (i965/G4X): program EMR with the
 * platform-appropriate error sources unmasked, unmask ASLE/port/pipe/
 * master-error interrupts (plus BSD user on G4X), and enable GMBUS,
 * CRC-done and ASLE pipestat reporting.
 *
 * Returns: 0 (cannot fail).
 */
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * G4X has extra error sources (memory/CP privilege, GM45 page
	 * table); everything else stays masked in EMR.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Pipestat changes require irq_lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}
4672
/*
 * Program the pre-PCH hotplug enable register for the enabled pins and
 * the CRT detection parameters. Called with irq_lock already held (this
 * setup hook is also invoked from interrupt context paths).
 */
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Per-pin enable bits from the i915 hotplug table. */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);

	/*
	 * CRT detect parameters: a 64 ms activation period on G4X and a
	 * 50% voltage-compare threshold everywhere.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Clear the full enable/CRT-parameter field, then set ours. */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
4697
/*
 * Top-level interrupt handler for gen4. Ack-everything-first like the
 * gen2/gen3 variants; additionally dispatches the BSD (video) engine
 * user interrupt present on G4X.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime suspend, no wakeref needed. */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/*
		 * Ack pipestat unconditionally: some status bits may be
		 * pending without being signalled in iir.
		 */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
4753
4754
4755
4756
4757
4758
4759
4760
/*
 * One-time interrupt subsystem init for a device: set up work items,
 * per-platform PM/RPS event masks, vblank counters, hotplug storm
 * defaults, and wire up the drm_driver irq/vblank hooks for the
 * platform generation.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	if (IS_I945GM(dev_priv))
		i945gm_vblank_work_init(dev_priv);

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* GuC-to-host events only exist pre-gen11 with GuC submission. */
	if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/*
	 * VLV uses only the up-EI-expired event for RPS; other
	 * platforms use the threshold/timeout trio.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
					   GEN6_PM_RP_DOWN_THRESHOLD |
					   GEN6_PM_RP_DOWN_TIMEOUT);

	/* Gen11 GT registers pack two engines per register (see above). */
	if (INTEL_GEN(dev_priv) > 9)
		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);

	rps->pm_intrmsk_mbz = 0;

	/*
	 * Bits that must be zero in GEN6_PMINTRMSK: the up-EI-expired
	 * bit on gen <= 7 and the GuC-redirect disable on gen8+.
	 * NOTE(review): exact hw rationale lives in the workaround
	 * docs — confirm there before changing.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	else if (INTEL_GEN(dev_priv) >= 3)
		dev->driver->get_vblank_counter = i915_get_vblank_counter;

	dev->vblank_disable_immediate = true;

	/*
	 * On VLV/CHV the display irqs can be fully disabled at runtime,
	 * so the enabled flag starts false there and is driven by
	 * valleyview_{enable,disable}_display_irqs(); everywhere else
	 * display irqs are considered always enabled.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	/*
	 * Short-pulse storm detection is only used when there is no DP
	 * MST support (MST uses short pulses for legitimate sideband
	 * messaging).
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	/* Per-platform irq/vblank/hotplug hook dispatch. */
	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		dev->driver->irq_handler = gen11_irq_handler;
		dev->driver->irq_preinstall = gen11_irq_reset;
		dev->driver->irq_postinstall = gen11_irq_postinstall;
		dev->driver->irq_uninstall = gen11_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN(dev_priv, 2)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_I945GM(dev_priv)) {
			/* I945GM gets dedicated vblank work hooks. */
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i945gm_enable_vblank;
			dev->driver->disable_vblank = i945gm_disable_vblank;
		} else if (IS_GEN(dev_priv, 3)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}
4916
4917
4918
4919
4920
4921
4922
4923void intel_irq_fini(struct drm_i915_private *i915)
4924{
4925 int i;
4926
4927 if (IS_I945GM(i915))
4928 i945gm_vblank_work_fini(i915);
4929
4930 for (i = 0; i < MAX_L3_SLICES; ++i)
4931 kfree(i915->l3_parity.remap_info[i]);
4932}
4933
4934
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945int intel_irq_install(struct drm_i915_private *dev_priv)
4946{
4947
4948
4949
4950
4951
4952 dev_priv->runtime_pm.irqs_enabled = true;
4953
4954 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4955}
4956
4957
4958
4959
4960
4961
4962
4963
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * Unregisters the interrupt handler, cancels any pending hotplug work,
 * and clears the runtime-PM "irqs enabled" flag. The order matters: the
 * handler must be gone before hotplug work is cancelled, and the flag is
 * only cleared once no interrupt can arrive any more.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}
4970
4971
4972
4973
4974
4975
4976
4977
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * Disables interrupts at runtime by invoking the driver's irq_uninstall
 * hook directly (without going through drm_irq_uninstall), then clears
 * the runtime-PM flag.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	/* Wait for any in-flight interrupt handler to finish. */
	synchronize_irq(dev_priv->drm.irq);
}
4984
4985
4986
4987
4988
4989
4990
4991
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * Re-enables interrupts at runtime by running the driver's irq_preinstall
 * and irq_postinstall hooks directly. The irqs_enabled flag is set first,
 * before any interrupt source is unmasked by the hooks.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
4998