#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
};

static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};

static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
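
/*
 * Note (summary, not hardware documentation): intel_hpd_init_pins() below
 * selects among the tables above at runtime. hpd->hpd holds the CPU- or
 * GMCH-side hotplug trigger bits for each HPD pin, while hpd->pch_hpd
 * (PCH split platforms only) holds the south display engine trigger bits.
 */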

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (INTEL_GEN(dev_priv) >= 12)
		hpd->hpd = hpd_gen12;
	else if (INTEL_GEN(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEN9_LP(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (INTEL_GEN(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (INTEL_GEN(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv))
		hpd->pch_hpd = hpd_tgp;
	else if (HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}
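
/*
 * Background on the register triplet handled by the gen2/gen3 helpers
 * below (a reader's summary, not authoritative hardware documentation):
 *
 *   IER - Interrupt Enable Register: which events may raise an interrupt.
 *   IMR - Interrupt Mask Register: which latched events are reported;
 *         setting all bits effectively silences the unit.
 *   IIR - Interrupt Identity Register: latches pending events, cleared by
 *         writing 1s. It can queue up to two events per bit, which is why
 *         the reset/assert helpers clear it twice.
 */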

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
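
/*
 * Note: callers elsewhere in the driver typically reach the reset/init
 * helpers above through the GEN3_IRQ_RESET()/GEN3_IRQ_INIT() (and GEN2_*)
 * convenience macros rather than invoking them directly.
 */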

static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid interfering read-modify-write
 * cycles, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
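
/*
 * Quick sketch of the PIPESTAT layout the helpers below rely on (summary
 * for readers, not authoritative documentation): the low 16 bits of the
 * register are sticky status bits and the high 16 bits are the matching
 * enable bits, hence the "status_mask << 16" in i915_pipestat_enable_mask()
 * above, with a few platform-specific exceptions filtered out there.
 */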

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion handling
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * Vblank/frame counter timing notes:
 *
 * - Most per-scanline events occur at the start of horizontal sync.
 * - Frame start happens at the start of vertical blank, possibly shifted
 *   forward by 1-3 lines depending on PIPECONF settings.
 * - On gen3/4 the pixel and frame counters are synchronized with the
 *   start of horizontal active on the first line of vertical active,
 *   which is why i915_get_vblank_counter() below has to cook up a vblank
 *   counter from the frame and pixel counters instead of using the frame
 *   counter directly.
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at the beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
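
/*
 * Note: g4x and later have a dedicated hardware frame counter register
 * (PIPE_FRMCOUNT_G4X) that already increments at the start of vblank, so
 * g4x_get_vblank_counter() below can read it directly without the pixel
 * counter correction used above.
 */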

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or there are
 * issues with scanline register updates.
 * This function will use the frame stamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				       clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. Compensate by shifting the pixel position by the
		 * hsync-to-hactive offset so that positions inside vblank
		 * are reported as such.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/*
	 * We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
1047
1048static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1049{
1050 switch (pin) {
1051 case HPD_PORT_C:
1052 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1053 case HPD_PORT_D:
1054 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1055 case HPD_PORT_E:
1056 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1057 case HPD_PORT_F:
1058 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1059 default:
1060 return false;
1061 }
1062}
1063
1064static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1065{
1066 switch (pin) {
1067 case HPD_PORT_D:
1068 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1069 case HPD_PORT_E:
1070 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1071 case HPD_PORT_F:
1072 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1073 case HPD_PORT_G:
1074 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1075 case HPD_PORT_H:
1076 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
1077 case HPD_PORT_I:
1078 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
1079 default:
1080 return false;
1081 }
1082}
1083
1084static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1085{
1086 switch (pin) {
1087 case HPD_PORT_A:
1088 return val & PORTA_HOTPLUG_LONG_DETECT;
1089 case HPD_PORT_B:
1090 return val & PORTB_HOTPLUG_LONG_DETECT;
1091 case HPD_PORT_C:
1092 return val & PORTC_HOTPLUG_LONG_DETECT;
1093 default:
1094 return false;
1095 }
1096}
1097
1098static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1099{
1100 switch (pin) {
1101 case HPD_PORT_A:
1102 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
1103 case HPD_PORT_B:
1104 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
1105 case HPD_PORT_C:
1106 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
1107 default:
1108 return false;
1109 }
1110}
1111
1112static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1113{
1114 switch (pin) {
1115 case HPD_PORT_C:
1116 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1117 case HPD_PORT_D:
1118 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1119 case HPD_PORT_E:
1120 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1121 case HPD_PORT_F:
1122 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1123 default:
1124 return false;
1125 }
1126}
1127
1128static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1129{
1130 switch (pin) {
1131 case HPD_PORT_D:
1132 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1133 case HPD_PORT_E:
1134 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1135 case HPD_PORT_F:
1136 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1137 case HPD_PORT_G:
1138 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1139 case HPD_PORT_H:
1140 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
1141 case HPD_PORT_I:
1142 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
1143 default:
1144 return false;
1145 }
1146}
1147
1148static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1149{
1150 switch (pin) {
1151 case HPD_PORT_E:
1152 return val & PORTE_HOTPLUG_LONG_DETECT;
1153 default:
1154 return false;
1155 }
1156}
1157
1158static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1159{
1160 switch (pin) {
1161 case HPD_PORT_A:
1162 return val & PORTA_HOTPLUG_LONG_DETECT;
1163 case HPD_PORT_B:
1164 return val & PORTB_HOTPLUG_LONG_DETECT;
1165 case HPD_PORT_C:
1166 return val & PORTC_HOTPLUG_LONG_DETECT;
1167 case HPD_PORT_D:
1168 return val & PORTD_HOTPLUG_LONG_DETECT;
1169 default:
1170 return false;
1171 }
1172}
1173
1174static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1175{
1176 switch (pin) {
1177 case HPD_PORT_A:
1178 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1179 default:
1180 return false;
1181 }
1182}
1183
1184static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1185{
1186 switch (pin) {
1187 case HPD_PORT_B:
1188 return val & PORTB_HOTPLUG_LONG_DETECT;
1189 case HPD_PORT_C:
1190 return val & PORTC_HOTPLUG_LONG_DETECT;
1191 case HPD_PORT_D:
1192 return val & PORTD_HOTPLUG_LONG_DETECT;
1193 default:
1194 return false;
1195 }
1196}
1197
1198static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1199{
1200 switch (pin) {
1201 case HPD_PORT_B:
1202 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1203 case HPD_PORT_C:
1204 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1205 case HPD_PORT_D:
1206 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1207 default:
1208 return false;
1209 }
1210}

/**
 * intel_get_hpd_pins - return a mask of hpd pins that have triggered
 * @dev_priv: private driver data pointer
 * @pin_mask: mask of hpd pins which have triggered
 * @long_mask: mask of hpd pins which are long
 * @hotplug_trigger: hotplug trigger bits coming from the interrupt
 * @dig_hotplug_reg: digital port hotplug control register value
 * @hpd: hpd pin table
 * @long_pulse_detect: callback to determine the pulse length
 */
1219static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1220 u32 *pin_mask, u32 *long_mask,
1221 u32 hotplug_trigger, u32 dig_hotplug_reg,
1222 const u32 hpd[HPD_NUM_PINS],
1223 bool long_pulse_detect(enum hpd_pin pin, u32 val))
1224{
1225 enum hpd_pin pin;
1226
1227 BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1228
1229 for_each_hpd_pin(pin) {
1230 if ((hpd[pin] & hotplug_trigger) == 0)
1231 continue;
1232
1233 *pin_mask |= BIT(pin);
1234
1235 if (long_pulse_detect(pin, dig_hotplug_reg))
1236 *long_mask |= BIT(pin);
1237 }
1238
1239 drm_dbg(&dev_priv->drm,
1240 "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1241 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1242
1243}
1244
1245static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1246{
1247 wake_up_all(&dev_priv->gmbus_wait_queue);
1248}
1249
1250static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1251{
1252 wake_up_all(&dev_priv->gmbus_wait_queue);
1253}
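
/*
 * Note: gmbus_irq_handler() and dp_aux_irq_handler() above intentionally
 * share dev_priv->gmbus_wait_queue; both the GMBUS and the DP AUX code wait
 * on this queue, so a single wake_up_all() per event source is sufficient.
 */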
1254
1255#if defined(CONFIG_DEBUG_FS)
1256static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1257 enum pipe pipe,
1258 u32 crc0, u32 crc1,
1259 u32 crc2, u32 crc3,
1260 u32 crc4)
1261{
1262 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1263 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1264 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1265
1266 trace_intel_pipe_crc(crtc, crcs);
1267
1268 spin_lock(&pipe_crc->lock);
1269
1270
1271
1272
1273
1274
1275
1276
1277 if (pipe_crc->skipped <= 0 ||
1278 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1279 pipe_crc->skipped++;
1280 spin_unlock(&pipe_crc->lock);
1281 return;
1282 }
1283 spin_unlock(&pipe_crc->lock);
1284
1285 drm_crtc_add_crc_entry(&crtc->base, true,
1286 drm_crtc_accurate_vblank_count(&crtc->base),
1287 crcs);
1288}
1289#else
1290static inline void
1291display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1292 enum pipe pipe,
1293 u32 crc0, u32 crc1,
1294 u32 crc2, u32 crc3,
1295 u32 crc4) {}
1296#endif
1297
1298
1299static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1300 enum pipe pipe)
1301{
1302 display_pipe_crc_irq_handler(dev_priv, pipe,
1303 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1304 0, 0, 0, 0);
1305}
1306
1307static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1308 enum pipe pipe)
1309{
1310 display_pipe_crc_irq_handler(dev_priv, pipe,
1311 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1312 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1313 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1314 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1315 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1316}
1317
1318static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1319 enum pipe pipe)
1320{
1321 u32 res1, res2;
1322
1323 if (INTEL_GEN(dev_priv) >= 3)
1324 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1325 else
1326 res1 = 0;
1327
1328 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1329 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1330 else
1331 res2 = 0;
1332
1333 display_pipe_crc_irq_handler(dev_priv, pipe,
1334 I915_READ(PIPE_CRC_RES_RED(pipe)),
1335 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1336 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1337 res1, res2);
1338}
1339
1340static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1341{
1342 enum pipe pipe;
1343
1344 for_each_pipe(dev_priv, pipe) {
1345 I915_WRITE(PIPESTAT(pipe),
1346 PIPESTAT_INT_STATUS_MASK |
1347 PIPE_FIFO_UNDERRUN_STATUS);
1348
1349 dev_priv->pipestat_irq_mask[pipe] = 0;
1350 }
1351}
1352
1353static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1354 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1355{
1356 enum pipe pipe;
1357
1358 spin_lock(&dev_priv->irq_lock);
1359
1360 if (!dev_priv->display_irqs_enabled) {
1361 spin_unlock(&dev_priv->irq_lock);
1362 return;
1363 }
1364
1365 for_each_pipe(dev_priv, pipe) {
1366 i915_reg_t reg;
1367 u32 status_mask, enable_mask, iir_bit = 0;
1368
		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
1378 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1379
1380 switch (pipe) {
1381 default:
1382 case PIPE_A:
1383 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1384 break;
1385 case PIPE_B:
1386 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1387 break;
1388 case PIPE_C:
1389 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1390 break;
1391 }
1392 if (iir & iir_bit)
1393 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1394
1395 if (!status_mask)
1396 continue;
1397
1398 reg = PIPESTAT(pipe);
1399 pipe_stats[pipe] = I915_READ(reg) & status_mask;
1400 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1401
		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
1411 if (pipe_stats[pipe]) {
1412 I915_WRITE(reg, pipe_stats[pipe]);
1413 I915_WRITE(reg, enable_mask);
1414 }
1415 }
1416 spin_unlock(&dev_priv->irq_lock);
1417}
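
/*
 * The pipe_stats[] array filled in by i9xx_pipestat_irq_ack() above is
 * consumed by the i8xx/i915/i965/valleyview *_pipestat_irq_handler()
 * variants below, after the top-level interrupt handler has cleared the
 * IIR itself.
 */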
1418
1419static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1420 u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1421{
1422 enum pipe pipe;
1423
1424 for_each_pipe(dev_priv, pipe) {
1425 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1426 intel_handle_vblank(dev_priv, pipe);
1427
1428 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1429 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1430
1431 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1432 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1433 }
1434}
1435
1436static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1437 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1438{
1439 bool blc_event = false;
1440 enum pipe pipe;
1441
1442 for_each_pipe(dev_priv, pipe) {
1443 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1444 intel_handle_vblank(dev_priv, pipe);
1445
1446 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1447 blc_event = true;
1448
1449 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1450 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1451
1452 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1453 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1454 }
1455
1456 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1457 intel_opregion_asle_intr(dev_priv);
1458}
1459
1460static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1461 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1462{
1463 bool blc_event = false;
1464 enum pipe pipe;
1465
1466 for_each_pipe(dev_priv, pipe) {
1467 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1468 intel_handle_vblank(dev_priv, pipe);
1469
1470 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1471 blc_event = true;
1472
1473 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1474 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1475
1476 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1477 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1478 }
1479
1480 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1481 intel_opregion_asle_intr(dev_priv);
1482
1483 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1484 gmbus_irq_handler(dev_priv);
1485}
1486
1487static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1488 u32 pipe_stats[I915_MAX_PIPES])
1489{
1490 enum pipe pipe;
1491
1492 for_each_pipe(dev_priv, pipe) {
1493 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1494 intel_handle_vblank(dev_priv, pipe);
1495
1496 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1497 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1498
1499 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1500 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1501 }
1502
1503 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1504 gmbus_irq_handler(dev_priv);
1505}
1506
1507static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1508{
1509 u32 hotplug_status = 0, hotplug_status_mask;
1510 int i;
1511
1512 if (IS_G4X(dev_priv) ||
1513 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1514 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1515 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1516 else
1517 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1518
	/*
	 * We absolutely have to clear all the pending interrupt bits in
	 * PORT_HOTPLUG_STAT: the IIR hotplug bit is edge triggered, so
	 * unless the status register reads back as zero no new edge (and
	 * hence no new interrupt) will be generated. New hotplug events
	 * may latch while we are clearing, which is why we re-read and
	 * re-clear in a loop, and only warn if the register still refuses
	 * to clear after several attempts.
	 */
1528 for (i = 0; i < 10; i++) {
1529 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
1530
1531 if (tmp == 0)
1532 return hotplug_status;
1533
1534 hotplug_status |= tmp;
1535 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1536 }
1537
1538 drm_WARN_ONCE(&dev_priv->drm, 1,
1539 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1540 I915_READ(PORT_HOTPLUG_STAT));
1541
1542 return hotplug_status;
1543}
1544
1545static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1546 u32 hotplug_status)
1547{
1548 u32 pin_mask = 0, long_mask = 0;
1549 u32 hotplug_trigger;
1550
1551 if (IS_G4X(dev_priv) ||
1552 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1553 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1554 else
1555 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1556
1557 if (hotplug_trigger) {
1558 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1559 hotplug_trigger, hotplug_trigger,
1560 dev_priv->hotplug.hpd,
1561 i9xx_port_hotplug_long_detect);
1562
1563 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1564 }
1565
1566 if ((IS_G4X(dev_priv) ||
1567 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1568 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1569 dp_aux_irq_handler(dev_priv);
1570}
1571
1572static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1573{
1574 struct drm_i915_private *dev_priv = arg;
1575 irqreturn_t ret = IRQ_NONE;
1576
1577 if (!intel_irqs_enabled(dev_priv))
1578 return IRQ_NONE;
1579
1580
1581 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1582
1583 do {
1584 u32 iir, gt_iir, pm_iir;
1585 u32 pipe_stats[I915_MAX_PIPES] = {};
1586 u32 hotplug_status = 0;
1587 u32 ier = 0;
1588
1589 gt_iir = I915_READ(GTIIR);
1590 pm_iir = I915_READ(GEN6_PMIIR);
1591 iir = I915_READ(VLV_IIR);
1592
1593 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1594 break;
1595
1596 ret = IRQ_HANDLED;
1597
		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR and GEN6_PMIIR
		 * bits this time around.
		 */
1611 I915_WRITE(VLV_MASTER_IER, 0);
1612 ier = I915_READ(VLV_IER);
1613 I915_WRITE(VLV_IER, 0);
1614
1615 if (gt_iir)
1616 I915_WRITE(GTIIR, gt_iir);
1617 if (pm_iir)
1618 I915_WRITE(GEN6_PMIIR, pm_iir);
1619
1620 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1621 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1622
1623
1624
1625 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1626
1627 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1628 I915_LPE_PIPE_B_INTERRUPT))
1629 intel_lpe_audio_irq_handler(dev_priv);
1630
1631
1632
1633
1634
1635 if (iir)
1636 I915_WRITE(VLV_IIR, iir);
1637
1638 I915_WRITE(VLV_IER, ier);
1639 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1640
1641 if (gt_iir)
1642 gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
1643 if (pm_iir)
1644 gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
1645
1646 if (hotplug_status)
1647 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1648
1649 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1650 } while (0);
1651
1652 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1653
1654 return ret;
1655}
1656
1657static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1658{
1659 struct drm_i915_private *dev_priv = arg;
1660 irqreturn_t ret = IRQ_NONE;
1661
1662 if (!intel_irqs_enabled(dev_priv))
1663 return IRQ_NONE;
1664
1665
1666 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1667
1668 do {
1669 u32 master_ctl, iir;
1670 u32 pipe_stats[I915_MAX_PIPES] = {};
1671 u32 hotplug_status = 0;
1672 u32 ier = 0;
1673
1674 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1675 iir = I915_READ(VLV_IIR);
1676
1677 if (master_ctl == 0 && iir == 0)
1678 break;
1679
1680 ret = IRQ_HANDLED;
1681
		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
1695 I915_WRITE(GEN8_MASTER_IRQ, 0);
1696 ier = I915_READ(VLV_IER);
1697 I915_WRITE(VLV_IER, 0);
1698
1699 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
1700
1701 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1702 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1703
1704
1705
1706 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1707
1708 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1709 I915_LPE_PIPE_B_INTERRUPT |
1710 I915_LPE_PIPE_C_INTERRUPT))
1711 intel_lpe_audio_irq_handler(dev_priv);
1712
1713
1714
1715
1716
1717 if (iir)
1718 I915_WRITE(VLV_IIR, iir);
1719
1720 I915_WRITE(VLV_IER, ier);
1721 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1722
1723 if (hotplug_status)
1724 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1725
1726 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1727 } while (0);
1728
1729 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1730
1731 return ret;
1732}
1733
1734static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1735 u32 hotplug_trigger)
1736{
1737 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1738
	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
1745 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1746 if (!hotplug_trigger) {
1747 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1748 PORTD_HOTPLUG_STATUS_MASK |
1749 PORTC_HOTPLUG_STATUS_MASK |
1750 PORTB_HOTPLUG_STATUS_MASK;
1751 dig_hotplug_reg &= ~mask;
1752 }
1753
1754 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1755 if (!hotplug_trigger)
1756 return;
1757
1758 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1759 hotplug_trigger, dig_hotplug_reg,
1760 dev_priv->hotplug.pch_hpd,
1761 pch_port_hotplug_long_detect);
1762
1763 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1764}
1765
1766static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1767{
1768 enum pipe pipe;
1769 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1770
1771 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1772
1773 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1774 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1775 SDE_AUDIO_POWER_SHIFT);
1776 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1777 port_name(port));
1778 }
1779
1780 if (pch_iir & SDE_AUX_MASK)
1781 dp_aux_irq_handler(dev_priv);
1782
1783 if (pch_iir & SDE_GMBUS)
1784 gmbus_irq_handler(dev_priv);
1785
1786 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1787 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1788
1789 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1790 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1791
1792 if (pch_iir & SDE_POISON)
1793 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1794
1795 if (pch_iir & SDE_FDI_MASK) {
1796 for_each_pipe(dev_priv, pipe)
1797 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1798 pipe_name(pipe),
1799 I915_READ(FDI_RX_IIR(pipe)));
1800 }
1801
1802 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1803 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1804
1805 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1806 drm_dbg(&dev_priv->drm,
1807 "PCH transcoder CRC error interrupt\n");
1808
1809 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1810 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1811
1812 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1813 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1814}
1815
1816static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1817{
1818 u32 err_int = I915_READ(GEN7_ERR_INT);
1819 enum pipe pipe;
1820
1821 if (err_int & ERR_INT_POISON)
1822 drm_err(&dev_priv->drm, "Poison interrupt\n");
1823
1824 for_each_pipe(dev_priv, pipe) {
1825 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1826 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1827
1828 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1829 if (IS_IVYBRIDGE(dev_priv))
1830 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1831 else
1832 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1833 }
1834 }
1835
1836 I915_WRITE(GEN7_ERR_INT, err_int);
1837}
1838
1839static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1840{
1841 u32 serr_int = I915_READ(SERR_INT);
1842 enum pipe pipe;
1843
1844 if (serr_int & SERR_INT_POISON)
1845 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1846
1847 for_each_pipe(dev_priv, pipe)
1848 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1849 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1850
1851 I915_WRITE(SERR_INT, serr_int);
1852}
1853
1854static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1855{
1856 enum pipe pipe;
1857 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1858
1859 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1860
1861 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1862 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1863 SDE_AUDIO_POWER_SHIFT_CPT);
1864 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1865 port_name(port));
1866 }
1867
1868 if (pch_iir & SDE_AUX_MASK_CPT)
1869 dp_aux_irq_handler(dev_priv);
1870
1871 if (pch_iir & SDE_GMBUS_CPT)
1872 gmbus_irq_handler(dev_priv);
1873
1874 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1875 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1876
1877 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1878 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1879
1880 if (pch_iir & SDE_FDI_MASK_CPT) {
1881 for_each_pipe(dev_priv, pipe)
1882 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1883 pipe_name(pipe),
1884 I915_READ(FDI_RX_IIR(pipe)));
1885 }
1886
1887 if (pch_iir & SDE_ERROR_CPT)
1888 cpt_serr_int_handler(dev_priv);
1889}
1890
1891static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1892{
1893 u32 ddi_hotplug_trigger, tc_hotplug_trigger;
1894 u32 pin_mask = 0, long_mask = 0;
1895 bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
1896
1897 if (HAS_PCH_TGP(dev_priv)) {
1898 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1899 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
1900 tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
1901 } else if (HAS_PCH_JSP(dev_priv)) {
1902 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1903 tc_hotplug_trigger = 0;
1904 } else if (HAS_PCH_MCC(dev_priv)) {
1905 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1906 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
1907 tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
1908 } else {
1909 drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
1910 "Unrecognized PCH type 0x%x\n",
1911 INTEL_PCH_TYPE(dev_priv));
1912
1913 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1914 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
1915 tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
1916 }
1917
1918 if (ddi_hotplug_trigger) {
1919 u32 dig_hotplug_reg;
1920
1921 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
1922 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1923
1924 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1925 ddi_hotplug_trigger, dig_hotplug_reg,
1926 dev_priv->hotplug.pch_hpd,
1927 icp_ddi_port_hotplug_long_detect);
1928 }
1929
1930 if (tc_hotplug_trigger) {
1931 u32 dig_hotplug_reg;
1932
1933 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
1934 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
1935
1936 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1937 tc_hotplug_trigger, dig_hotplug_reg,
1938 dev_priv->hotplug.pch_hpd,
1939 tc_port_hotplug_long_detect);
1940 }
1941
1942 if (pin_mask)
1943 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1944
1945 if (pch_iir & SDE_GMBUS_ICP)
1946 gmbus_irq_handler(dev_priv);
1947}
1948
1949static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1950{
1951 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1952 ~SDE_PORTE_HOTPLUG_SPT;
1953 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1954 u32 pin_mask = 0, long_mask = 0;
1955
1956 if (hotplug_trigger) {
1957 u32 dig_hotplug_reg;
1958
1959 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1960 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1961
1962 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1963 hotplug_trigger, dig_hotplug_reg,
1964 dev_priv->hotplug.pch_hpd,
1965 spt_port_hotplug_long_detect);
1966 }
1967
1968 if (hotplug2_trigger) {
1969 u32 dig_hotplug_reg;
1970
1971 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1972 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1973
1974 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1975 hotplug2_trigger, dig_hotplug_reg,
1976 dev_priv->hotplug.pch_hpd,
1977 spt_port_hotplug2_long_detect);
1978 }
1979
1980 if (pin_mask)
1981 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1982
1983 if (pch_iir & SDE_GMBUS_CPT)
1984 gmbus_irq_handler(dev_priv);
1985}
1986
1987static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1988 u32 hotplug_trigger)
1989{
1990 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1991
1992 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1993 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
1994
1995 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1996 hotplug_trigger, dig_hotplug_reg,
1997 dev_priv->hotplug.hpd,
1998 ilk_port_hotplug_long_detect);
1999
2000 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2001}
2002
2003static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2004 u32 de_iir)
2005{
2006 enum pipe pipe;
2007 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2008
2009 if (hotplug_trigger)
2010 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2011
2012 if (de_iir & DE_AUX_CHANNEL_A)
2013 dp_aux_irq_handler(dev_priv);
2014
2015 if (de_iir & DE_GSE)
2016 intel_opregion_asle_intr(dev_priv);
2017
2018 if (de_iir & DE_POISON)
2019 drm_err(&dev_priv->drm, "Poison interrupt\n");
2020
2021 for_each_pipe(dev_priv, pipe) {
2022 if (de_iir & DE_PIPE_VBLANK(pipe))
2023 intel_handle_vblank(dev_priv, pipe);
2024
2025 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2026 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2027
2028 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2029 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2030 }
2031
2032
2033 if (de_iir & DE_PCH_EVENT) {
2034 u32 pch_iir = I915_READ(SDEIIR);
2035
2036 if (HAS_PCH_CPT(dev_priv))
2037 cpt_irq_handler(dev_priv, pch_iir);
2038 else
2039 ibx_irq_handler(dev_priv, pch_iir);
2040
2041
2042 I915_WRITE(SDEIIR, pch_iir);
2043 }
2044
2045 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2046 gen5_rps_irq_handler(&dev_priv->gt.rps);
2047}
2048
2049static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2050 u32 de_iir)
2051{
2052 enum pipe pipe;
2053 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2054
2055 if (hotplug_trigger)
2056 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2057
2058 if (de_iir & DE_ERR_INT_IVB)
2059 ivb_err_int_handler(dev_priv);
2060
2061 if (de_iir & DE_EDP_PSR_INT_HSW) {
2062 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2063
2064 intel_psr_irq_handler(dev_priv, psr_iir);
2065 I915_WRITE(EDP_PSR_IIR, psr_iir);
2066 }
2067
2068 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2069 dp_aux_irq_handler(dev_priv);
2070
2071 if (de_iir & DE_GSE_IVB)
2072 intel_opregion_asle_intr(dev_priv);
2073
2074 for_each_pipe(dev_priv, pipe) {
2075 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2076 intel_handle_vblank(dev_priv, pipe);
2077 }
2078
2079
2080 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2081 u32 pch_iir = I915_READ(SDEIIR);
2082
2083 cpt_irq_handler(dev_priv, pch_iir);
2084
2085
2086 I915_WRITE(SDEIIR, pch_iir);
2087 }
2088}
2089
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2098static irqreturn_t ilk_irq_handler(int irq, void *arg)
2099{
2100 struct drm_i915_private *dev_priv = arg;
2101 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2102 irqreturn_t ret = IRQ_NONE;
2103
2104 if (!intel_irqs_enabled(dev_priv))
2105 return IRQ_NONE;
2106
2107
2108 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2109
	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/*
	 * Disable south interrupts. We'll only write to SDEIIR once, so
	 * further interrupts will be stored on its back queue, and then we'll
	 * be able to process them after we restore SDEIER (as soon as we
	 * restore it, we'll get an interrupt if SDEIIR still has something to
	 * process due to its back queue).
	 */
2119 if (!HAS_PCH_NOP(dev_priv)) {
2120 sde_ier = I915_READ(SDEIER);
2121 I915_WRITE(SDEIER, 0);
2122 }
2123
2124
2125
2126 gt_iir = I915_READ(GTIIR);
2127 if (gt_iir) {
2128 I915_WRITE(GTIIR, gt_iir);
2129 ret = IRQ_HANDLED;
2130 if (INTEL_GEN(dev_priv) >= 6)
2131 gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
2132 else
2133 gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
2134 }
2135
2136 de_iir = I915_READ(DEIIR);
2137 if (de_iir) {
2138 I915_WRITE(DEIIR, de_iir);
2139 ret = IRQ_HANDLED;
2140 if (INTEL_GEN(dev_priv) >= 7)
2141 ivb_display_irq_handler(dev_priv, de_iir);
2142 else
2143 ilk_display_irq_handler(dev_priv, de_iir);
2144 }
2145
2146 if (INTEL_GEN(dev_priv) >= 6) {
2147 u32 pm_iir = I915_READ(GEN6_PMIIR);
2148 if (pm_iir) {
2149 I915_WRITE(GEN6_PMIIR, pm_iir);
2150 ret = IRQ_HANDLED;
2151 gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
2152 }
2153 }
2154
2155 I915_WRITE(DEIER, de_ier);
2156 if (!HAS_PCH_NOP(dev_priv))
2157 I915_WRITE(SDEIER, sde_ier);
2158
2159
2160 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2161
2162 return ret;
2163}
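
/*
 * Note: the gen8+ display interrupt handling below follows the same basic
 * disable-master -> ack IIR -> handle -> re-enable-master pattern as
 * ilk_irq_handler() above, just with per-domain master control registers.
 */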
2164
2165static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2166 u32 hotplug_trigger)
2167{
2168 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2169
2170 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2171 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2172
2173 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2174 hotplug_trigger, dig_hotplug_reg,
2175 dev_priv->hotplug.hpd,
2176 bxt_port_hotplug_long_detect);
2177
2178 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2179}
2180
2181static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2182{
2183 u32 pin_mask = 0, long_mask = 0;
2184 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2185 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2186 long_pulse_detect_func long_pulse_detect;
2187
2188 if (INTEL_GEN(dev_priv) >= 12)
2189 long_pulse_detect = gen12_port_hotplug_long_detect;
2190 else
2191 long_pulse_detect = gen11_port_hotplug_long_detect;
2192
2193 if (trigger_tc) {
2194 u32 dig_hotplug_reg;
2195
2196 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2197 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2198
2199 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2200 trigger_tc, dig_hotplug_reg,
2201 dev_priv->hotplug.hpd,
2202 long_pulse_detect);
2203 }
2204
2205 if (trigger_tbt) {
2206 u32 dig_hotplug_reg;
2207
2208 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2209 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2210
2211 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2212 trigger_tbt, dig_hotplug_reg,
2213 dev_priv->hotplug.hpd,
2214 long_pulse_detect);
2215 }
2216
2217 if (pin_mask)
2218 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2219 else
2220 drm_err(&dev_priv->drm,
2221 "Unexpected DE HPD interrupt 0x%08x\n", iir);
2222}
2223
2224static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2225{
2226 u32 mask;
2227
2228 if (INTEL_GEN(dev_priv) >= 12)
2229 return TGL_DE_PORT_AUX_DDIA |
2230 TGL_DE_PORT_AUX_DDIB |
2231 TGL_DE_PORT_AUX_DDIC |
2232 TGL_DE_PORT_AUX_USBC1 |
2233 TGL_DE_PORT_AUX_USBC2 |
2234 TGL_DE_PORT_AUX_USBC3 |
2235 TGL_DE_PORT_AUX_USBC4 |
2236 TGL_DE_PORT_AUX_USBC5 |
2237 TGL_DE_PORT_AUX_USBC6;
2238
2239
2240 mask = GEN8_AUX_CHANNEL_A;
2241 if (INTEL_GEN(dev_priv) >= 9)
2242 mask |= GEN9_AUX_CHANNEL_B |
2243 GEN9_AUX_CHANNEL_C |
2244 GEN9_AUX_CHANNEL_D;
2245
2246 if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2247 mask |= CNL_AUX_CHANNEL_F;
2248
2249 if (IS_GEN(dev_priv, 11))
2250 mask |= ICL_AUX_CHANNEL_E;
2251
2252 return mask;
2253}
2254
2255static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2256{
2257 if (INTEL_GEN(dev_priv) >= 11)
2258 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2259 else if (INTEL_GEN(dev_priv) >= 9)
2260 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2261 else
2262 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2263}
2264
2265static void
2266gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2267{
2268 bool found = false;
2269
2270 if (iir & GEN8_DE_MISC_GSE) {
2271 intel_opregion_asle_intr(dev_priv);
2272 found = true;
2273 }
2274
2275 if (iir & GEN8_DE_EDP_PSR) {
2276 u32 psr_iir;
2277 i915_reg_t iir_reg;
2278
2279 if (INTEL_GEN(dev_priv) >= 12)
2280 iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
2281 else
2282 iir_reg = EDP_PSR_IIR;
2283
2284 psr_iir = I915_READ(iir_reg);
2285 I915_WRITE(iir_reg, psr_iir);
2286
2287 if (psr_iir)
2288 found = true;
2289
2290 intel_psr_irq_handler(dev_priv, psr_iir);
2291 }
2292
2293 if (!found)
2294 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2295}
2296
2297static irqreturn_t
2298gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2299{
2300 irqreturn_t ret = IRQ_NONE;
2301 u32 iir;
2302 enum pipe pipe;
2303
2304 if (master_ctl & GEN8_DE_MISC_IRQ) {
2305 iir = I915_READ(GEN8_DE_MISC_IIR);
2306 if (iir) {
2307 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2308 ret = IRQ_HANDLED;
2309 gen8_de_misc_irq_handler(dev_priv, iir);
2310 } else {
2311 drm_err(&dev_priv->drm,
2312 "The master control interrupt lied (DE MISC)!\n");
2313 }
2314 }
2315
2316 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2317 iir = I915_READ(GEN11_DE_HPD_IIR);
2318 if (iir) {
2319 I915_WRITE(GEN11_DE_HPD_IIR, iir);
2320 ret = IRQ_HANDLED;
2321 gen11_hpd_irq_handler(dev_priv, iir);
2322 } else {
2323 drm_err(&dev_priv->drm,
2324 "The master control interrupt lied, (DE HPD)!\n");
2325 }
2326 }
2327
2328 if (master_ctl & GEN8_DE_PORT_IRQ) {
2329 iir = I915_READ(GEN8_DE_PORT_IIR);
2330 if (iir) {
2331 u32 tmp_mask;
2332 bool found = false;
2333
2334 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2335 ret = IRQ_HANDLED;
2336
2337 if (iir & gen8_de_port_aux_mask(dev_priv)) {
2338 dp_aux_irq_handler(dev_priv);
2339 found = true;
2340 }
2341
2342 if (IS_GEN9_LP(dev_priv)) {
2343 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2344 if (tmp_mask) {
2345 bxt_hpd_irq_handler(dev_priv, tmp_mask);
2346 found = true;
2347 }
2348 } else if (IS_BROADWELL(dev_priv)) {
2349 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2350 if (tmp_mask) {
2351 ilk_hpd_irq_handler(dev_priv, tmp_mask);
2352 found = true;
2353 }
2354 }
2355
2356 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2357 gmbus_irq_handler(dev_priv);
2358 found = true;
2359 }
2360
2361 if (!found)
2362 drm_err(&dev_priv->drm,
2363 "Unexpected DE Port interrupt\n");
2364 }
2365 else
2366 drm_err(&dev_priv->drm,
2367 "The master control interrupt lied (DE PORT)!\n");
2368 }
2369
2370 for_each_pipe(dev_priv, pipe) {
2371 u32 fault_errors;
2372
2373 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2374 continue;
2375
2376 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2377 if (!iir) {
2378 drm_err(&dev_priv->drm,
2379 "The master control interrupt lied (DE PIPE)!\n");
2380 continue;
2381 }
2382
2383 ret = IRQ_HANDLED;
2384 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2385
2386 if (iir & GEN8_PIPE_VBLANK)
2387 intel_handle_vblank(dev_priv, pipe);
2388
2389 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2390 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2391
2392 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2393 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2394
2395 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2396 if (fault_errors)
2397 drm_err(&dev_priv->drm,
2398 "Fault errors on pipe %c: 0x%08x\n",
2399 pipe_name(pipe),
2400 fault_errors);
2401 }
2402
2403 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2404 master_ctl & GEN8_DE_PCH_IRQ) {
2405
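 /*
  * South display engine (PCH) interrupt: read and clear SDEIIR, then
  * dispatch to the PCH-generation specific handler below.
  */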
2410 iir = I915_READ(SDEIIR);
2411 if (iir) {
2412 I915_WRITE(SDEIIR, iir);
2413 ret = IRQ_HANDLED;
2414
2415 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2416 icp_irq_handler(dev_priv, iir);
2417 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2418 spt_irq_handler(dev_priv, iir);
2419 else
2420 cpt_irq_handler(dev_priv, iir);
2421 } else {
2422
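 /*
  * The master control bit claimed a PCH interrupt but SDEIIR was
  * already clear; nothing to ack or handle here.
  */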
2426 drm_dbg(&dev_priv->drm,
2427 "The master control interrupt lied (SDE)!\n");
2428 }
2429 }
2430
2431 return ret;
2432}
2433
2434static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2435{
2436 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2437
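 /*
  * With the master interrupt now disabled, read the register back to
  * get a stable snapshot of the pending sources. Individual
  * indications are cleared by their respective acks.
  */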
2444 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2445}
2446
2447static inline void gen8_master_intr_enable(void __iomem * const regs)
2448{
2449 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2450}
2451
2452static irqreturn_t gen8_irq_handler(int irq, void *arg)
2453{
2454 struct drm_i915_private *dev_priv = arg;
2455 void __iomem * const regs = dev_priv->uncore.regs;
2456 u32 master_ctl;
2457
2458 if (!intel_irqs_enabled(dev_priv))
2459 return IRQ_NONE;
2460
2461 master_ctl = gen8_master_intr_disable(regs);
2462 if (!master_ctl) {
2463 gen8_master_intr_enable(regs);
2464 return IRQ_NONE;
2465 }
2466
2467
2468 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
2469
2470
2471 if (master_ctl & ~GEN8_GT_IRQS) {
2472 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2473 gen8_de_irq_handler(dev_priv, master_ctl);
2474 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2475 }
2476
2477 gen8_master_intr_enable(regs);
2478
2479 return IRQ_HANDLED;
2480}
2481
2482static u32
2483gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
2484{
2485 void __iomem * const regs = gt->uncore->regs;
2486 u32 iir;
2487
2488 if (!(master_ctl & GEN11_GU_MISC_IRQ))
2489 return 0;
2490
2491 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2492 if (likely(iir))
2493 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2494
2495 return iir;
2496}
2497
2498static void
2499gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2500{
2501 if (iir & GEN11_GU_MISC_GSE)
2502 intel_opregion_asle_intr(gt->i915);
2503}
2504
2505static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2506{
2507 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2508
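 /*
  * With the master interrupt now disabled, read the register back to
  * get a stable snapshot of the pending sources. Individual
  * indications are cleared by their respective acks.
  */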
2515 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2516}
2517
2518static inline void gen11_master_intr_enable(void __iomem * const regs)
2519{
2520 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2521}
2522
2523static void
2524gen11_display_irq_handler(struct drm_i915_private *i915)
2525{
2526 void __iomem * const regs = i915->uncore.regs;
2527 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2528
2529 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2530
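 /*
  * GEN11_DISPLAY_INT_CTL has the same layout as GEN8_MASTER_IRQ, so
  * the gen8 DE handler can be reused for the display sources.
  */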
2534 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2535 gen8_de_irq_handler(i915, disp_ctl);
2536 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2537 GEN11_DISPLAY_IRQ_ENABLE);
2538
2539 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2540}
2541
2542static __always_inline irqreturn_t
2543__gen11_irq_handler(struct drm_i915_private * const i915,
2544 u32 (*intr_disable)(void __iomem * const regs),
2545 void (*intr_enable)(void __iomem * const regs))
2546{
2547 void __iomem * const regs = i915->uncore.regs;
2548 struct intel_gt *gt = &i915->gt;
2549 u32 master_ctl;
2550 u32 gu_misc_iir;
2551
2552 if (!intel_irqs_enabled(i915))
2553 return IRQ_NONE;
2554
2555 master_ctl = intr_disable(regs);
2556 if (!master_ctl) {
2557 intr_enable(regs);
2558 return IRQ_NONE;
2559 }
2560
2561
2562 gen11_gt_irq_handler(gt, master_ctl);
2563
2564
2565 if (master_ctl & GEN11_DISPLAY_IRQ)
2566 gen11_display_irq_handler(i915);
2567
2568 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2569
2570 intr_enable(regs);
2571
2572 gen11_gu_misc_irq_handler(gt, gu_misc_iir);
2573
2574 return IRQ_HANDLED;
2575}
2576
2577static irqreturn_t gen11_irq_handler(int irq, void *arg)
2578{
2579 return __gen11_irq_handler(arg,
2580 gen11_master_intr_disable,
2581 gen11_master_intr_enable);
2582}
2583
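/*
 * Hooks called from the drm core to enable vblank interrupt generation on a
 * given crtc.
 */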
2587int i8xx_enable_vblank(struct drm_crtc *crtc)
2588{
2589 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2590 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2591 unsigned long irqflags;
2592
2593 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2594 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2595 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2596
2597 return 0;
2598}
2599
2600int i915gm_enable_vblank(struct drm_crtc *crtc)
2601{
2602 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2603
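 /*
  * Disable render clock gating in C-states (SCPD0) while any vblank
  * interrupt is enabled; i915gm_disable_vblank() re-enables it once
  * the last vblank interrupt is turned off again.
  */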
2610 if (dev_priv->vblank_enabled++ == 0)
2611 I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2612
2613 return i8xx_enable_vblank(crtc);
2614}
2615
2616int i965_enable_vblank(struct drm_crtc *crtc)
2617{
2618 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2619 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2620 unsigned long irqflags;
2621
2622 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2623 i915_enable_pipestat(dev_priv, pipe,
2624 PIPE_START_VBLANK_INTERRUPT_STATUS);
2625 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2626
2627 return 0;
2628}
2629
2630int ilk_enable_vblank(struct drm_crtc *crtc)
2631{
2632 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2633 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2634 unsigned long irqflags;
2635 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2636 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2637
2638 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2639 ilk_enable_display_irq(dev_priv, bit);
2640 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2641
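 /*
  * The hardware frame counter can get stuck while PSR is active since
  * no new frames are generated, so resynchronize the vblank counter.
  */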
2645 if (HAS_PSR(dev_priv))
2646 drm_crtc_vblank_restore(crtc);
2647
2648 return 0;
2649}
2650
2651int bdw_enable_vblank(struct drm_crtc *crtc)
2652{
2653 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2654 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2655 unsigned long irqflags;
2656
2657 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2658 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2659 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2660
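 /*
  * The hardware frame counter can get stuck while PSR is active since
  * no new frames are generated, so resynchronize the vblank counter.
  */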
2664 if (HAS_PSR(dev_priv))
2665 drm_crtc_vblank_restore(crtc);
2666
2667 return 0;
2668}
2669
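/*
 * Hooks called from the drm core to disable vblank interrupts on a given
 * crtc.
 */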
2673void i8xx_disable_vblank(struct drm_crtc *crtc)
2674{
2675 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2676 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2677 unsigned long irqflags;
2678
2679 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2680 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2681 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2682}
2683
2684void i915gm_disable_vblank(struct drm_crtc *crtc)
2685{
2686 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2687
2688 i8xx_disable_vblank(crtc);
2689
2690 if (--dev_priv->vblank_enabled == 0)
2691 I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2692}
2693
2694void i965_disable_vblank(struct drm_crtc *crtc)
2695{
2696 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2697 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2698 unsigned long irqflags;
2699
2700 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2701 i915_disable_pipestat(dev_priv, pipe,
2702 PIPE_START_VBLANK_INTERRUPT_STATUS);
2703 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2704}
2705
2706void ilk_disable_vblank(struct drm_crtc *crtc)
2707{
2708 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2709 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2710 unsigned long irqflags;
2711 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2712 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2713
2714 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2715 ilk_disable_display_irq(dev_priv, bit);
2716 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2717}
2718
2719void bdw_disable_vblank(struct drm_crtc *crtc)
2720{
2721 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2722 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2723 unsigned long irqflags;
2724
2725 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2726 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2727 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2728}
2729
2730static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2731{
2732 struct intel_uncore *uncore = &dev_priv->uncore;
2733
2734 if (HAS_PCH_NOP(dev_priv))
2735 return;
2736
2737 GEN3_IRQ_RESET(uncore, SDE);
2738
2739 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2740 I915_WRITE(SERR_INT, 0xffffffff);
2741}
2742
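/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts (it is saved, cleared and restored around the handler), so it
 * can't be changed once interrupts are enabled. Instead all PCH interrupt
 * sources are enabled here and then only unmasked as needed via SDEIMR.
 *
 * This must be called before interrupts are enabled.
 */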
2751static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
2752{
2753 if (HAS_PCH_NOP(dev_priv))
2754 return;
2755
2756 drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
2757 I915_WRITE(SDEIER, 0xffffffff);
2758 POSTING_READ(SDEIER);
2759}
2760
2761static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2762{
2763 struct intel_uncore *uncore = &dev_priv->uncore;
2764
2765 if (IS_CHERRYVIEW(dev_priv))
2766 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2767 else
2768 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
2769
2770 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2771 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2772
2773 i9xx_pipestat_irq_reset(dev_priv);
2774
2775 GEN3_IRQ_RESET(uncore, VLV_);
2776 dev_priv->irq_mask = ~0u;
2777}
2778
2779static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2780{
2781 struct intel_uncore *uncore = &dev_priv->uncore;
2782
2783 u32 pipestat_mask;
2784 u32 enable_mask;
2785 enum pipe pipe;
2786
2787 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2788
2789 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2790 for_each_pipe(dev_priv, pipe)
2791 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2792
2793 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2794 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2795 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2796 I915_LPE_PIPE_A_INTERRUPT |
2797 I915_LPE_PIPE_B_INTERRUPT;
2798
2799 if (IS_CHERRYVIEW(dev_priv))
2800 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2801 I915_LPE_PIPE_C_INTERRUPT;
2802
2803 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2804
2805 dev_priv->irq_mask = ~enable_mask;
2806
2807 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2808}
2809
2810
2811
2812static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2813{
2814 struct intel_uncore *uncore = &dev_priv->uncore;
2815
2816 GEN3_IRQ_RESET(uncore, DE);
2817 if (IS_GEN(dev_priv, 7))
2818 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2819
2820 if (IS_HASWELL(dev_priv)) {
2821 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2822 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2823 }
2824
2825 gen5_gt_irq_reset(&dev_priv->gt);
2826
2827 ibx_irq_reset(dev_priv);
2828}
2829
2830static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
2831{
2832 I915_WRITE(VLV_MASTER_IER, 0);
2833 POSTING_READ(VLV_MASTER_IER);
2834
2835 gen5_gt_irq_reset(&dev_priv->gt);
2836
2837 spin_lock_irq(&dev_priv->irq_lock);
2838 if (dev_priv->display_irqs_enabled)
2839 vlv_display_irq_reset(dev_priv);
2840 spin_unlock_irq(&dev_priv->irq_lock);
2841}
2842
2843static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2844{
2845 struct intel_uncore *uncore = &dev_priv->uncore;
2846 enum pipe pipe;
2847
2848 gen8_master_intr_disable(dev_priv->uncore.regs);
2849
2850 gen8_gt_irq_reset(&dev_priv->gt);
2851
2852 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2853 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2854
2855 for_each_pipe(dev_priv, pipe)
2856 if (intel_display_power_is_enabled(dev_priv,
2857 POWER_DOMAIN_PIPE(pipe)))
2858 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2859
2860 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2861 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2862 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2863
2864 if (HAS_PCH_SPLIT(dev_priv))
2865 ibx_irq_reset(dev_priv);
2866}
2867
2868static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
2869{
2870 struct intel_uncore *uncore = &dev_priv->uncore;
2871 enum pipe pipe;
2872
2873 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
2874
2875 if (INTEL_GEN(dev_priv) >= 12) {
2876 enum transcoder trans;
2877
2878 for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
2879 enum intel_display_power_domain domain;
2880
2881 domain = POWER_DOMAIN_TRANSCODER(trans);
2882 if (!intel_display_power_is_enabled(dev_priv, domain))
2883 continue;
2884
2885 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2886 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2887 }
2888 } else {
2889 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2890 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2891 }
2892
2893 for_each_pipe(dev_priv, pipe)
2894 if (intel_display_power_is_enabled(dev_priv,
2895 POWER_DOMAIN_PIPE(pipe)))
2896 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2897
2898 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2899 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2900 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
2901
2902 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2903 GEN3_IRQ_RESET(uncore, SDE);
2904
2905
2906 if (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) {
2907 intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
2908 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
2909 intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
2910 SBCLK_RUN_REFCLK_DIS, 0);
2911 }
2912}
2913
2914static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2915{
2916 struct intel_uncore *uncore = &dev_priv->uncore;
2917
2918 gen11_master_intr_disable(dev_priv->uncore.regs);
2919
2920 gen11_gt_irq_reset(&dev_priv->gt);
2921 gen11_display_irq_reset(dev_priv);
2922
2923 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2924 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2925}
2926
2927void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2928 u8 pipe_mask)
2929{
2930 struct intel_uncore *uncore = &dev_priv->uncore;
2931
2932 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
2933 enum pipe pipe;
2934
2935 spin_lock_irq(&dev_priv->irq_lock);
2936
2937 if (!intel_irqs_enabled(dev_priv)) {
2938 spin_unlock_irq(&dev_priv->irq_lock);
2939 return;
2940 }
2941
2942 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2943 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
2944 dev_priv->de_irq_mask[pipe],
2945 ~dev_priv->de_irq_mask[pipe] | extra_ier);
2946
2947 spin_unlock_irq(&dev_priv->irq_lock);
2948}
2949
2950void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
2951 u8 pipe_mask)
2952{
2953 struct intel_uncore *uncore = &dev_priv->uncore;
2954 enum pipe pipe;
2955
2956 spin_lock_irq(&dev_priv->irq_lock);
2957
2958 if (!intel_irqs_enabled(dev_priv)) {
2959 spin_unlock_irq(&dev_priv->irq_lock);
2960 return;
2961 }
2962
2963 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2964 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2965
2966 spin_unlock_irq(&dev_priv->irq_lock);
2967
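 /* Make sure we're done processing display irqs before returning. */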
2969 intel_synchronize_irq(dev_priv);
2970}
2971
2972static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
2973{
2974 struct intel_uncore *uncore = &dev_priv->uncore;
2975
2976 I915_WRITE(GEN8_MASTER_IRQ, 0);
2977 POSTING_READ(GEN8_MASTER_IRQ);
2978
2979 gen8_gt_irq_reset(&dev_priv->gt);
2980
2981 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2982
2983 spin_lock_irq(&dev_priv->irq_lock);
2984 if (dev_priv->display_irqs_enabled)
2985 vlv_display_irq_reset(dev_priv);
2986 spin_unlock_irq(&dev_priv->irq_lock);
2987}
2988
2989static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
2990 const u32 hpd[HPD_NUM_PINS])
2991{
2992 struct intel_encoder *encoder;
2993 u32 enabled_irqs = 0;
2994
2995 for_each_intel_encoder(&dev_priv->drm, encoder)
2996 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
2997 enabled_irqs |= hpd[encoder->hpd_pin];
2998
2999 return enabled_irqs;
3000}
3001
3002static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3003{
3004 u32 hotplug;
3005
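 /*
  * Enable digital hotplug on the PCH and set the DP short pulse
  * duration to 2ms. The pulse duration bits are reserved on LPT+.
  */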
3011 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3012 hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3013 PORTC_PULSE_DURATION_MASK |
3014 PORTD_PULSE_DURATION_MASK);
3015 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3016 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3017 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3018
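 /* LPT-LP also needs port A hotplug enabled on the PCH side. */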
3022 if (HAS_PCH_LPT_LP(dev_priv))
3023 hotplug |= PORTA_HOTPLUG_ENABLE;
3024 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3025}
3026
3027static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3028{
3029 u32 hotplug_irqs, enabled_irqs;
3030
3031 if (HAS_PCH_IBX(dev_priv))
3032 hotplug_irqs = SDE_HOTPLUG_MASK;
3033 else
3034 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3035
3036 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3037
3038 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3039
3040 ibx_hpd_detection_setup(dev_priv);
3041}
3042
3043static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
3044 u32 ddi_hotplug_enable_mask,
3045 u32 tc_hotplug_enable_mask)
3046{
3047 u32 hotplug;
3048
3049 hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3050 hotplug |= ddi_hotplug_enable_mask;
3051 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3052
3053 if (tc_hotplug_enable_mask) {
3054 hotplug = I915_READ(SHOTPLUG_CTL_TC);
3055 hotplug |= tc_hotplug_enable_mask;
3056 I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3057 }
3058}
3059
3060static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
3061 u32 sde_ddi_mask, u32 sde_tc_mask,
3062 u32 ddi_enable_mask, u32 tc_enable_mask)
3063{
3064 u32 hotplug_irqs, enabled_irqs;
3065
3066 hotplug_irqs = sde_ddi_mask | sde_tc_mask;
3067 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3068
3069 I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3070
3071 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3072
3073 icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
3074}
3075
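/*
 * MCC only has the ICP DDI hotplug pins plus a single Type-C port (TC1) on
 * the PCH side, so program just those sources.
 */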
3080static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3081{
3082 icp_hpd_irq_setup(dev_priv,
3083 SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
3084 ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
3085}
3086
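/*
 * JSP uses the TGP DDI hotplug masks but has no Type-C ports to program.
 */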
3092static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3093{
3094 icp_hpd_irq_setup(dev_priv,
3095 SDE_DDI_MASK_TGP, 0,
3096 TGP_DDI_HPD_ENABLE_MASK, 0);
3097}
3098
3099static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3100{
3101 u32 hotplug;
3102
3103 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3104 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3105 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3106 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3107 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3108 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3109
3110 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3111 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3112 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3113 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3114 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3115 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3116}
3117
3118static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3119{
3120 u32 hotplug_irqs, enabled_irqs;
3121 u32 val;
3122
3123 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3124 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3125
3126 val = I915_READ(GEN11_DE_HPD_IMR);
3127 val &= ~hotplug_irqs;
3128 val |= ~enabled_irqs & hotplug_irqs;
3129 I915_WRITE(GEN11_DE_HPD_IMR, val);
3130 POSTING_READ(GEN11_DE_HPD_IMR);
3131
3132 gen11_hpd_detection_setup(dev_priv);
3133
3134 if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
3135 icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
3136 TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
3137 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3138 icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
3139 ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
3140}
3141
3142static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3143{
3144 u32 val, hotplug;
3145
3146
3147 if (HAS_PCH_CNP(dev_priv)) {
3148 val = I915_READ(SOUTH_CHICKEN1);
3149 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3150 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3151 I915_WRITE(SOUTH_CHICKEN1, val);
3152 }
3153
3154
3155 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3156 hotplug |= PORTA_HOTPLUG_ENABLE |
3157 PORTB_HOTPLUG_ENABLE |
3158 PORTC_HOTPLUG_ENABLE |
3159 PORTD_HOTPLUG_ENABLE;
3160 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3161
3162 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3163 hotplug |= PORTE_HOTPLUG_ENABLE;
3164 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3165}
3166
3167static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3168{
3169 u32 hotplug_irqs, enabled_irqs;
3170
3171 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3172 I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3173
3174 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3175 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3176
3177 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3178
3179 spt_hpd_detection_setup(dev_priv);
3180}
3181
3182static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3183{
3184 u32 hotplug;
3185
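 /*
  * Enable digital hotplug on the CPU and set the DP short pulse
  * duration to 2ms. The pulse duration bits are reserved on HSW+.
  */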
3191 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3192 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3193 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3194 DIGITAL_PORTA_PULSE_DURATION_2ms;
3195 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3196}
3197
3198static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3199{
3200 u32 hotplug_irqs, enabled_irqs;
3201
3202 if (INTEL_GEN(dev_priv) >= 8) {
3203 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3204 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3205
3206 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3207 } else if (INTEL_GEN(dev_priv) >= 7) {
3208 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3209 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3210
3211 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3212 } else {
3213 hotplug_irqs = DE_DP_A_HOTPLUG;
3214 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3215
3216 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3217 }
3218
3219 ilk_hpd_detection_setup(dev_priv);
3220
3221 ibx_hpd_irq_setup(dev_priv);
3222}
3223
3224static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3225 u32 enabled_irqs)
3226{
3227 u32 hotplug;
3228
3229 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3230 hotplug |= PORTA_HOTPLUG_ENABLE |
3231 PORTB_HOTPLUG_ENABLE |
3232 PORTC_HOTPLUG_ENABLE;
3233
3234 drm_dbg_kms(&dev_priv->drm,
3235 "Invert bit setting: hp_ctl:%x hp_port:%x\n",
3236 hotplug, enabled_irqs);
3237 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3238
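 /*
  * The HPD invert setting is board specific on BXT, so set the invert
  * bit per port according to the VBT.
  */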
3243 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3244 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3245 hotplug |= BXT_DDIA_HPD_INVERT;
3246 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3247 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3248 hotplug |= BXT_DDIB_HPD_INVERT;
3249 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3250 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3251 hotplug |= BXT_DDIC_HPD_INVERT;
3252
3253 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3254}
3255
3256static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3257{
3258 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3259}
3260
3261static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3262{
3263 u32 hotplug_irqs, enabled_irqs;
3264
3265 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3266 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3267
3268 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3269
3270 __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3271}
3272
3273static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3274{
3275 u32 mask;
3276
3277 if (HAS_PCH_NOP(dev_priv))
3278 return;
3279
3280 if (HAS_PCH_IBX(dev_priv))
3281 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3282 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3283 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3284 else
3285 mask = SDE_GMBUS_CPT;
3286
3287 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3288 I915_WRITE(SDEIMR, ~mask);
3289
3290 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3291 HAS_PCH_LPT(dev_priv))
3292 ibx_hpd_detection_setup(dev_priv);
3293 else
3294 spt_hpd_detection_setup(dev_priv);
3295}
3296
3297static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3298{
3299 struct intel_uncore *uncore = &dev_priv->uncore;
3300 u32 display_mask, extra_mask;
3301
3302 if (INTEL_GEN(dev_priv) >= 7) {
3303 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3304 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3305 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3306 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3307 DE_DP_A_HOTPLUG_IVB);
3308 } else {
3309 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3310 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3311 DE_PIPEA_CRC_DONE | DE_POISON);
3312 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3313 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3314 DE_DP_A_HOTPLUG);
3315 }
3316
3317 if (IS_HASWELL(dev_priv)) {
3318 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3319 display_mask |= DE_EDP_PSR_INT_HSW;
3320 }
3321
3322 dev_priv->irq_mask = ~display_mask;
3323
3324 ibx_irq_pre_postinstall(dev_priv);
3325
3326 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3327 display_mask | extra_mask);
3328
3329 gen5_gt_irq_postinstall(&dev_priv->gt);
3330
3331 ilk_hpd_detection_setup(dev_priv);
3332
3333 ibx_irq_postinstall(dev_priv);
3334
3335 if (IS_IRONLAKE_M(dev_priv)) {
3336
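 /*
  * Enable PCU event interrupts. Interrupt setup runs single-threaded,
  * so the lock is only taken to keep the locking asserts in
  * ilk_enable_display_irq() happy.
  */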
3341 spin_lock_irq(&dev_priv->irq_lock);
3342 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3343 spin_unlock_irq(&dev_priv->irq_lock);
3344 }
3345}
3346
3347void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3348{
3349 lockdep_assert_held(&dev_priv->irq_lock);
3350
3351 if (dev_priv->display_irqs_enabled)
3352 return;
3353
3354 dev_priv->display_irqs_enabled = true;
3355
3356 if (intel_irqs_enabled(dev_priv)) {
3357 vlv_display_irq_reset(dev_priv);
3358 vlv_display_irq_postinstall(dev_priv);
3359 }
3360}
3361
3362void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3363{
3364 lockdep_assert_held(&dev_priv->irq_lock);
3365
3366 if (!dev_priv->display_irqs_enabled)
3367 return;
3368
3369 dev_priv->display_irqs_enabled = false;
3370
3371 if (intel_irqs_enabled(dev_priv))
3372 vlv_display_irq_reset(dev_priv);
3373}
3374
3375
3376static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3377{
3378 gen5_gt_irq_postinstall(&dev_priv->gt);
3379
3380 spin_lock_irq(&dev_priv->irq_lock);
3381 if (dev_priv->display_irqs_enabled)
3382 vlv_display_irq_postinstall(dev_priv);
3383 spin_unlock_irq(&dev_priv->irq_lock);
3384
3385 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3386 POSTING_READ(VLV_MASTER_IER);
3387}
3388
3389static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3390{
3391 struct intel_uncore *uncore = &dev_priv->uncore;
3392
3393 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3394 GEN8_PIPE_CDCLK_CRC_DONE;
3395 u32 de_pipe_enables;
3396 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3397 u32 de_port_enables;
3398 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3399 enum pipe pipe;
3400
3401 if (INTEL_GEN(dev_priv) <= 10)
3402 de_misc_masked |= GEN8_DE_MISC_GSE;
3403
3404 if (IS_GEN9_LP(dev_priv))
3405 de_port_masked |= BXT_DE_PORT_GMBUS;
3406
3407 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3408 GEN8_PIPE_FIFO_UNDERRUN;
3409
3410 de_port_enables = de_port_masked;
3411 if (IS_GEN9_LP(dev_priv))
3412 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3413 else if (IS_BROADWELL(dev_priv))
3414 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3415
3416 if (INTEL_GEN(dev_priv) >= 12) {
3417 enum transcoder trans;
3418
3419 for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
3420 enum intel_display_power_domain domain;
3421
3422 domain = POWER_DOMAIN_TRANSCODER(trans);
3423 if (!intel_display_power_is_enabled(dev_priv, domain))
3424 continue;
3425
3426 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3427 }
3428 } else {
3429 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3430 }
3431
3432 for_each_pipe(dev_priv, pipe) {
3433 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3434
3435 if (intel_display_power_is_enabled(dev_priv,
3436 POWER_DOMAIN_PIPE(pipe)))
3437 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3438 dev_priv->de_irq_mask[pipe],
3439 de_pipe_enables);
3440 }
3441
3442 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3443 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3444
3445 if (INTEL_GEN(dev_priv) >= 11) {
3446 u32 de_hpd_masked = 0;
3447 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3448 GEN11_DE_TBT_HOTPLUG_MASK;
3449
3450 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3451 de_hpd_enables);
3452 gen11_hpd_detection_setup(dev_priv);
3453 } else if (IS_GEN9_LP(dev_priv)) {
3454 bxt_hpd_detection_setup(dev_priv);
3455 } else if (IS_BROADWELL(dev_priv)) {
3456 ilk_hpd_detection_setup(dev_priv);
3457 }
3458}
3459
3460static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3461{
3462 if (HAS_PCH_SPLIT(dev_priv))
3463 ibx_irq_pre_postinstall(dev_priv);
3464
3465 gen8_gt_irq_postinstall(&dev_priv->gt);
3466 gen8_de_irq_postinstall(dev_priv);
3467
3468 if (HAS_PCH_SPLIT(dev_priv))
3469 ibx_irq_postinstall(dev_priv);
3470
3471 gen8_master_intr_enable(dev_priv->uncore.regs);
3472}
3473
3474static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3475{
3476 u32 mask = SDE_GMBUS_ICP;
3477
3478 drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
3479 I915_WRITE(SDEIER, 0xffffffff);
3480 POSTING_READ(SDEIER);
3481
3482 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3483 I915_WRITE(SDEIMR, ~mask);
3484
3485 if (HAS_PCH_TGP(dev_priv))
3486 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
3487 TGP_TC_HPD_ENABLE_MASK);
3488 else if (HAS_PCH_JSP(dev_priv))
3489 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
3490 else if (HAS_PCH_MCC(dev_priv))
3491 icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
3492 ICP_TC_HPD_ENABLE(PORT_TC1));
3493 else
3494 icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
3495 ICP_TC_HPD_ENABLE_MASK);
3496}
3497
3498static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3499{
3500 struct intel_uncore *uncore = &dev_priv->uncore;
3501 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3502
3503 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3504 icp_irq_postinstall(dev_priv);
3505
3506 gen11_gt_irq_postinstall(&dev_priv->gt);
3507 gen8_de_irq_postinstall(dev_priv);
3508
3509 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3510
3511 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
3512
3513 gen11_master_intr_enable(uncore->regs);
3514 POSTING_READ(GEN11_GFX_MSTR_IRQ);
3515}
3516
3517static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3518{
3519 gen8_gt_irq_postinstall(&dev_priv->gt);
3520
3521 spin_lock_irq(&dev_priv->irq_lock);
3522 if (dev_priv->display_irqs_enabled)
3523 vlv_display_irq_postinstall(dev_priv);
3524 spin_unlock_irq(&dev_priv->irq_lock);
3525
3526 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3527 POSTING_READ(GEN8_MASTER_IRQ);
3528}
3529
3530static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3531{
3532 struct intel_uncore *uncore = &dev_priv->uncore;
3533
3534 i9xx_pipestat_irq_reset(dev_priv);
3535
3536 GEN2_IRQ_RESET(uncore);
3537}
3538
3539static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3540{
3541 struct intel_uncore *uncore = &dev_priv->uncore;
3542 u16 enable_mask;
3543
3544 intel_uncore_write16(uncore,
3545 EMR,
3546 ~(I915_ERROR_PAGE_TABLE |
3547 I915_ERROR_MEMORY_REFRESH));
3548
3549
3550 dev_priv->irq_mask =
3551 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3552 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3553 I915_MASTER_ERROR_INTERRUPT);
3554
3555 enable_mask =
3556 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3557 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3558 I915_MASTER_ERROR_INTERRUPT |
3559 I915_USER_INTERRUPT;
3560
3561 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
3562
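 /*
  * Interrupt setup is already guaranteed to be single-threaded; the
  * spinlock is only taken to keep the locking asserts happy.
  */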
3565 spin_lock_irq(&dev_priv->irq_lock);
3566 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3567 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3568 spin_unlock_irq(&dev_priv->irq_lock);
3569}
3570
3571static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3572 u16 *eir, u16 *eir_stuck)
3573{
3574 struct intel_uncore *uncore = &i915->uncore;
3575 u16 emr;
3576
3577 *eir = intel_uncore_read16(uncore, EIR);
3578
3579 if (*eir)
3580 intel_uncore_write16(uncore, EIR, *eir);
3581
3582 *eir_stuck = intel_uncore_read16(uncore, EIR);
3583 if (*eir_stuck == 0)
3584 return;
3585
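 /*
  * Toggling all the EMR bits forces a new edge on the master error
  * interrupt even when some EIR bits could not be cleared. Bits that
  * remain set in EIR can only be cleared by handling the underlying
  * error (or by a GPU reset), so mask them in EMR to stop them from
  * raising further interrupts.
  */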
3596 emr = intel_uncore_read16(uncore, EMR);
3597 intel_uncore_write16(uncore, EMR, 0xffff);
3598 intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3599}
3600
3601static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3602 u16 eir, u16 eir_stuck)
3603{
3604 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
3605
3606 if (eir_stuck)
3607 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3608 eir_stuck);
3609}
3610
3611static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3612 u32 *eir, u32 *eir_stuck)
3613{
3614 u32 emr;
3615
3616 *eir = I915_READ(EIR);
3617
3618 I915_WRITE(EIR, *eir);
3619
3620 *eir_stuck = I915_READ(EIR);
3621 if (*eir_stuck == 0)
3622 return;
3623
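 /*
  * Toggling all the EMR bits forces a new edge on the master error
  * interrupt even when some EIR bits could not be cleared. Bits that
  * remain set in EIR can only be cleared by handling the underlying
  * error (or by a GPU reset), so mask them in EMR to stop them from
  * raising further interrupts.
  */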
3634 emr = I915_READ(EMR);
3635 I915_WRITE(EMR, 0xffffffff);
3636 I915_WRITE(EMR, emr | *eir_stuck);
3637}
3638
3639static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3640 u32 eir, u32 eir_stuck)
3641{
3642 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
3643
3644 if (eir_stuck)
3645 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3646 eir_stuck);
3647}
3648
3649static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3650{
3651 struct drm_i915_private *dev_priv = arg;
3652 irqreturn_t ret = IRQ_NONE;
3653
3654 if (!intel_irqs_enabled(dev_priv))
3655 return IRQ_NONE;
3656
3657
3658 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3659
3660 do {
3661 u32 pipe_stats[I915_MAX_PIPES] = {};
3662 u16 eir = 0, eir_stuck = 0;
3663 u16 iir;
3664
3665 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
3666 if (iir == 0)
3667 break;
3668
3669 ret = IRQ_HANDLED;
3670
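 /*
  * Ack the pipestat registers regardless, as some status bits might
  * not be signalled in IIR.
  */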
3673 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3674
3675 if (iir & I915_MASTER_ERROR_INTERRUPT)
3676 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3677
3678 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
3679
3680 if (iir & I915_USER_INTERRUPT)
3681 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
3682
3683 if (iir & I915_MASTER_ERROR_INTERRUPT)
3684 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
3685
3686 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3687 } while (0);
3688
3689 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3690
3691 return ret;
3692}
3693
3694static void i915_irq_reset(struct drm_i915_private *dev_priv)
3695{
3696 struct intel_uncore *uncore = &dev_priv->uncore;
3697
3698 if (I915_HAS_HOTPLUG(dev_priv)) {
3699 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3700 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3701 }
3702
3703 i9xx_pipestat_irq_reset(dev_priv);
3704
3705 GEN3_IRQ_RESET(uncore, GEN2_);
3706}
3707
3708static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
3709{
3710 struct intel_uncore *uncore = &dev_priv->uncore;
3711 u32 enable_mask;
3712
3713 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
3714 I915_ERROR_MEMORY_REFRESH));
3715
3716
3717 dev_priv->irq_mask =
3718 ~(I915_ASLE_INTERRUPT |
3719 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3720 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3721 I915_MASTER_ERROR_INTERRUPT);
3722
3723 enable_mask =
3724 I915_ASLE_INTERRUPT |
3725 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3726 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3727 I915_MASTER_ERROR_INTERRUPT |
3728 I915_USER_INTERRUPT;
3729
3730 if (I915_HAS_HOTPLUG(dev_priv)) {
3731
3732 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3733
3734 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3735 }
3736
3737 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3738
3739
3740
3741 spin_lock_irq(&dev_priv->irq_lock);
3742 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3743 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3744 spin_unlock_irq(&dev_priv->irq_lock);
3745
3746 i915_enable_asle_pipestat(dev_priv);
3747}
3748
3749static irqreturn_t i915_irq_handler(int irq, void *arg)
3750{
3751 struct drm_i915_private *dev_priv = arg;
3752 irqreturn_t ret = IRQ_NONE;
3753
3754 if (!intel_irqs_enabled(dev_priv))
3755 return IRQ_NONE;
3756
3757
3758 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3759
3760 do {
3761 u32 pipe_stats[I915_MAX_PIPES] = {};
3762 u32 eir = 0, eir_stuck = 0;
3763 u32 hotplug_status = 0;
3764 u32 iir;
3765
3766 iir = I915_READ(GEN2_IIR);
3767 if (iir == 0)
3768 break;
3769
3770 ret = IRQ_HANDLED;
3771
3772 if (I915_HAS_HOTPLUG(dev_priv) &&
3773 iir & I915_DISPLAY_PORT_INTERRUPT)
3774 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3775
3776
3777
3778 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3779
3780 if (iir & I915_MASTER_ERROR_INTERRUPT)
3781 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3782
3783 I915_WRITE(GEN2_IIR, iir);
3784
3785 if (iir & I915_USER_INTERRUPT)
3786 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
3787
3788 if (iir & I915_MASTER_ERROR_INTERRUPT)
3789 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3790
3791 if (hotplug_status)
3792 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3793
3794 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3795 } while (0);
3796
3797 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3798
3799 return ret;
3800}
3801
3802static void i965_irq_reset(struct drm_i915_private *dev_priv)
3803{
3804 struct intel_uncore *uncore = &dev_priv->uncore;
3805
3806 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3807 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3808
3809 i9xx_pipestat_irq_reset(dev_priv);
3810
3811 GEN3_IRQ_RESET(uncore, GEN2_);
3812}
3813
3814static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
3815{
3816 struct intel_uncore *uncore = &dev_priv->uncore;
3817 u32 enable_mask;
3818 u32 error_mask;
3819
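 /*
  * Enable error interrupts for page table and memory refresh errors;
  * G4x unmasks a few additional error sources.
  */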
3824 if (IS_G4X(dev_priv)) {
3825 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3826 GM45_ERROR_MEM_PRIV |
3827 GM45_ERROR_CP_PRIV |
3828 I915_ERROR_MEMORY_REFRESH);
3829 } else {
3830 error_mask = ~(I915_ERROR_PAGE_TABLE |
3831 I915_ERROR_MEMORY_REFRESH);
3832 }
3833 I915_WRITE(EMR, error_mask);
3834
3835
3836 dev_priv->irq_mask =
3837 ~(I915_ASLE_INTERRUPT |
3838 I915_DISPLAY_PORT_INTERRUPT |
3839 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3840 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3841 I915_MASTER_ERROR_INTERRUPT);
3842
3843 enable_mask =
3844 I915_ASLE_INTERRUPT |
3845 I915_DISPLAY_PORT_INTERRUPT |
3846 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3847 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3848 I915_MASTER_ERROR_INTERRUPT |
3849 I915_USER_INTERRUPT;
3850
3851 if (IS_G4X(dev_priv))
3852 enable_mask |= I915_BSD_USER_INTERRUPT;
3853
3854 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3855
3856
3857
3858 spin_lock_irq(&dev_priv->irq_lock);
3859 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3860 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3861 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3862 spin_unlock_irq(&dev_priv->irq_lock);
3863
3864 i915_enable_asle_pipestat(dev_priv);
3865}
3866
3867static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
3868{
3869 u32 hotplug_en;
3870
3871 lockdep_assert_held(&dev_priv->irq_lock);
3872
3873
3874
3875 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
3876
3877
3878
3879
3880 if (IS_G4X(dev_priv))
3881 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3882 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3883
3884
3885 i915_hotplug_interrupt_update_locked(dev_priv,
3886 HOTPLUG_INT_EN_MASK |
3887 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
3888 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
3889 hotplug_en);
3890}
3891
3892static irqreturn_t i965_irq_handler(int irq, void *arg)
3893{
3894 struct drm_i915_private *dev_priv = arg;
3895 irqreturn_t ret = IRQ_NONE;
3896
3897 if (!intel_irqs_enabled(dev_priv))
3898 return IRQ_NONE;
3899
3900
3901 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3902
3903 do {
3904 u32 pipe_stats[I915_MAX_PIPES] = {};
3905 u32 eir = 0, eir_stuck = 0;
3906 u32 hotplug_status = 0;
3907 u32 iir;
3908
3909 iir = I915_READ(GEN2_IIR);
3910 if (iir == 0)
3911 break;
3912
3913 ret = IRQ_HANDLED;
3914
3915 if (iir & I915_DISPLAY_PORT_INTERRUPT)
3916 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3917
3918
3919
3920 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3921
3922 if (iir & I915_MASTER_ERROR_INTERRUPT)
3923 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3924
3925 I915_WRITE(GEN2_IIR, iir);
3926
3927 if (iir & I915_USER_INTERRUPT)
3928 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
3929
3930 if (iir & I915_BSD_USER_INTERRUPT)
3931 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
3932
3933 if (iir & I915_MASTER_ERROR_INTERRUPT)
3934 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3935
3936 if (hotplug_status)
3937 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3938
3939 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3940 } while (0);
3941
3942 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3943
3944 return ret;
3945}
3946
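/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq related work items and selects the
 * hotplug setup vfuncs. It does not request the interrupt itself.
 */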
3954void intel_irq_init(struct drm_i915_private *dev_priv)
3955{
3956 struct drm_device *dev = &dev_priv->drm;
3957 int i;
3958
3959 intel_hpd_init_pins(dev_priv);
3960
3961 intel_hpd_init_work(dev_priv);
3962
3963 INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
3964 for (i = 0; i < MAX_L3_SLICES; ++i)
3965 dev_priv->l3_parity.remap_info[i] = NULL;
3966
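 /* Before gen11 the GuC irq bits sit in the upper 16 bits of the PM register. */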
3968 if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
3969 dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
3970
3971 dev->vblank_disable_immediate = true;
3972
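 /*
  * Most platforms treat the display irq block as an always-on power
  * domain. vlv/chv can disable it at runtime and need special care to
  * avoid writing any of the display block registers outside of the
  * power domain, so setting up their display irqs is deferred to
  * runtime pm.
  */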
3979 dev_priv->display_irqs_enabled = true;
3980 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3981 dev_priv->display_irqs_enabled = false;
3982
3983 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
3984
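 /*
  * With MST, short HPD IRQ storms occur naturally as part of sideband
  * messaging, so short storm detection is only enabled when MST is
  * not supported.
  */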
3990 dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
3991
3992 if (HAS_GMCH(dev_priv)) {
3993 if (I915_HAS_HOTPLUG(dev_priv))
3994 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3995 } else {
3996 if (HAS_PCH_JSP(dev_priv))
3997 dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
3998 else if (HAS_PCH_MCC(dev_priv))
3999 dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
4000 else if (INTEL_GEN(dev_priv) >= 11)
4001 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4002 else if (IS_GEN9_LP(dev_priv))
4003 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4004 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4005 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4006 else
4007 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4008 }
4009}
4010
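/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support, currently just the L3
 * parity remap buffers.
 */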
4017void intel_irq_fini(struct drm_i915_private *i915)
4018{
4019 int i;
4020
4021 for (i = 0; i < MAX_L3_SLICES; ++i)
4022 kfree(i915->l3_parity.remap_info[i]);
4023}
4024
4025static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4026{
4027 if (HAS_GMCH(dev_priv)) {
4028 if (IS_CHERRYVIEW(dev_priv))
4029 return cherryview_irq_handler;
4030 else if (IS_VALLEYVIEW(dev_priv))
4031 return valleyview_irq_handler;
4032 else if (IS_GEN(dev_priv, 4))
4033 return i965_irq_handler;
4034 else if (IS_GEN(dev_priv, 3))
4035 return i915_irq_handler;
4036 else
4037 return i8xx_irq_handler;
4038 } else {
4039 if (INTEL_GEN(dev_priv) >= 11)
4040 return gen11_irq_handler;
4041 else if (INTEL_GEN(dev_priv) >= 8)
4042 return gen8_irq_handler;
4043 else
4044 return ilk_irq_handler;
4045 }
4046}
4047
4048static void intel_irq_reset(struct drm_i915_private *dev_priv)
4049{
4050 if (HAS_GMCH(dev_priv)) {
4051 if (IS_CHERRYVIEW(dev_priv))
4052 cherryview_irq_reset(dev_priv);
4053 else if (IS_VALLEYVIEW(dev_priv))
4054 valleyview_irq_reset(dev_priv);
4055 else if (IS_GEN(dev_priv, 4))
4056 i965_irq_reset(dev_priv);
4057 else if (IS_GEN(dev_priv, 3))
4058 i915_irq_reset(dev_priv);
4059 else
4060 i8xx_irq_reset(dev_priv);
4061 } else {
4062 if (INTEL_GEN(dev_priv) >= 11)
4063 gen11_irq_reset(dev_priv);
4064 else if (INTEL_GEN(dev_priv) >= 8)
4065 gen8_irq_reset(dev_priv);
4066 else
4067 ilk_irq_reset(dev_priv);
4068 }
4069}
4070
4071static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4072{
4073 if (HAS_GMCH(dev_priv)) {
4074 if (IS_CHERRYVIEW(dev_priv))
4075 cherryview_irq_postinstall(dev_priv);
4076 else if (IS_VALLEYVIEW(dev_priv))
4077 valleyview_irq_postinstall(dev_priv);
4078 else if (IS_GEN(dev_priv, 4))
4079 i965_irq_postinstall(dev_priv);
4080 else if (IS_GEN(dev_priv, 3))
4081 i915_irq_postinstall(dev_priv);
4082 else
4083 i8xx_irq_postinstall(dev_priv);
4084 } else {
4085 if (INTEL_GEN(dev_priv) >= 11)
4086 gen11_irq_postinstall(dev_priv);
4087 else if (INTEL_GEN(dev_priv) >= 8)
4088 gen8_irq_postinstall(dev_priv);
4089 else
4090 ilk_irq_postinstall(dev_priv);
4091 }
4092}
4093
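/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function resets the interrupt state, requests the interrupt line and
 * runs the postinstall hooks. It is called after intel_irq_init().
 */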
4105int intel_irq_install(struct drm_i915_private *dev_priv)
4106{
4107 int irq = dev_priv->drm.pdev->irq;
4108 int ret;
4109
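 /*
  * We enable some interrupt sources in our postinstall hooks, so mark
  * interrupts as enabled _before_ actually enabling them to avoid
  * special cases in our ordering checks.
  */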
4115 dev_priv->runtime_pm.irqs_enabled = true;
4116
4117 dev_priv->drm.irq_enabled = true;
4118
4119 intel_irq_reset(dev_priv);
4120
4121 ret = request_irq(irq, intel_irq_handler(dev_priv),
4122 IRQF_SHARED, DRIVER_NAME, dev_priv);
4123 if (ret < 0) {
4124 dev_priv->drm.irq_enabled = false;
4125 return ret;
4126 }
4127
4128 intel_irq_postinstall(dev_priv);
4129
4130 return ret;
4131}
4132
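/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */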
4140void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4141{
4142 int irq = dev_priv->drm.pdev->irq;
4143
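 /*
  * This can be reached more than once, e.g. via driver probe error
  * handling as well as driver remove, so bail out if the interrupt
  * has already been torn down.
  */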
4150 if (!dev_priv->drm.irq_enabled)
4151 return;
4152
4153 dev_priv->drm.irq_enabled = false;
4154
4155 intel_irq_reset(dev_priv);
4156
4157 free_irq(irq, dev_priv);
4158
4159 intel_hpd_cancel_work(dev_priv);
4160 dev_priv->runtime_pm.irqs_enabled = false;
4161}
4162
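/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, e.g. in the runtime
 * pm and the system suspend/resume code.
 */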
4170void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4171{
4172 intel_irq_reset(dev_priv);
4173 dev_priv->runtime_pm.irqs_enabled = false;
4174 intel_synchronize_irq(dev_priv);
4175}
4176
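/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, e.g. in the runtime
 * pm and the system suspend/resume code.
 */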
4184void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4185{
4186 dev_priv->runtime_pm.irqs_enabled = true;
4187 intel_irq_reset(dev_priv);
4188 intel_irq_postinstall(dev_priv);
4189}
4190
4191bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4192{
4193
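 /*
  * irqs_enabled tracks both driver load/unload and the runtime PM
  * disable/enable paths above.
  */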
4197 return dev_priv->runtime_pm.irqs_enabled;
4198}
4199
4200void intel_synchronize_irq(struct drm_i915_private *i915)
4201{
4202 synchronize_irq(i915->drm.pdev->irq);
4203}
4204