#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/*
 * Interrupt statistics for the i915 PMU: only interrupts that this driver
 * actually handled are counted, so a shared interrupt line does not
 * inflate the numbers.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A plain increment is all that is needed here; WRITE_ONCE() just
	 * keeps the store from being torn for concurrent readers of the
	 * counter.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

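/*
 * Per-platform tables mapping each hotplug pin to the interrupt/status bits
 * used by that platform (north display engine and, further below, the
 * various PCH south display variants).
 */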
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

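/*
 * Select the hotplug pin tables for this platform: hpd->hpd for the CPU /
 * north display side and hpd->pch_hpd for the PCH (south) side where one
 * exists.
 */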
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEN9_LP(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (INTEL_GEN(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (INTEL_GEN(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (HAS_PCH_DG1(dev_priv))
		hpd->pch_hpd = hpd_sde_dg1;
	else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
		 HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * IMR is expected to already be cleared at preinstall/uninstall time, so
 * here we only verify that IIR is really zero before enabling an interrupt
 * source, and complain loudly (and clear it) if it is not.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 *
 * The HPD enable bits in PORT_HOTPLUG_EN are modified both inside and
 * outside of interrupt context, so the read-modify-write has to be done
 * under the irq spinlock. This wrapper takes the lock itself; callers that
 * already hold it can use the _locked variant above.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * On older platforms the hardware frame counter increments at the start of
 * the active portion of the frame, not at the point where the vblank code
 * evaluates it (the start of the hsync preceding vblank). The counter
 * helper below therefore also samples the pixel counter and bumps the
 * returned value by one once the scanout position has passed that point,
 * so that the reported frame count appears to increment exactly where the
 * vblank code expects it to.
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * When the driver has told the vblank core that there is no usable
	 * hardware counter (max_vblank_count == 0), never let a non-zero
	 * value leak out of here.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * The high and low frame count fields live in separate registers,
	 * so read the high part twice and retry until it is stable across
	 * the read of the low part.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at the beginning of active; cook up
	 * a vblank counter by also checking the pixel counter against
	 * vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}
720
721static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
722{
723 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
724 struct drm_vblank_crtc *vblank =
725 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
726 const struct drm_display_mode *mode = &vblank->hwmode;
727 u32 htotal = mode->crtc_htotal;
728 u32 clock = mode->crtc_clock;
729 u32 scan_prev_time, scan_curr_time, scan_post_time;
737 do {
743 scan_prev_time = intel_de_read_fw(dev_priv,
744 PIPE_FRMTMSTMP(crtc->pipe));
750 scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
751
752 scan_post_time = intel_de_read_fw(dev_priv,
753 PIPE_FRMTMSTMP(crtc->pipe));
754 } while (scan_post_time != scan_prev_time);
755
756 return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
757 clock), 1000 * htotal);
758}
768static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
769{
770 struct drm_vblank_crtc *vblank =
771 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
772 const struct drm_display_mode *mode = &vblank->hwmode;
773 u32 vblank_start = mode->crtc_vblank_start;
774 u32 vtotal = mode->crtc_vtotal;
775 u32 scanline;
776
777 scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
778 scanline = min(scanline, vtotal - 1);
779 scanline = (scanline + vblank_start) % vtotal;
780
781 return scanline;
782}
788static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
789{
790 struct drm_device *dev = crtc->base.dev;
791 struct drm_i915_private *dev_priv = to_i915(dev);
792 const struct drm_display_mode *mode;
793 struct drm_vblank_crtc *vblank;
794 enum pipe pipe = crtc->pipe;
795 int position, vtotal;
796
797 if (!crtc->active)
798 return -1;
799
800 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
801 mode = &vblank->hwmode;
802
803 if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
804 return __intel_get_crtc_scanline_from_timestamp(crtc);
805
806 vtotal = mode->crtc_vtotal;
807 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
808 vtotal /= 2;
809
810 if (IS_GEN(dev_priv, 2))
811 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
812 else
813 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
827 if (HAS_DDI(dev_priv) && !position) {
828 int i, temp;
829
830 for (i = 0; i < 100; i++) {
831 udelay(1);
832 temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
833 if (temp != position) {
834 position = temp;
835 break;
836 }
837 }
838 }
844 return (position + crtc->scanline_offset) % vtotal;
845}
846
847static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
848 bool in_vblank_irq,
849 int *vpos, int *hpos,
850 ktime_t *stime, ktime_t *etime,
851 const struct drm_display_mode *mode)
852{
853 struct drm_device *dev = _crtc->dev;
854 struct drm_i915_private *dev_priv = to_i915(dev);
855 struct intel_crtc *crtc = to_intel_crtc(_crtc);
856 enum pipe pipe = crtc->pipe;
857 int position;
858 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
859 unsigned long irqflags;
860 bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
861 IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
862 crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
863
864 if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
865 drm_dbg(&dev_priv->drm,
866 "trying to get scanoutpos for disabled "
867 "pipe %c\n", pipe_name(pipe));
868 return false;
869 }
870
871 htotal = mode->crtc_htotal;
872 hsync_start = mode->crtc_hsync_start;
873 vtotal = mode->crtc_vtotal;
874 vbl_start = mode->crtc_vblank_start;
875 vbl_end = mode->crtc_vblank_end;
876
877 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
878 vbl_start = DIV_ROUND_UP(vbl_start, 2);
879 vbl_end /= 2;
880 vtotal /= 2;
881 }
888 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
893 if (stime)
894 *stime = ktime_get();
895
896 if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
897 int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
898
899 position = __intel_get_crtc_scanline(crtc);
907 if (position >= vbl_start && scanlines < position)
908 position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
909 } else if (use_scanline_counter) {
913 position = __intel_get_crtc_scanline(crtc);
914 } else {
919 position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
920
921
922 vbl_start *= htotal;
923 vbl_end *= htotal;
924 vtotal *= htotal;
935 if (position >= vtotal)
936 position = vtotal - 1;
947 position = (position + htotal - hsync_start) % vtotal;
948 }
949
950
951 if (etime)
952 *etime = ktime_get();
956 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
964 if (position >= vbl_start)
965 position -= vbl_end;
966 else
967 position += vtotal - vbl_end;
968
969 if (use_scanline_counter) {
970 *vpos = position;
971 *hpos = 0;
972 } else {
973 *vpos = position / htotal;
974 *hpos = position - (*vpos * htotal);
975 }
976
977 return true;
978}
979
980bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
981 ktime_t *vblank_time, bool in_vblank_irq)
982{
983 return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
984 crtc, max_error, vblank_time, in_vblank_irq,
985 i915_get_crtc_scanoutpos);
986}
987
988int intel_get_crtc_scanline(struct intel_crtc *crtc)
989{
990 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
991 unsigned long irqflags;
992 int position;
993
994 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
995 position = __intel_get_crtc_scanline(crtc);
996 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
997
998 return position;
999}

/**
 * ivb_parity_work - handle an L3 parity error interrupt
 * @work: workqueue struct
 *
 * Reads back which slice/row/bank/subbank reported the parity error and
 * notifies userspace with a uevent so it can remap the bad rows; the parity
 * interrupt is then re-enabled.
 */
1010static void ivb_parity_work(struct work_struct *work)
1011{
1012 struct drm_i915_private *dev_priv =
1013 container_of(work, typeof(*dev_priv), l3_parity.error_work);
1014 struct intel_gt *gt = &dev_priv->gt;
1015 u32 error_status, row, bank, subbank;
1016 char *parity_event[6];
1017 u32 misccpctl;
1018 u8 slice = 0;
1024 mutex_lock(&dev_priv->drm.struct_mutex);
1025
1026
1027 if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
1028 goto out;
1029
1030 misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1031 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1032 intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1033
1034 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1035 i915_reg_t reg;
1036
1037 slice--;
1038 if (drm_WARN_ON_ONCE(&dev_priv->drm,
1039 slice >= NUM_L3_SLICES(dev_priv)))
1040 break;
1041
1042 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1043
1044 reg = GEN7_L3CDERRST1(slice);
1045
1046 error_status = intel_uncore_read(&dev_priv->uncore, reg);
1047 row = GEN7_PARITY_ERROR_ROW(error_status);
1048 bank = GEN7_PARITY_ERROR_BANK(error_status);
1049 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1050
1051 intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1052 intel_uncore_posting_read(&dev_priv->uncore, reg);
1053
1054 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1055 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1056 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1057 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1058 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1059 parity_event[5] = NULL;
1060
1061 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1062 KOBJ_CHANGE, parity_event);
1063
1064 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1065 slice, row, bank, subbank);
1066
1067 kfree(parity_event[4]);
1068 kfree(parity_event[3]);
1069 kfree(parity_event[2]);
1070 kfree(parity_event[1]);
1071 }
1072
1073 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
1074
1075out:
1076 drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1077 spin_lock_irq(>->irq_lock);
1078 gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
1079 spin_unlock_irq(>->irq_lock);
1080
1081 mutex_unlock(&dev_priv->drm.struct_mutex);
1082}
1083
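/*
 * The *_long_detect() helpers below decode the per-platform hotplug
 * control/status register value and report whether a given hpd pin saw a
 * "long" pulse (a real plug/unplug) as opposed to a short HPD IRQ pulse.
 */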
1084static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1085{
1086 switch (pin) {
1087 case HPD_PORT_TC1:
1088 case HPD_PORT_TC2:
1089 case HPD_PORT_TC3:
1090 case HPD_PORT_TC4:
1091 case HPD_PORT_TC5:
1092 case HPD_PORT_TC6:
1093 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
1094 default:
1095 return false;
1096 }
1097}
1098
1099static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1100{
1101 switch (pin) {
1102 case HPD_PORT_A:
1103 return val & PORTA_HOTPLUG_LONG_DETECT;
1104 case HPD_PORT_B:
1105 return val & PORTB_HOTPLUG_LONG_DETECT;
1106 case HPD_PORT_C:
1107 return val & PORTC_HOTPLUG_LONG_DETECT;
1108 default:
1109 return false;
1110 }
1111}
1112
1113static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1114{
1115 switch (pin) {
1116 case HPD_PORT_A:
1117 case HPD_PORT_B:
1118 case HPD_PORT_C:
1119 case HPD_PORT_D:
1120 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
1121 default:
1122 return false;
1123 }
1124}
1125
1126static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1127{
1128 switch (pin) {
1129 case HPD_PORT_TC1:
1130 case HPD_PORT_TC2:
1131 case HPD_PORT_TC3:
1132 case HPD_PORT_TC4:
1133 case HPD_PORT_TC5:
1134 case HPD_PORT_TC6:
1135 return val & ICP_TC_HPD_LONG_DETECT(pin);
1136 default:
1137 return false;
1138 }
1139}
1140
1141static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1142{
1143 switch (pin) {
1144 case HPD_PORT_E:
1145 return val & PORTE_HOTPLUG_LONG_DETECT;
1146 default:
1147 return false;
1148 }
1149}
1150
1151static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1152{
1153 switch (pin) {
1154 case HPD_PORT_A:
1155 return val & PORTA_HOTPLUG_LONG_DETECT;
1156 case HPD_PORT_B:
1157 return val & PORTB_HOTPLUG_LONG_DETECT;
1158 case HPD_PORT_C:
1159 return val & PORTC_HOTPLUG_LONG_DETECT;
1160 case HPD_PORT_D:
1161 return val & PORTD_HOTPLUG_LONG_DETECT;
1162 default:
1163 return false;
1164 }
1165}
1166
1167static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1168{
1169 switch (pin) {
1170 case HPD_PORT_A:
1171 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1172 default:
1173 return false;
1174 }
1175}
1176
1177static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1178{
1179 switch (pin) {
1180 case HPD_PORT_B:
1181 return val & PORTB_HOTPLUG_LONG_DETECT;
1182 case HPD_PORT_C:
1183 return val & PORTC_HOTPLUG_LONG_DETECT;
1184 case HPD_PORT_D:
1185 return val & PORTD_HOTPLUG_LONG_DETECT;
1186 default:
1187 return false;
1188 }
1189}
1190
1191static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1192{
1193 switch (pin) {
1194 case HPD_PORT_B:
1195 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1196 case HPD_PORT_C:
1197 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1198 case HPD_PORT_D:
1199 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1200 default:
1201 return false;
1202 }
1203}

/*
 * intel_get_hpd_pins - decode which hpd pins have triggered
 *
 * Translate the raw hotplug trigger bits and the digital hotplug control
 * register into a mask of hpd pins that fired (*pin_mask) and the subset of
 * those that saw a long pulse (*long_mask).
 */
1212static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1213 u32 *pin_mask, u32 *long_mask,
1214 u32 hotplug_trigger, u32 dig_hotplug_reg,
1215 const u32 hpd[HPD_NUM_PINS],
1216 bool long_pulse_detect(enum hpd_pin pin, u32 val))
1217{
1218 enum hpd_pin pin;
1219
1220 BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1221
1222 for_each_hpd_pin(pin) {
1223 if ((hpd[pin] & hotplug_trigger) == 0)
1224 continue;
1225
1226 *pin_mask |= BIT(pin);
1227
1228 if (long_pulse_detect(pin, dig_hotplug_reg))
1229 *long_mask |= BIT(pin);
1230 }
1231
1232 drm_dbg(&dev_priv->drm,
1233 "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1234 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1235
1236}
1237
1238static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
1239 const u32 hpd[HPD_NUM_PINS])
1240{
1241 struct intel_encoder *encoder;
1242 u32 enabled_irqs = 0;
1243
1244 for_each_intel_encoder(&dev_priv->drm, encoder)
1245 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
1246 enabled_irqs |= hpd[encoder->hpd_pin];
1247
1248 return enabled_irqs;
1249}
1250
1251static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
1252 const u32 hpd[HPD_NUM_PINS])
1253{
1254 struct intel_encoder *encoder;
1255 u32 hotplug_irqs = 0;
1256
1257 for_each_intel_encoder(&dev_priv->drm, encoder)
1258 hotplug_irqs |= hpd[encoder->hpd_pin];
1259
1260 return hotplug_irqs;
1261}
1262
1263static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
1264 hotplug_enables_func hotplug_enables)
1265{
1266 struct intel_encoder *encoder;
1267 u32 hotplug = 0;
1268
1269 for_each_intel_encoder(&i915->drm, encoder)
1270 hotplug |= hotplug_enables(i915, encoder->hpd_pin);
1271
1272 return hotplug;
1273}
1274
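/*
 * GMBUS and DP AUX completion interrupts only need to wake up whoever is
 * waiting for the transfer to finish; both waiters sleep on
 * gmbus_wait_queue.
 */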
1275static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1276{
1277 wake_up_all(&dev_priv->gmbus_wait_queue);
1278}
1279
1280static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1281{
1282 wake_up_all(&dev_priv->gmbus_wait_queue);
1283}
1284
1285#if defined(CONFIG_DEBUG_FS)
1286static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1287 enum pipe pipe,
1288 u32 crc0, u32 crc1,
1289 u32 crc2, u32 crc3,
1290 u32 crc4)
1291{
1292 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1293 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1294 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1295
1296 trace_intel_pipe_crc(crtc, crcs);
1297
1298 spin_lock(&pipe_crc->lock);
1307 if (pipe_crc->skipped <= 0 ||
1308 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1309 pipe_crc->skipped++;
1310 spin_unlock(&pipe_crc->lock);
1311 return;
1312 }
1313 spin_unlock(&pipe_crc->lock);
1314
1315 drm_crtc_add_crc_entry(&crtc->base, true,
1316 drm_crtc_accurate_vblank_count(&crtc->base),
1317 crcs);
1318}
1319#else
1320static inline void
1321display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1322 enum pipe pipe,
1323 u32 crc0, u32 crc1,
1324 u32 crc2, u32 crc3,
1325 u32 crc4) {}
1326#endif
1327
1328static void flip_done_handler(struct drm_i915_private *i915,
1329 enum pipe pipe)
1330{
1331 struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
1332 struct drm_crtc_state *crtc_state = crtc->base.state;
1333 struct drm_pending_vblank_event *e = crtc_state->event;
1334 struct drm_device *dev = &i915->drm;
1335 unsigned long irqflags;
1336
1337 spin_lock_irqsave(&dev->event_lock, irqflags);
1338
1339 crtc_state->event = NULL;
1340
1341 drm_crtc_send_vblank_event(&crtc->base, e);
1342
1343 spin_unlock_irqrestore(&dev->event_lock, irqflags);
1344}
1345
1346static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1347 enum pipe pipe)
1348{
1349 display_pipe_crc_irq_handler(dev_priv, pipe,
1350 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1351 0, 0, 0, 0);
1352}
1353
1354static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1355 enum pipe pipe)
1356{
1357 display_pipe_crc_irq_handler(dev_priv, pipe,
1358 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1359 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
1360 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
1361 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
1362 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
1363}
1364
1365static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1366 enum pipe pipe)
1367{
1368 u32 res1, res2;
1369
1370 if (INTEL_GEN(dev_priv) >= 3)
1371 res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
1372 else
1373 res1 = 0;
1374
1375 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1376 res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
1377 else
1378 res2 = 0;
1379
1380 display_pipe_crc_irq_handler(dev_priv, pipe,
1381 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
1382 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
1383 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
1384 res1, res2);
1385}
1386
1387static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1388{
1389 enum pipe pipe;
1390
1391 for_each_pipe(dev_priv, pipe) {
1392 intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
1393 PIPESTAT_INT_STATUS_MASK |
1394 PIPE_FIFO_UNDERRUN_STATUS);
1395
1396 dev_priv->pipestat_irq_mask[pipe] = 0;
1397 }
1398}
1399
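/*
 * Ack the latched PIPESTAT bits (always including FIFO underrun) for each
 * pipe whose event bit is set in IIR, and stash them in pipe_stats[] for
 * the caller to handle outside the irq lock.
 */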
1400static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1401 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1402{
1403 enum pipe pipe;
1404
1405 spin_lock(&dev_priv->irq_lock);
1406
1407 if (!dev_priv->display_irqs_enabled) {
1408 spin_unlock(&dev_priv->irq_lock);
1409 return;
1410 }
1411
1412 for_each_pipe(dev_priv, pipe) {
1413 i915_reg_t reg;
1414 u32 status_mask, enable_mask, iir_bit = 0;
1425 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1426
1427 switch (pipe) {
1428 default:
1429 case PIPE_A:
1430 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1431 break;
1432 case PIPE_B:
1433 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1434 break;
1435 case PIPE_C:
1436 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1437 break;
1438 }
1439 if (iir & iir_bit)
1440 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1441
1442 if (!status_mask)
1443 continue;
1444
1445 reg = PIPESTAT(pipe);
1446 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1447 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1458 if (pipe_stats[pipe]) {
1459 intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1460 intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1461 }
1462 }
1463 spin_unlock(&dev_priv->irq_lock);
1464}
1465
1466static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1467 u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1468{
1469 enum pipe pipe;
1470
1471 for_each_pipe(dev_priv, pipe) {
1472 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1473 intel_handle_vblank(dev_priv, pipe);
1474
1475 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1476 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1477
1478 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1479 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1480 }
1481}
1482
1483static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1484 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1485{
1486 bool blc_event = false;
1487 enum pipe pipe;
1488
1489 for_each_pipe(dev_priv, pipe) {
1490 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1491 intel_handle_vblank(dev_priv, pipe);
1492
1493 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1494 blc_event = true;
1495
1496 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1497 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1498
1499 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1500 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1501 }
1502
1503 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1504 intel_opregion_asle_intr(dev_priv);
1505}
1506
1507static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1508 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1509{
1510 bool blc_event = false;
1511 enum pipe pipe;
1512
1513 for_each_pipe(dev_priv, pipe) {
1514 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1515 intel_handle_vblank(dev_priv, pipe);
1516
1517 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1518 blc_event = true;
1519
1520 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1521 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1522
1523 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1524 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1525 }
1526
1527 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1528 intel_opregion_asle_intr(dev_priv);
1529
1530 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1531 gmbus_irq_handler(dev_priv);
1532}
1533
1534static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1535 u32 pipe_stats[I915_MAX_PIPES])
1536{
1537 enum pipe pipe;
1538
1539 for_each_pipe(dev_priv, pipe) {
1540 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1541 intel_handle_vblank(dev_priv, pipe);
1542
1543 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1544 flip_done_handler(dev_priv, pipe);
1545
1546 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1547 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1548
1549 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1550 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1551 }
1552
1553 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1554 gmbus_irq_handler(dev_priv);
1555}
1556
1557static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1558{
1559 u32 hotplug_status = 0, hotplug_status_mask;
1560 int i;
1561
1562 if (IS_G4X(dev_priv) ||
1563 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1564 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1565 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1566 else
1567 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * Hotplug status bits can be set again while we are clearing them,
	 * so read and clear PORT_HOTPLUG_STAT in a loop until it reads back
	 * quiescent, with a bounded number of iterations to avoid getting
	 * stuck.
	 */
1578 for (i = 0; i < 10; i++) {
1579 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1580
1581 if (tmp == 0)
1582 return hotplug_status;
1583
1584 hotplug_status |= tmp;
1585 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1586 }
1587
1588 drm_WARN_ONCE(&dev_priv->drm, 1,
1589 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1590 intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1591
1592 return hotplug_status;
1593}
1594
1595static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1596 u32 hotplug_status)
1597{
1598 u32 pin_mask = 0, long_mask = 0;
1599 u32 hotplug_trigger;
1600
1601 if (IS_G4X(dev_priv) ||
1602 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1603 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1604 else
1605 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1606
1607 if (hotplug_trigger) {
1608 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1609 hotplug_trigger, hotplug_trigger,
1610 dev_priv->hotplug.hpd,
1611 i9xx_port_hotplug_long_detect);
1612
1613 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1614 }
1615
1616 if ((IS_G4X(dev_priv) ||
1617 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1618 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1619 dp_aux_irq_handler(dev_priv);
1620}
1621
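/*
 * Top-level interrupt handler for VLV: mask the master interrupt, snapshot
 * and clear the GT, PM and display IIR registers, then dispatch the
 * individual events before unmasking again.
 */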
1622static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1623{
1624 struct drm_i915_private *dev_priv = arg;
1625 irqreturn_t ret = IRQ_NONE;
1626
1627 if (!intel_irqs_enabled(dev_priv))
1628 return IRQ_NONE;
1629
1630
1631 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1632
1633 do {
1634 u32 iir, gt_iir, pm_iir;
1635 u32 pipe_stats[I915_MAX_PIPES] = {};
1636 u32 hotplug_status = 0;
1637 u32 ier = 0;
1638
1639 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1640 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1641 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1642
1643 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1644 break;
1645
1646 ret = IRQ_HANDLED;

		/*
		 * Disable the master interrupt and VLV_IER while we ack and
		 * handle the IIR bits; restoring them at the end guarantees
		 * the interrupt line is re-asserted if any event is still
		 * pending when we leave.
		 */
1661 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1662 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1663 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1664
1665 if (gt_iir)
1666 intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1667 if (pm_iir)
1668 intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1669
1670 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1671 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1675 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1676
1677 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1678 I915_LPE_PIPE_B_INTERRUPT))
1679 intel_lpe_audio_irq_handler(dev_priv);
1685 if (iir)
1686 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1687
1688 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1689 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1690
1691 if (gt_iir)
1692 gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
1693 if (pm_iir)
1694 gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
1695
1696 if (hotplug_status)
1697 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1698
1699 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1700 } while (0);
1701
1702 pmu_irq_stats(dev_priv, ret);
1703
1704 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1705
1706 return ret;
1707}
1708
1709static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1710{
1711 struct drm_i915_private *dev_priv = arg;
1712 irqreturn_t ret = IRQ_NONE;
1713
1714 if (!intel_irqs_enabled(dev_priv))
1715 return IRQ_NONE;
1716
1717
1718 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1719
1720 do {
1721 u32 master_ctl, iir;
1722 u32 pipe_stats[I915_MAX_PIPES] = {};
1723 u32 hotplug_status = 0;
1724 u32 ier = 0;
1725
1726 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1727 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1728
1729 if (master_ctl == 0 && iir == 0)
1730 break;
1731
1732 ret = IRQ_HANDLED;

		/* Same master-interrupt dance as in valleyview_irq_handler(). */
1747 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1748 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1749 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1750
1751 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
1752
1753 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1754 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1758 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1759
1760 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1761 I915_LPE_PIPE_B_INTERRUPT |
1762 I915_LPE_PIPE_C_INTERRUPT))
1763 intel_lpe_audio_irq_handler(dev_priv);
1769 if (iir)
1770 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1771
1772 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1773 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1774
1775 if (hotplug_status)
1776 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1777
1778 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1779 } while (0);
1780
1781 pmu_irq_stats(dev_priv, ret);
1782
1783 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1784
1785 return ret;
1786}
1787
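/*
 * Hotplug handling for the south display engine (PCH): latch the digital
 * port hotplug register, write it back to ack, then decode which pins
 * triggered and whether the pulses were long.
 */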
1788static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1789 u32 hotplug_trigger)
1790{
1791 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1799 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1800 if (!hotplug_trigger) {
1801 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1802 PORTD_HOTPLUG_STATUS_MASK |
1803 PORTC_HOTPLUG_STATUS_MASK |
1804 PORTB_HOTPLUG_STATUS_MASK;
1805 dig_hotplug_reg &= ~mask;
1806 }
1807
1808 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1809 if (!hotplug_trigger)
1810 return;
1811
1812 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1813 hotplug_trigger, dig_hotplug_reg,
1814 dev_priv->hotplug.pch_hpd,
1815 pch_port_hotplug_long_detect);
1816
1817 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1818}
1819
1820static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1821{
1822 enum pipe pipe;
1823 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1824
1825 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1826
1827 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1828 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1829 SDE_AUDIO_POWER_SHIFT);
1830 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1831 port_name(port));
1832 }
1833
1834 if (pch_iir & SDE_AUX_MASK)
1835 dp_aux_irq_handler(dev_priv);
1836
1837 if (pch_iir & SDE_GMBUS)
1838 gmbus_irq_handler(dev_priv);
1839
1840 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1841 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1842
1843 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1844 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1845
1846 if (pch_iir & SDE_POISON)
1847 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1848
1849 if (pch_iir & SDE_FDI_MASK) {
1850 for_each_pipe(dev_priv, pipe)
1851 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1852 pipe_name(pipe),
1853 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1854 }
1855
1856 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1857 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1858
1859 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1860 drm_dbg(&dev_priv->drm,
1861 "PCH transcoder CRC error interrupt\n");
1862
1863 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1864 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1865
1866 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1867 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1868}
1869
1870static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1871{
1872 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1873 enum pipe pipe;
1874
1875 if (err_int & ERR_INT_POISON)
1876 drm_err(&dev_priv->drm, "Poison interrupt\n");
1877
1878 for_each_pipe(dev_priv, pipe) {
1879 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1880 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1881
1882 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1883 if (IS_IVYBRIDGE(dev_priv))
1884 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1885 else
1886 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1887 }
1888 }
1889
1890 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1891}
1892
1893static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1894{
1895 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1896 enum pipe pipe;
1897
1898 if (serr_int & SERR_INT_POISON)
1899 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1900
1901 for_each_pipe(dev_priv, pipe)
1902 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1903 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1904
1905 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1906}
1907
1908static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1909{
1910 enum pipe pipe;
1911 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1912
1913 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1914
1915 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1916 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1917 SDE_AUDIO_POWER_SHIFT_CPT);
1918 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1919 port_name(port));
1920 }
1921
1922 if (pch_iir & SDE_AUX_MASK_CPT)
1923 dp_aux_irq_handler(dev_priv);
1924
1925 if (pch_iir & SDE_GMBUS_CPT)
1926 gmbus_irq_handler(dev_priv);
1927
1928 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1929 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1930
1931 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1932 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1933
1934 if (pch_iir & SDE_FDI_MASK_CPT) {
1935 for_each_pipe(dev_priv, pipe)
1936 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1937 pipe_name(pipe),
1938 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1939 }
1940
1941 if (pch_iir & SDE_ERROR_CPT)
1942 cpt_serr_int_handler(dev_priv);
1943}
1944
1945static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1946{
1947 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1948 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1949 u32 pin_mask = 0, long_mask = 0;
1950
1951 if (ddi_hotplug_trigger) {
1952 u32 dig_hotplug_reg;
1953
1954 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
1955 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1956
1957 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1958 ddi_hotplug_trigger, dig_hotplug_reg,
1959 dev_priv->hotplug.pch_hpd,
1960 icp_ddi_port_hotplug_long_detect);
1961 }
1962
1963 if (tc_hotplug_trigger) {
1964 u32 dig_hotplug_reg;
1965
1966 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
1967 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
1968
1969 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1970 tc_hotplug_trigger, dig_hotplug_reg,
1971 dev_priv->hotplug.pch_hpd,
1972 icp_tc_port_hotplug_long_detect);
1973 }
1974
1975 if (pin_mask)
1976 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1977
1978 if (pch_iir & SDE_GMBUS_ICP)
1979 gmbus_irq_handler(dev_priv);
1980}
1981
1982static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1983{
1984 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1985 ~SDE_PORTE_HOTPLUG_SPT;
1986 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1987 u32 pin_mask = 0, long_mask = 0;
1988
1989 if (hotplug_trigger) {
1990 u32 dig_hotplug_reg;
1991
1992 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1993 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1994
1995 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1996 hotplug_trigger, dig_hotplug_reg,
1997 dev_priv->hotplug.pch_hpd,
1998 spt_port_hotplug_long_detect);
1999 }
2000
2001 if (hotplug2_trigger) {
2002 u32 dig_hotplug_reg;
2003
2004 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
2005 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2006
2007 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2008 hotplug2_trigger, dig_hotplug_reg,
2009 dev_priv->hotplug.pch_hpd,
2010 spt_port_hotplug2_long_detect);
2011 }
2012
2013 if (pin_mask)
2014 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2015
2016 if (pch_iir & SDE_GMBUS_CPT)
2017 gmbus_irq_handler(dev_priv);
2018}
2019
2020static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2021 u32 hotplug_trigger)
2022{
2023 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2024
2025 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
2026 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2027
2028 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2029 hotplug_trigger, dig_hotplug_reg,
2030 dev_priv->hotplug.hpd,
2031 ilk_port_hotplug_long_detect);
2032
2033 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2034}
2035
2036static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2037 u32 de_iir)
2038{
2039 enum pipe pipe;
2040 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2041
2042 if (hotplug_trigger)
2043 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2044
2045 if (de_iir & DE_AUX_CHANNEL_A)
2046 dp_aux_irq_handler(dev_priv);
2047
2048 if (de_iir & DE_GSE)
2049 intel_opregion_asle_intr(dev_priv);
2050
2051 if (de_iir & DE_POISON)
2052 drm_err(&dev_priv->drm, "Poison interrupt\n");
2053
2054 for_each_pipe(dev_priv, pipe) {
2055 if (de_iir & DE_PIPE_VBLANK(pipe))
2056 intel_handle_vblank(dev_priv, pipe);
2057
2058 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2059 flip_done_handler(dev_priv, pipe);
2060
2061 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2062 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2063
2064 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2065 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2066 }
2067
2068
2069 if (de_iir & DE_PCH_EVENT) {
2070 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2071
2072 if (HAS_PCH_CPT(dev_priv))
2073 cpt_irq_handler(dev_priv, pch_iir);
2074 else
2075 ibx_irq_handler(dev_priv, pch_iir);
2076
2077
2078 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2079 }
2080
2081 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2082 gen5_rps_irq_handler(&dev_priv->gt.rps);
2083}
2084
2085static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2086 u32 de_iir)
2087{
2088 enum pipe pipe;
2089 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2090
2091 if (hotplug_trigger)
2092 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2093
2094 if (de_iir & DE_ERR_INT_IVB)
2095 ivb_err_int_handler(dev_priv);
2096
2097 if (de_iir & DE_EDP_PSR_INT_HSW) {
2098 u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
2099
2100 intel_psr_irq_handler(dev_priv, psr_iir);
2101 intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
2102 }
2103
2104 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2105 dp_aux_irq_handler(dev_priv);
2106
2107 if (de_iir & DE_GSE_IVB)
2108 intel_opregion_asle_intr(dev_priv);
2109
2110 for_each_pipe(dev_priv, pipe) {
2111 if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2112 intel_handle_vblank(dev_priv, pipe);
2113
2114 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2115 flip_done_handler(dev_priv, pipe);
2116 }
2117
2118
2119 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2120 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2121
2122 cpt_irq_handler(dev_priv, pch_iir);
2123
2124
2125 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2126 }
2127}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2137static irqreturn_t ilk_irq_handler(int irq, void *arg)
2138{
2139 struct drm_i915_private *i915 = arg;
2140 void __iomem * const regs = i915->uncore.regs;
2141 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2142 irqreturn_t ret = IRQ_NONE;
2143
2144 if (unlikely(!intel_irqs_enabled(i915)))
2145 return IRQ_NONE;
2146
2147
2148 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2149
2150
2151 de_ier = raw_reg_read(regs, DEIER);
2152 raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2159 if (!HAS_PCH_NOP(i915)) {
2160 sde_ier = raw_reg_read(regs, SDEIER);
2161 raw_reg_write(regs, SDEIER, 0);
2162 }
2166 gt_iir = raw_reg_read(regs, GTIIR);
2167 if (gt_iir) {
2168 raw_reg_write(regs, GTIIR, gt_iir);
2169 if (INTEL_GEN(i915) >= 6)
2170 gen6_gt_irq_handler(&i915->gt, gt_iir);
2171 else
2172 gen5_gt_irq_handler(&i915->gt, gt_iir);
2173 ret = IRQ_HANDLED;
2174 }
2175
2176 de_iir = raw_reg_read(regs, DEIIR);
2177 if (de_iir) {
2178 raw_reg_write(regs, DEIIR, de_iir);
2179 if (INTEL_GEN(i915) >= 7)
2180 ivb_display_irq_handler(i915, de_iir);
2181 else
2182 ilk_display_irq_handler(i915, de_iir);
2183 ret = IRQ_HANDLED;
2184 }
2185
2186 if (INTEL_GEN(i915) >= 6) {
2187 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2188 if (pm_iir) {
2189 raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2190 gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
2191 ret = IRQ_HANDLED;
2192 }
2193 }
2194
2195 raw_reg_write(regs, DEIER, de_ier);
2196 if (sde_ier)
2197 raw_reg_write(regs, SDEIER, sde_ier);
2198
2199 pmu_irq_stats(i915, ret);
2200
2201
2202 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2203
2204 return ret;
2205}
2206
2207static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2208 u32 hotplug_trigger)
2209{
2210 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2211
2212 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2213 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2214
2215 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2216 hotplug_trigger, dig_hotplug_reg,
2217 dev_priv->hotplug.hpd,
2218 bxt_port_hotplug_long_detect);
2219
2220 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2221}
2222
2223static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2224{
2225 u32 pin_mask = 0, long_mask = 0;
2226 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2227 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2228
2229 if (trigger_tc) {
2230 u32 dig_hotplug_reg;
2231
2232 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
2233 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2234
2235 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2236 trigger_tc, dig_hotplug_reg,
2237 dev_priv->hotplug.hpd,
2238 gen11_port_hotplug_long_detect);
2239 }
2240
2241 if (trigger_tbt) {
2242 u32 dig_hotplug_reg;
2243
2244 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
2245 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2246
2247 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2248 trigger_tbt, dig_hotplug_reg,
2249 dev_priv->hotplug.hpd,
2250 gen11_port_hotplug_long_detect);
2251 }
2252
2253 if (pin_mask)
2254 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2255 else
2256 drm_err(&dev_priv->drm,
2257 "Unexpected DE HPD interrupt 0x%08x\n", iir);
2258}
2259
2260static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2261{
2262 u32 mask;
2263
2264 if (INTEL_GEN(dev_priv) >= 12)
2265 return TGL_DE_PORT_AUX_DDIA |
2266 TGL_DE_PORT_AUX_DDIB |
2267 TGL_DE_PORT_AUX_DDIC |
2268 TGL_DE_PORT_AUX_USBC1 |
2269 TGL_DE_PORT_AUX_USBC2 |
2270 TGL_DE_PORT_AUX_USBC3 |
2271 TGL_DE_PORT_AUX_USBC4 |
2272 TGL_DE_PORT_AUX_USBC5 |
2273 TGL_DE_PORT_AUX_USBC6;
2274
2275
2276 mask = GEN8_AUX_CHANNEL_A;
2277 if (INTEL_GEN(dev_priv) >= 9)
2278 mask |= GEN9_AUX_CHANNEL_B |
2279 GEN9_AUX_CHANNEL_C |
2280 GEN9_AUX_CHANNEL_D;
2281
2282 if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2283 mask |= CNL_AUX_CHANNEL_F;
2284
2285 if (IS_GEN(dev_priv, 11))
2286 mask |= ICL_AUX_CHANNEL_E;
2287
2288 return mask;
2289}
2290
2291static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2292{
2293 if (IS_ROCKETLAKE(dev_priv))
2294 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2295 else if (INTEL_GEN(dev_priv) >= 11)
2296 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2297 else if (INTEL_GEN(dev_priv) >= 9)
2298 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2299 else
2300 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2301}
2302
2303static void
2304gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2305{
2306 bool found = false;
2307
2308 if (iir & GEN8_DE_MISC_GSE) {
2309 intel_opregion_asle_intr(dev_priv);
2310 found = true;
2311 }
2312
2313 if (iir & GEN8_DE_EDP_PSR) {
2314 u32 psr_iir;
2315 i915_reg_t iir_reg;
2316
2317 if (INTEL_GEN(dev_priv) >= 12)
2318 iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
2319 else
2320 iir_reg = EDP_PSR_IIR;
2321
2322 psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
2323 intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
2324
2325 if (psr_iir)
2326 found = true;
2327
2328 intel_psr_irq_handler(dev_priv, psr_iir);
2329 }
2330
2331 if (!found)
2332 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2333}
2334
2335static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2336 u32 te_trigger)
2337{
2338 enum pipe pipe = INVALID_PIPE;
2339 enum transcoder dsi_trans;
2340 enum port port;
2341 u32 val, tmp;
2342
 /*
  * In case of dual link, TE comes from DSI_1;
  * this is to check if dual link is enabled.
  */
2347 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2348 val &= PORT_SYNC_MODE_ENABLE;
2349
 /*
  * If dual link is enabled, then read the DSI_0
  * transcoder registers.
  */
2354 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2355 PORT_A : PORT_B;
2356 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2357
2358
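 /* Check if the DSI transcoder is configured in command mode */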
2359 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2360 val = val & OP_MODE_MASK;
2361
2362 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2364 return;
2365 }
2366
2367
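 /* Get the pipe for handling the VBLANK event */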
2368 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2369 switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2370 case TRANS_DDI_EDP_INPUT_A_ON:
2371 pipe = PIPE_A;
2372 break;
2373 case TRANS_DDI_EDP_INPUT_B_ONOFF:
2374 pipe = PIPE_B;
2375 break;
2376 case TRANS_DDI_EDP_INPUT_C_ONOFF:
2377 pipe = PIPE_C;
2378 break;
2379 default:
2380 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2381 return;
2382 }
2383
2384 intel_handle_vblank(dev_priv, pipe);
2385
2386
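 /* clear TE in dsi IIR */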
2387 port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2388 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2389 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2390}
2391
2392static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2393{
2394 if (INTEL_GEN(i915) >= 9)
2395 return GEN9_PIPE_PLANE1_FLIP_DONE;
2396 else
2397 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2398}
2399
2400static irqreturn_t
2401gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2402{
2403 irqreturn_t ret = IRQ_NONE;
2404 u32 iir;
2405 enum pipe pipe;
2406
2407 if (master_ctl & GEN8_DE_MISC_IRQ) {
2408 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2409 if (iir) {
2410 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2411 ret = IRQ_HANDLED;
2412 gen8_de_misc_irq_handler(dev_priv, iir);
2413 } else {
2414 drm_err(&dev_priv->drm,
2415 "The master control interrupt lied (DE MISC)!\n");
2416 }
2417 }
2418
2419 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2420 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2421 if (iir) {
2422 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2423 ret = IRQ_HANDLED;
2424 gen11_hpd_irq_handler(dev_priv, iir);
2425 } else {
2426 drm_err(&dev_priv->drm,
2427 "The master control interrupt lied, (DE HPD)!\n");
2428 }
2429 }
2430
2431 if (master_ctl & GEN8_DE_PORT_IRQ) {
2432 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2433 if (iir) {
2434 bool found = false;
2435
2436 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2437 ret = IRQ_HANDLED;
2438
2439 if (iir & gen8_de_port_aux_mask(dev_priv)) {
2440 dp_aux_irq_handler(dev_priv);
2441 found = true;
2442 }
2443
2444 if (IS_GEN9_LP(dev_priv)) {
2445 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2446
2447 if (hotplug_trigger) {
2448 bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2449 found = true;
2450 }
2451 } else if (IS_BROADWELL(dev_priv)) {
2452 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2453
2454 if (hotplug_trigger) {
2455 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2456 found = true;
2457 }
2458 }
2459
2460 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2461 gmbus_irq_handler(dev_priv);
2462 found = true;
2463 }
2464
2465 if (INTEL_GEN(dev_priv) >= 11) {
2466 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2467
2468 if (te_trigger) {
2469 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2470 found = true;
2471 }
2472 }
2473
2474 if (!found)
2475 drm_err(&dev_priv->drm,
2476 "Unexpected DE Port interrupt\n");
 } else
2479 drm_err(&dev_priv->drm,
2480 "The master control interrupt lied (DE PORT)!\n");
2481 }
2482
2483 for_each_pipe(dev_priv, pipe) {
2484 u32 fault_errors;
2485
2486 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2487 continue;
2488
2489 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2490 if (!iir) {
2491 drm_err(&dev_priv->drm,
2492 "The master control interrupt lied (DE PIPE)!\n");
2493 continue;
2494 }
2495
2496 ret = IRQ_HANDLED;
2497 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2498
2499 if (iir & GEN8_PIPE_VBLANK)
2500 intel_handle_vblank(dev_priv, pipe);
2501
2502 if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2503 flip_done_handler(dev_priv, pipe);
2504
2505 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2506 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2507
2508 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2509 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2510
2511 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2512 if (fault_errors)
2513 drm_err(&dev_priv->drm,
2514 "Fault errors on pipe %c: 0x%08x\n",
2515 pipe_name(pipe),
2516 fault_errors);
2517 }
2518
2519 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2520 master_ctl & GEN8_DE_PCH_IRQ) {
 /*
  * FIXME(BDW): Assume for now that the new interrupt handling
  * scheme also closed the SDE interrupt handling race we've seen
  * on older pch-split platforms. But this needs testing.
  */
2526 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2527 if (iir) {
2528 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2529 ret = IRQ_HANDLED;
2530
2531 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2532 icp_irq_handler(dev_priv, iir);
2533 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2534 spt_irq_handler(dev_priv, iir);
2535 else
2536 cpt_irq_handler(dev_priv, iir);
2537 } else {
 /*
  * Like on previous PCH there seems to be something
  * fishy going on with forwarding PCH interrupts.
  */
2542 drm_dbg(&dev_priv->drm,
2543 "The master control interrupt lied (SDE)!\n");
2544 }
2545 }
2546
2547 return ret;
2548}
2549
2550static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2551{
2552 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2553
 /*
  * Now with master disabled, get a sample of level indications
  * for this interrupt. Indications will be cleared on related acks.
  * New indications can and will light up during processing,
  * and will generate new interrupt after enabling master.
  */
2560 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2561}
2562
2563static inline void gen8_master_intr_enable(void __iomem * const regs)
2564{
2565 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2566}
2567
2568static irqreturn_t gen8_irq_handler(int irq, void *arg)
2569{
2570 struct drm_i915_private *dev_priv = arg;
2571 void __iomem * const regs = dev_priv->uncore.regs;
2572 u32 master_ctl;
2573
2574 if (!intel_irqs_enabled(dev_priv))
2575 return IRQ_NONE;
2576
2577 master_ctl = gen8_master_intr_disable(regs);
2578 if (!master_ctl) {
2579 gen8_master_intr_enable(regs);
2580 return IRQ_NONE;
2581 }
2582
2583
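 /* Find, queue (onto bottom-halves), then clear each source */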
2584 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
2585
2586
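 /* IRQs are synced during runtime_suspend, we don't require a wakeref */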
2587 if (master_ctl & ~GEN8_GT_IRQS) {
2588 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2589 gen8_de_irq_handler(dev_priv, master_ctl);
2590 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2591 }
2592
2593 gen8_master_intr_enable(regs);
2594
2595 pmu_irq_stats(dev_priv, IRQ_HANDLED);
2596
2597 return IRQ_HANDLED;
2598}
2599
2600static u32
2601gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
2602{
2603 void __iomem * const regs = gt->uncore->regs;
2604 u32 iir;
2605
2606 if (!(master_ctl & GEN11_GU_MISC_IRQ))
2607 return 0;
2608
2609 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2610 if (likely(iir))
2611 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2612
2613 return iir;
2614}
2615
2616static void
2617gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2618{
2619 if (iir & GEN11_GU_MISC_GSE)
2620 intel_opregion_asle_intr(gt->i915);
2621}
2622
2623static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2624{
2625 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2626
 /*
  * Now with master disabled, get a sample of level indications
  * for this interrupt. Indications will be cleared on related acks.
  * New indications can and will light up during processing,
  * and will generate new interrupt after enabling master.
  */
2633 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2634}
2635
2636static inline void gen11_master_intr_enable(void __iomem * const regs)
2637{
2638 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2639}
2640
2641static void
2642gen11_display_irq_handler(struct drm_i915_private *i915)
2643{
2644 void __iomem * const regs = i915->uncore.regs;
2645 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2646
2647 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2648
 /*
  * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
  * for the display related bits.
  */
2652 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2653 gen8_de_irq_handler(i915, disp_ctl);
2654 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2655 GEN11_DISPLAY_IRQ_ENABLE);
2656
2657 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2658}
2659
2660static __always_inline irqreturn_t
2661__gen11_irq_handler(struct drm_i915_private * const i915,
2662 u32 (*intr_disable)(void __iomem * const regs),
2663 void (*intr_enable)(void __iomem * const regs))
2664{
2665 void __iomem * const regs = i915->uncore.regs;
2666 struct intel_gt *gt = &i915->gt;
2667 u32 master_ctl;
2668 u32 gu_misc_iir;
2669
2670 if (!intel_irqs_enabled(i915))
2671 return IRQ_NONE;
2672
2673 master_ctl = intr_disable(regs);
2674 if (!master_ctl) {
2675 intr_enable(regs);
2676 return IRQ_NONE;
2677 }
2678
2679
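 /* Find, queue (onto bottom-halves), then clear each source */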
2680 gen11_gt_irq_handler(gt, master_ctl);
2681
2682
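 /* IRQs are synced during runtime_suspend, we don't require a wakeref */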
2683 if (master_ctl & GEN11_DISPLAY_IRQ)
2684 gen11_display_irq_handler(i915);
2685
2686 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2687
2688 intr_enable(regs);
2689
2690 gen11_gu_misc_irq_handler(gt, gu_misc_iir);
2691
2692 pmu_irq_stats(i915, IRQ_HANDLED);
2693
2694 return IRQ_HANDLED;
2695}
2696
2697static irqreturn_t gen11_irq_handler(int irq, void *arg)
2698{
2699 return __gen11_irq_handler(arg,
2700 gen11_master_intr_disable,
2701 gen11_master_intr_enable);
2702}
2703
2704static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
2705{
2706 u32 val;
2707
2708
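 /* First disable interrupts */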
2709 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
2710
2711
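 /* Get the indication levels and ack the master unit */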
2712 val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
2713 if (unlikely(!val))
2714 return 0;
2715
2716 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
2717
 /*
  * Now with master disabled, get a sample of level indications
  * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
  * out as this bit doesn't exist anymore for DG1.
  */
2723 val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
2724 if (unlikely(!val))
2725 return 0;
2726
2727 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
2728
2729 return val;
2730}
2731
2732static inline void dg1_master_intr_enable(void __iomem * const regs)
2733{
2734 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
2735}
2736
2737static irqreturn_t dg1_irq_handler(int irq, void *arg)
2738{
2739 return __gen11_irq_handler(arg,
2740 dg1_master_intr_disable_and_ack,
2741 dg1_master_intr_enable);
2742}
2743
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2747int i8xx_enable_vblank(struct drm_crtc *crtc)
2748{
2749 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2750 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2751 unsigned long irqflags;
2752
2753 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2754 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2755 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2756
2757 return 0;
2758}
2759
2760int i915gm_enable_vblank(struct drm_crtc *crtc)
2761{
2762 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2763
 /*
  * Vblank interrupts fail to wake the device up from C2+.
  * Disabling render clock gating during C-states avoids
  * the problem. There is a small power cost so we do this
  * only when vblank interrupts are actually enabled.
  */
2770 if (dev_priv->vblank_enabled++ == 0)
2771 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2772
2773 return i8xx_enable_vblank(crtc);
2774}
2775
2776int i965_enable_vblank(struct drm_crtc *crtc)
2777{
2778 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2779 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2780 unsigned long irqflags;
2781
2782 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2783 i915_enable_pipestat(dev_priv, pipe,
2784 PIPE_START_VBLANK_INTERRUPT_STATUS);
2785 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2786
2787 return 0;
2788}
2789
2790int ilk_enable_vblank(struct drm_crtc *crtc)
2791{
2792 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2793 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2794 unsigned long irqflags;
2795 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2796 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2797
2798 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2799 ilk_enable_display_irq(dev_priv, bit);
2800 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2801
 /* Even though there is no DMC, frame counter can get stuck when
  * PSR is active as no frames are generated.
  */
2805 if (HAS_PSR(dev_priv))
2806 drm_crtc_vblank_restore(crtc);
2807
2808 return 0;
2809}
2810
2811static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2812 bool enable)
2813{
2814 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2815 enum port port;
2816 u32 tmp;
2817
2818 if (!(intel_crtc->mode_flags &
2819 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2820 return false;
2821
2822
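 /* for dual link cases we consider TE from slave */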
2823 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2824 port = PORT_B;
2825 else
2826 port = PORT_A;
2827
2828 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
2829 if (enable)
2830 tmp &= ~DSI_TE_EVENT;
2831 else
2832 tmp |= DSI_TE_EVENT;
2833
2834 intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
2835
2836 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2837 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2838
2839 return true;
2840}
2841
2842int bdw_enable_vblank(struct drm_crtc *crtc)
2843{
2844 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2845 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2846 enum pipe pipe = intel_crtc->pipe;
2847 unsigned long irqflags;
2848
2849 if (gen11_dsi_configure_te(intel_crtc, true))
2850 return 0;
2851
2852 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2853 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2854 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2855
 /* Even if there is no DMC, frame counter can get stuck when
  * PSR is active as no frames are generated, so check only for PSR.
  */
2859 if (HAS_PSR(dev_priv))
2860 drm_crtc_vblank_restore(crtc);
2861
2862 return 0;
2863}
2864
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
2868void i8xx_disable_vblank(struct drm_crtc *crtc)
2869{
2870 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2871 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2872 unsigned long irqflags;
2873
2874 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2875 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2876 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2877}
2878
2879void i915gm_disable_vblank(struct drm_crtc *crtc)
2880{
2881 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2882
2883 i8xx_disable_vblank(crtc);
2884
2885 if (--dev_priv->vblank_enabled == 0)
2886 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2887}
2888
2889void i965_disable_vblank(struct drm_crtc *crtc)
2890{
2891 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2892 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2893 unsigned long irqflags;
2894
2895 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2896 i915_disable_pipestat(dev_priv, pipe,
2897 PIPE_START_VBLANK_INTERRUPT_STATUS);
2898 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2899}
2900
2901void ilk_disable_vblank(struct drm_crtc *crtc)
2902{
2903 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2904 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2905 unsigned long irqflags;
2906 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2907 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2908
2909 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2910 ilk_disable_display_irq(dev_priv, bit);
2911 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2912}
2913
2914void bdw_disable_vblank(struct drm_crtc *crtc)
2915{
2916 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2917 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2918 enum pipe pipe = intel_crtc->pipe;
2919 unsigned long irqflags;
2920
2921 if (gen11_dsi_configure_te(intel_crtc, false))
2922 return;
2923
2924 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2925 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2926 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2927}
2928
2929static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2930{
2931 struct intel_uncore *uncore = &dev_priv->uncore;
2932
2933 if (HAS_PCH_NOP(dev_priv))
2934 return;
2935
2936 GEN3_IRQ_RESET(uncore, SDE);
2937
2938 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2939 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2940}
2941
2942static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2943{
2944 struct intel_uncore *uncore = &dev_priv->uncore;
2945
2946 if (IS_CHERRYVIEW(dev_priv))
2947 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2948 else
2949 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
2950
2951 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2952 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
2953
2954 i9xx_pipestat_irq_reset(dev_priv);
2955
2956 GEN3_IRQ_RESET(uncore, VLV_);
2957 dev_priv->irq_mask = ~0u;
2958}
2959
2960static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2961{
2962 struct intel_uncore *uncore = &dev_priv->uncore;
2963
2964 u32 pipestat_mask;
2965 u32 enable_mask;
2966 enum pipe pipe;
2967
2968 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2969
2970 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2971 for_each_pipe(dev_priv, pipe)
2972 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2973
2974 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2975 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2976 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2977 I915_LPE_PIPE_A_INTERRUPT |
2978 I915_LPE_PIPE_B_INTERRUPT;
2979
2980 if (IS_CHERRYVIEW(dev_priv))
2981 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2982 I915_LPE_PIPE_C_INTERRUPT;
2983
2984 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2985
2986 dev_priv->irq_mask = ~enable_mask;
2987
2988 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2989}
2990
2991
2992
2993static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2994{
2995 struct intel_uncore *uncore = &dev_priv->uncore;
2996
2997 GEN3_IRQ_RESET(uncore, DE);
2998 dev_priv->irq_mask = ~0u;
2999
3000 if (IS_GEN(dev_priv, 7))
3001 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3002
3003 if (IS_HASWELL(dev_priv)) {
3004 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3005 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3006 }
3007
3008 gen5_gt_irq_reset(&dev_priv->gt);
3009
3010 ibx_irq_reset(dev_priv);
3011}
3012
3013static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3014{
3015 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3016 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3017
3018 gen5_gt_irq_reset(&dev_priv->gt);
3019
3020 spin_lock_irq(&dev_priv->irq_lock);
3021 if (dev_priv->display_irqs_enabled)
3022 vlv_display_irq_reset(dev_priv);
3023 spin_unlock_irq(&dev_priv->irq_lock);
3024}
3025
3026static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3027{
3028 struct intel_uncore *uncore = &dev_priv->uncore;
3029 enum pipe pipe;
3030
3031 gen8_master_intr_disable(dev_priv->uncore.regs);
3032
3033 gen8_gt_irq_reset(&dev_priv->gt);
3034
3035 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3036 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3037
3038 for_each_pipe(dev_priv, pipe)
3039 if (intel_display_power_is_enabled(dev_priv,
3040 POWER_DOMAIN_PIPE(pipe)))
3041 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3042
3043 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3044 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3045 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3046
3047 if (HAS_PCH_SPLIT(dev_priv))
3048 ibx_irq_reset(dev_priv);
3049}
3050
3051static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3052{
3053 struct intel_uncore *uncore = &dev_priv->uncore;
3054 enum pipe pipe;
3055 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3056 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3057
3058 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3059
3060 if (INTEL_GEN(dev_priv) >= 12) {
3061 enum transcoder trans;
3062
3063 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3064 enum intel_display_power_domain domain;
3065
3066 domain = POWER_DOMAIN_TRANSCODER(trans);
3067 if (!intel_display_power_is_enabled(dev_priv, domain))
3068 continue;
3069
3070 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3071 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3072 }
3073 } else {
3074 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3075 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3076 }
3077
3078 for_each_pipe(dev_priv, pipe)
3079 if (intel_display_power_is_enabled(dev_priv,
3080 POWER_DOMAIN_PIPE(pipe)))
3081 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3082
3083 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3084 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3085 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3086
3087 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3088 GEN3_IRQ_RESET(uncore, SDE);
3089
3090
3091 if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
3092 (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
3093 INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
3094 intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
3095 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
3096 intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
3097 SBCLK_RUN_REFCLK_DIS, 0);
3098 }
3099}
3100
3101static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3102{
3103 struct intel_uncore *uncore = &dev_priv->uncore;
3104
3105 if (HAS_MASTER_UNIT_IRQ(dev_priv))
3106 dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
3107 else
3108 gen11_master_intr_disable(dev_priv->uncore.regs);
3109
3110 gen11_gt_irq_reset(&dev_priv->gt);
3111 gen11_display_irq_reset(dev_priv);
3112
3113 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3114 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3115}
3116
3117void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3118 u8 pipe_mask)
3119{
3120 struct intel_uncore *uncore = &dev_priv->uncore;
3121 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
3122 gen8_de_pipe_flip_done_mask(dev_priv);
3123 enum pipe pipe;
3124
3125 spin_lock_irq(&dev_priv->irq_lock);
3126
3127 if (!intel_irqs_enabled(dev_priv)) {
3128 spin_unlock_irq(&dev_priv->irq_lock);
3129 return;
3130 }
3131
3132 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3133 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3134 dev_priv->de_irq_mask[pipe],
3135 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3136
3137 spin_unlock_irq(&dev_priv->irq_lock);
3138}
3139
3140void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3141 u8 pipe_mask)
3142{
3143 struct intel_uncore *uncore = &dev_priv->uncore;
3144 enum pipe pipe;
3145
3146 spin_lock_irq(&dev_priv->irq_lock);
3147
3148 if (!intel_irqs_enabled(dev_priv)) {
3149 spin_unlock_irq(&dev_priv->irq_lock);
3150 return;
3151 }
3152
3153 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3154 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3155
3156 spin_unlock_irq(&dev_priv->irq_lock);
3157
3158
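 /* make sure we're done processing display irqs */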
3159 intel_synchronize_irq(dev_priv);
3160}
3161
3162static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3163{
3164 struct intel_uncore *uncore = &dev_priv->uncore;
3165
3166 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
3167 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3168
3169 gen8_gt_irq_reset(&dev_priv->gt);
3170
3171 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3172
3173 spin_lock_irq(&dev_priv->irq_lock);
3174 if (dev_priv->display_irqs_enabled)
3175 vlv_display_irq_reset(dev_priv);
3176 spin_unlock_irq(&dev_priv->irq_lock);
3177}
3178
3179static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
3180 enum hpd_pin pin)
3181{
3182 switch (pin) {
3183 case HPD_PORT_A:
 /*
  * When CPU and PCH are on the same package, port A
  * HPD must be enabled in both north and south.
  */
3188 return HAS_PCH_LPT_LP(i915) ?
3189 PORTA_HOTPLUG_ENABLE : 0;
3190 case HPD_PORT_B:
3191 return PORTB_HOTPLUG_ENABLE |
3192 PORTB_PULSE_DURATION_2ms;
3193 case HPD_PORT_C:
3194 return PORTC_HOTPLUG_ENABLE |
3195 PORTC_PULSE_DURATION_2ms;
3196 case HPD_PORT_D:
3197 return PORTD_HOTPLUG_ENABLE |
3198 PORTD_PULSE_DURATION_2ms;
3199 default:
3200 return 0;
3201 }
3202}
3203
3204static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3205{
3206 u32 hotplug;
3207
 /*
  * Enable digital hotplug on the PCH, and configure the DP short pulse
  * duration to 2ms (which is the minimum in the Display Port spec).
  * The pulse duration bits are reserved on LPT+.
  */
3213 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3214 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3215 PORTB_HOTPLUG_ENABLE |
3216 PORTC_HOTPLUG_ENABLE |
3217 PORTD_HOTPLUG_ENABLE |
3218 PORTB_PULSE_DURATION_MASK |
3219 PORTC_PULSE_DURATION_MASK |
3220 PORTD_PULSE_DURATION_MASK);
3221 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
3222 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3223}
3224
3225static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3226{
3227 u32 hotplug_irqs, enabled_irqs;
3228
3229 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3230 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3231
3232 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3233
3234 ibx_hpd_detection_setup(dev_priv);
3235}
3236
3237static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
3238 enum hpd_pin pin)
3239{
3240 switch (pin) {
3241 case HPD_PORT_A:
3242 case HPD_PORT_B:
3243 case HPD_PORT_C:
3244 case HPD_PORT_D:
3245 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
3246 default:
3247 return 0;
3248 }
3249}
3250
3251static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
3252 enum hpd_pin pin)
3253{
3254 switch (pin) {
3255 case HPD_PORT_TC1:
3256 case HPD_PORT_TC2:
3257 case HPD_PORT_TC3:
3258 case HPD_PORT_TC4:
3259 case HPD_PORT_TC5:
3260 case HPD_PORT_TC6:
3261 return ICP_TC_HPD_ENABLE(pin);
3262 default:
3263 return 0;
3264 }
3265}
3266
3267static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3268{
3269 u32 hotplug;
3270
3271 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
3272 hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3273 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
3274 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
3275 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
3276 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
3277 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
3278}
3279
3280static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3281{
3282 u32 hotplug;
3283
3284 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
3285 hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3286 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3287 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3288 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
3289 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
3290 ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
3291 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
3292 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
3293}
3294
3295static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3296{
3297 u32 hotplug_irqs, enabled_irqs;
3298
3299 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3300 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3301
3302 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3303 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3304
3305 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3306
3307 icp_ddi_hpd_detection_setup(dev_priv);
3308 icp_tc_hpd_detection_setup(dev_priv);
3309}
3310
3311static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
3312 enum hpd_pin pin)
3313{
3314 switch (pin) {
3315 case HPD_PORT_TC1:
3316 case HPD_PORT_TC2:
3317 case HPD_PORT_TC3:
3318 case HPD_PORT_TC4:
3319 case HPD_PORT_TC5:
3320 case HPD_PORT_TC6:
3321 return GEN11_HOTPLUG_CTL_ENABLE(pin);
3322 default:
3323 return 0;
3324 }
3325}
3326
3327static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3328{
3329 u32 val;
3330
3331 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3332 val |= (INVERT_DDIA_HPD |
3333 INVERT_DDIB_HPD |
3334 INVERT_DDIC_HPD |
3335 INVERT_DDID_HPD);
3336 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3337
3338 icp_hpd_irq_setup(dev_priv);
3339}
3340
3341static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3342{
3343 u32 hotplug;
3344
3345 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
3346 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3347 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3348 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3349 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3350 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3351 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3352 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3353 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
3354}
3355
3356static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3357{
3358 u32 hotplug;
3359
3360 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
3361 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3362 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3363 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3364 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3365 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3366 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3367 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3368 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
3369}
3370
3371static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3372{
3373 u32 hotplug_irqs, enabled_irqs;
3374 u32 val;
3375
3376 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3377 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3378
3379 val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3380 val &= ~hotplug_irqs;
3381 val |= ~enabled_irqs & hotplug_irqs;
3382 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
3383 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3384
3385 gen11_tc_hpd_detection_setup(dev_priv);
3386 gen11_tbt_hpd_detection_setup(dev_priv);
3387
3388 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3389 icp_hpd_irq_setup(dev_priv);
3390}
3391
3392static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3393 enum hpd_pin pin)
3394{
3395 switch (pin) {
3396 case HPD_PORT_A:
3397 return PORTA_HOTPLUG_ENABLE;
3398 case HPD_PORT_B:
3399 return PORTB_HOTPLUG_ENABLE;
3400 case HPD_PORT_C:
3401 return PORTC_HOTPLUG_ENABLE;
3402 case HPD_PORT_D:
3403 return PORTD_HOTPLUG_ENABLE;
3404 default:
3405 return 0;
3406 }
3407}
3408
3409static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3410 enum hpd_pin pin)
3411{
3412 switch (pin) {
3413 case HPD_PORT_E:
3414 return PORTE_HOTPLUG_ENABLE;
3415 default:
3416 return 0;
3417 }
3418}
3419
3420static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3421{
3422 u32 val, hotplug;
3423
3424
3425 if (HAS_PCH_CNP(dev_priv)) {
3426 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3427 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3428 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3429 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3430 }
3431
3432
3433 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3434 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3435 PORTB_HOTPLUG_ENABLE |
3436 PORTC_HOTPLUG_ENABLE |
3437 PORTD_HOTPLUG_ENABLE);
3438 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
3439 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3440
3441 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
3442 hotplug &= ~PORTE_HOTPLUG_ENABLE;
3443 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
3444 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
3445}
3446
3447static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3448{
3449 u32 hotplug_irqs, enabled_irqs;
3450
3451 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3452 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3453
3454 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3455 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3456
3457 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3458
3459 spt_hpd_detection_setup(dev_priv);
3460}
3461
3462static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3463 enum hpd_pin pin)
3464{
3465 switch (pin) {
3466 case HPD_PORT_A:
3467 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3468 DIGITAL_PORTA_PULSE_DURATION_2ms;
3469 default:
3470 return 0;
3471 }
3472}
3473
3474static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3475{
3476 u32 hotplug;
3477
 /*
  * Enable digital hotplug on the CPU, and configure the DP short pulse
  * duration to 2ms (which is the minimum in the Display Port spec).
  * The pulse duration bits are reserved on HSW+.
  */
3483 hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
3484 hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
3485 DIGITAL_PORTA_PULSE_DURATION_MASK);
3486 hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
3487 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3488}
3489
3490static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3491{
3492 u32 hotplug_irqs, enabled_irqs;
3493
3494 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3495 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3496
3497 if (INTEL_GEN(dev_priv) >= 8)
3498 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3499 else
3500 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3501
3502 ilk_hpd_detection_setup(dev_priv);
3503
3504 ibx_hpd_irq_setup(dev_priv);
3505}
3506
3507static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3508 enum hpd_pin pin)
3509{
3510 u32 hotplug;
3511
3512 switch (pin) {
3513 case HPD_PORT_A:
3514 hotplug = PORTA_HOTPLUG_ENABLE;
3515 if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3516 hotplug |= BXT_DDIA_HPD_INVERT;
3517 return hotplug;
3518 case HPD_PORT_B:
3519 hotplug = PORTB_HOTPLUG_ENABLE;
3520 if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3521 hotplug |= BXT_DDIB_HPD_INVERT;
3522 return hotplug;
3523 case HPD_PORT_C:
3524 hotplug = PORTC_HOTPLUG_ENABLE;
3525 if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3526 hotplug |= BXT_DDIC_HPD_INVERT;
3527 return hotplug;
3528 default:
3529 return 0;
3530 }
3531}
3532
3533static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3534{
3535 u32 hotplug;
3536
3537 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3538 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3539 PORTB_HOTPLUG_ENABLE |
3540 PORTC_HOTPLUG_ENABLE |
3541 BXT_DDIA_HPD_INVERT |
3542 BXT_DDIB_HPD_INVERT |
3543 BXT_DDIC_HPD_INVERT);
3544 hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
3545 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3546}
3547
3548static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3549{
3550 u32 hotplug_irqs, enabled_irqs;
3551
3552 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3553 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3554
3555 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3556
3557 bxt_hpd_detection_setup(dev_priv);
3558}
3559
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 */
3571static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3572{
3573 struct intel_uncore *uncore = &dev_priv->uncore;
3574 u32 mask;
3575
3576 if (HAS_PCH_NOP(dev_priv))
3577 return;
3578
3579 if (HAS_PCH_IBX(dev_priv))
3580 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3581 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3582 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3583 else
3584 mask = SDE_GMBUS_CPT;
3585
3586 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3587}
3588
3589static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3590{
3591 struct intel_uncore *uncore = &dev_priv->uncore;
3592 u32 display_mask, extra_mask;
3593
3594 if (INTEL_GEN(dev_priv) >= 7) {
3595 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3596 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3597 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3598 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3599 DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3600 DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3601 DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3602 DE_DP_A_HOTPLUG_IVB);
3603 } else {
3604 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3605 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3606 DE_PIPEA_CRC_DONE | DE_POISON);
3607 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3608 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3609 DE_PLANE_FLIP_DONE(PLANE_A) |
3610 DE_PLANE_FLIP_DONE(PLANE_B) |
3611 DE_DP_A_HOTPLUG);
3612 }
3613
3614 if (IS_HASWELL(dev_priv)) {
3615 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3616 display_mask |= DE_EDP_PSR_INT_HSW;
3617 }
3618
3619 if (IS_IRONLAKE_M(dev_priv))
3620 extra_mask |= DE_PCU_EVENT;
3621
3622 dev_priv->irq_mask = ~display_mask;
3623
3624 ibx_irq_postinstall(dev_priv);
3625
3626 gen5_gt_irq_postinstall(&dev_priv->gt);
3627
3628 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3629 display_mask | extra_mask);
3630}
3631
3632void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3633{
3634 lockdep_assert_held(&dev_priv->irq_lock);
3635
3636 if (dev_priv->display_irqs_enabled)
3637 return;
3638
3639 dev_priv->display_irqs_enabled = true;
3640
3641 if (intel_irqs_enabled(dev_priv)) {
3642 vlv_display_irq_reset(dev_priv);
3643 vlv_display_irq_postinstall(dev_priv);
3644 }
3645}
3646
3647void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3648{
3649 lockdep_assert_held(&dev_priv->irq_lock);
3650
3651 if (!dev_priv->display_irqs_enabled)
3652 return;
3653
3654 dev_priv->display_irqs_enabled = false;
3655
3656 if (intel_irqs_enabled(dev_priv))
3657 vlv_display_irq_reset(dev_priv);
3658}
3659
3660
3661static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3662{
3663 gen5_gt_irq_postinstall(&dev_priv->gt);
3664
3665 spin_lock_irq(&dev_priv->irq_lock);
3666 if (dev_priv->display_irqs_enabled)
3667 vlv_display_irq_postinstall(dev_priv);
3668 spin_unlock_irq(&dev_priv->irq_lock);
3669
3670 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3671 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3672}
3673
3674static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3675{
3676 struct intel_uncore *uncore = &dev_priv->uncore;
3677
3678 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3679 GEN8_PIPE_CDCLK_CRC_DONE;
3680 u32 de_pipe_enables;
3681 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3682 u32 de_port_enables;
3683 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3684 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3685 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3686 enum pipe pipe;
3687
3688 if (INTEL_GEN(dev_priv) <= 10)
3689 de_misc_masked |= GEN8_DE_MISC_GSE;
3690
3691 if (IS_GEN9_LP(dev_priv))
3692 de_port_masked |= BXT_DE_PORT_GMBUS;
3693
3694 if (INTEL_GEN(dev_priv) >= 11) {
3695 enum port port;
3696
3697 if (intel_bios_is_dsi_present(dev_priv, &port))
3698 de_port_masked |= DSI0_TE | DSI1_TE;
3699 }
3700
3701 de_pipe_enables = de_pipe_masked |
3702 GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
3703 gen8_de_pipe_flip_done_mask(dev_priv);
3704
3705 de_port_enables = de_port_masked;
3706 if (IS_GEN9_LP(dev_priv))
3707 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3708 else if (IS_BROADWELL(dev_priv))
3709 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3710
3711 if (INTEL_GEN(dev_priv) >= 12) {
3712 enum transcoder trans;
3713
3714 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3715 enum intel_display_power_domain domain;
3716
3717 domain = POWER_DOMAIN_TRANSCODER(trans);
3718 if (!intel_display_power_is_enabled(dev_priv, domain))
3719 continue;
3720
3721 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3722 }
3723 } else {
3724 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3725 }
3726
3727 for_each_pipe(dev_priv, pipe) {
3728 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3729
3730 if (intel_display_power_is_enabled(dev_priv,
3731 POWER_DOMAIN_PIPE(pipe)))
3732 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3733 dev_priv->de_irq_mask[pipe],
3734 de_pipe_enables);
3735 }
3736
3737 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3738 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3739
3740 if (INTEL_GEN(dev_priv) >= 11) {
3741 u32 de_hpd_masked = 0;
3742 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3743 GEN11_DE_TBT_HOTPLUG_MASK;
3744
3745 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3746 de_hpd_enables);
3747 }
3748}
3749
3750static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3751{
3752 if (HAS_PCH_SPLIT(dev_priv))
3753 ibx_irq_postinstall(dev_priv);
3754
3755 gen8_gt_irq_postinstall(&dev_priv->gt);
3756 gen8_de_irq_postinstall(dev_priv);
3757
3758 gen8_master_intr_enable(dev_priv->uncore.regs);
3759}
3760
3761static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3762{
3763 struct intel_uncore *uncore = &dev_priv->uncore;
3764 u32 mask = SDE_GMBUS_ICP;
3765
3766 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3767}
3768
3769static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3770{
3771 struct intel_uncore *uncore = &dev_priv->uncore;
3772 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3773
3774 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3775 icp_irq_postinstall(dev_priv);
3776
3777 gen11_gt_irq_postinstall(&dev_priv->gt);
3778 gen8_de_irq_postinstall(dev_priv);
3779
3780 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3781
3782 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
3783
3784 if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
3785 dg1_master_intr_enable(uncore->regs);
3786 intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
3787 } else {
3788 gen11_master_intr_enable(uncore->regs);
3789 intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3790 }
3791}
3792
3793static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3794{
3795 gen8_gt_irq_postinstall(&dev_priv->gt);
3796
3797 spin_lock_irq(&dev_priv->irq_lock);
3798 if (dev_priv->display_irqs_enabled)
3799 vlv_display_irq_postinstall(dev_priv);
3800 spin_unlock_irq(&dev_priv->irq_lock);
3801
3802 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3803 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3804}
3805
3806static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3807{
3808 struct intel_uncore *uncore = &dev_priv->uncore;
3809
3810 i9xx_pipestat_irq_reset(dev_priv);
3811
3812 GEN2_IRQ_RESET(uncore);
3813 dev_priv->irq_mask = ~0u;
3814}
3815
3816static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3817{
3818 struct intel_uncore *uncore = &dev_priv->uncore;
3819 u16 enable_mask;
3820
3821 intel_uncore_write16(uncore,
3822 EMR,
3823 ~(I915_ERROR_PAGE_TABLE |
3824 I915_ERROR_MEMORY_REFRESH));
3825
3826
3827 dev_priv->irq_mask =
3828 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3829 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3830 I915_MASTER_ERROR_INTERRUPT);
3831
3832 enable_mask =
3833 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3834 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3835 I915_MASTER_ERROR_INTERRUPT |
3836 I915_USER_INTERRUPT;
3837
3838 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
3839
3840
3841
3842 spin_lock_irq(&dev_priv->irq_lock);
3843 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3844 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3845 spin_unlock_irq(&dev_priv->irq_lock);
3846}
3847
3848static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3849 u16 *eir, u16 *eir_stuck)
3850{
3851 struct intel_uncore *uncore = &i915->uncore;
3852 u16 emr;
3853
3854 *eir = intel_uncore_read16(uncore, EIR);
3855
3856 if (*eir)
3857 intel_uncore_write16(uncore, EIR, *eir);
3858
3859 *eir_stuck = intel_uncore_read16(uncore, EIR);
3860 if (*eir_stuck == 0)
3861 return;
3862
 /*
  * Toggle all EMR bits to make sure we get an edge
  * in the ISR master error bit if we don't clear
  * all the EIR bits. Otherwise the edge triggered
  * IIR on i965/g4x wouldn't notice that an interrupt
  * is still pending. Also some EIR bits can't be
  * cleared except by handling the underlying error
  * (or by a GPU reset) so we mask any bit that
  * remains set.
  */
3873 emr = intel_uncore_read16(uncore, EMR);
3874 intel_uncore_write16(uncore, EMR, 0xffff);
3875 intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3876}
3877
3878static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3879 u16 eir, u16 eir_stuck)
3880{
3881 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
3882
3883 if (eir_stuck)
3884 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3885 eir_stuck);
3886}
3887
3888static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3889 u32 *eir, u32 *eir_stuck)
3890{
3891 u32 emr;
3892
3893 *eir = intel_uncore_read(&dev_priv->uncore, EIR);
3894
3895 intel_uncore_write(&dev_priv->uncore, EIR, *eir);
3896
3897 *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
3898 if (*eir_stuck == 0)
3899 return;
3900
 /*
  * Toggle all EMR bits to make sure we get an edge
  * in the ISR master error bit if we don't clear
  * all the EIR bits. Otherwise the edge triggered
  * IIR on i965/g4x wouldn't notice that an interrupt
  * is still pending. Also some EIR bits can't be
  * cleared except by handling the underlying error
  * (or by a GPU reset) so we mask any bit that
  * remains set.
  */
3911 emr = intel_uncore_read(&dev_priv->uncore, EMR);
3912 intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
3913 intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
3914}
3915
3916static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3917 u32 eir, u32 eir_stuck)
3918{
3919 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
3920
3921 if (eir_stuck)
3922 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3923 eir_stuck);
3924}
3925
3926static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3927{
3928 struct drm_i915_private *dev_priv = arg;
3929 irqreturn_t ret = IRQ_NONE;
3930
3931 if (!intel_irqs_enabled(dev_priv))
3932 return IRQ_NONE;
3933
3934
3935 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3936
3937 do {
3938 u32 pipe_stats[I915_MAX_PIPES] = {};
3939 u16 eir = 0, eir_stuck = 0;
3940 u16 iir;
3941
3942 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
3943 if (iir == 0)
3944 break;
3945
3946 ret = IRQ_HANDLED;
3947
 /* Call regardless, as some status bits might not be
  * signalled in iir */
3950 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3951
3952 if (iir & I915_MASTER_ERROR_INTERRUPT)
3953 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3954
3955 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
3956
3957 if (iir & I915_USER_INTERRUPT)
3958 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
3959
3960 if (iir & I915_MASTER_ERROR_INTERRUPT)
3961 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
3962
3963 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3964 } while (0);
3965
3966 pmu_irq_stats(dev_priv, ret);
3967
3968 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3969
3970 return ret;
3971}
3972
3973static void i915_irq_reset(struct drm_i915_private *dev_priv)
3974{
3975 struct intel_uncore *uncore = &dev_priv->uncore;
3976
3977 if (I915_HAS_HOTPLUG(dev_priv)) {
3978 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3979 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
3980 }
3981
3982 i9xx_pipestat_irq_reset(dev_priv);
3983
3984 GEN3_IRQ_RESET(uncore, GEN2_);
3985 dev_priv->irq_mask = ~0u;
3986}
3987
3988static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
3989{
3990 struct intel_uncore *uncore = &dev_priv->uncore;
3991 u32 enable_mask;
3992
3993 intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
3994 I915_ERROR_MEMORY_REFRESH));
3995
3996
3997 dev_priv->irq_mask =
3998 ~(I915_ASLE_INTERRUPT |
3999 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4000 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4001 I915_MASTER_ERROR_INTERRUPT);
4002
4003 enable_mask =
4004 I915_ASLE_INTERRUPT |
4005 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4006 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4007 I915_MASTER_ERROR_INTERRUPT |
4008 I915_USER_INTERRUPT;
4009
4010 if (I915_HAS_HOTPLUG(dev_priv)) {
4011
4012 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4013
4014 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4015 }
4016
4017 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4018
4019
4020
4021 spin_lock_irq(&dev_priv->irq_lock);
4022 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4023 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4024 spin_unlock_irq(&dev_priv->irq_lock);
4025
4026 i915_enable_asle_pipestat(dev_priv);
4027}
4028
4029static irqreturn_t i915_irq_handler(int irq, void *arg)
4030{
4031 struct drm_i915_private *dev_priv = arg;
4032 irqreturn_t ret = IRQ_NONE;
4033
4034 if (!intel_irqs_enabled(dev_priv))
4035 return IRQ_NONE;
4036
4037
4038 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4039
4040 do {
4041 u32 pipe_stats[I915_MAX_PIPES] = {};
4042 u32 eir = 0, eir_stuck = 0;
4043 u32 hotplug_status = 0;
4044 u32 iir;
4045
4046 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4047 if (iir == 0)
4048 break;
4049
4050 ret = IRQ_HANDLED;
4051
4052 if (I915_HAS_HOTPLUG(dev_priv) &&
4053 iir & I915_DISPLAY_PORT_INTERRUPT)
4054 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4055
 /* Call regardless, as some status bits might not be
  * signalled in iir */
4058 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4059
4060 if (iir & I915_MASTER_ERROR_INTERRUPT)
4061 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4062
4063 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4064
4065 if (iir & I915_USER_INTERRUPT)
4066 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
4067
4068 if (iir & I915_MASTER_ERROR_INTERRUPT)
4069 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4070
4071 if (hotplug_status)
4072 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4073
4074 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4075 } while (0);
4076
4077 pmu_irq_stats(dev_priv, ret);
4078
4079 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4080
4081 return ret;
4082}
4083
4084static void i965_irq_reset(struct drm_i915_private *dev_priv)
4085{
4086 struct intel_uncore *uncore = &dev_priv->uncore;
4087
4088 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4089 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4090
4091 i9xx_pipestat_irq_reset(dev_priv);
4092
4093 GEN3_IRQ_RESET(uncore, GEN2_);
4094 dev_priv->irq_mask = ~0u;
4095}
4096
4097static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4098{
4099 struct intel_uncore *uncore = &dev_priv->uncore;
4100 u32 enable_mask;
4101 u32 error_mask;
4102
 /*
  * Enable some error detection, note the instruction error mask
  * bit is reserved, so we leave it masked.
  */
4107 if (IS_G4X(dev_priv)) {
4108 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4109 GM45_ERROR_MEM_PRIV |
4110 GM45_ERROR_CP_PRIV |
4111 I915_ERROR_MEMORY_REFRESH);
4112 } else {
4113 error_mask = ~(I915_ERROR_PAGE_TABLE |
4114 I915_ERROR_MEMORY_REFRESH);
4115 }
4116 intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
4117
4118
4119 dev_priv->irq_mask =
4120 ~(I915_ASLE_INTERRUPT |
4121 I915_DISPLAY_PORT_INTERRUPT |
4122 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4123 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4124 I915_MASTER_ERROR_INTERRUPT);
4125
4126 enable_mask =
4127 I915_ASLE_INTERRUPT |
4128 I915_DISPLAY_PORT_INTERRUPT |
4129 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4130 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4131 I915_MASTER_ERROR_INTERRUPT |
4132 I915_USER_INTERRUPT;
4133
4134 if (IS_G4X(dev_priv))
4135 enable_mask |= I915_BSD_USER_INTERRUPT;
4136
4137 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4138
4139
4140
4141 spin_lock_irq(&dev_priv->irq_lock);
4142 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4143 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4144 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4145 spin_unlock_irq(&dev_priv->irq_lock);
4146
4147 i915_enable_asle_pipestat(dev_priv);
4148}
4149
4150static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4151{
4152 u32 hotplug_en;
4153
4154 lockdep_assert_held(&dev_priv->irq_lock);
4155
 /* Note HDMI and DP share hotplug bits */
 /* enable bits are the same for all generations */
4158 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
 /* Programming the CRT detection parameters tends
  * to generate a spurious hotplug event about three
  * seconds later. So just do it once.
  */
4163 if (IS_G4X(dev_priv))
4164 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4165 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4166
4167
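 /* Ignore TV since it's buggy */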
4168 i915_hotplug_interrupt_update_locked(dev_priv,
4169 HOTPLUG_INT_EN_MASK |
4170 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4171 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4172 hotplug_en);
4173}
4174
4175static irqreturn_t i965_irq_handler(int irq, void *arg)
4176{
4177 struct drm_i915_private *dev_priv = arg;
4178 irqreturn_t ret = IRQ_NONE;
4179
4180 if (!intel_irqs_enabled(dev_priv))
4181 return IRQ_NONE;
4182
4183
4184 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4185
4186 do {
4187 u32 pipe_stats[I915_MAX_PIPES] = {};
4188 u32 eir = 0, eir_stuck = 0;
4189 u32 hotplug_status = 0;
4190 u32 iir;
4191
4192 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4193 if (iir == 0)
4194 break;
4195
4196 ret = IRQ_HANDLED;
4197
4198 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4199 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4200
 /* Call regardless, as some status bits might not be
  * signalled in iir */
4203 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4204
4205 if (iir & I915_MASTER_ERROR_INTERRUPT)
4206 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4207
4208 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4209
4210 if (iir & I915_USER_INTERRUPT)
4211 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
4212
4213 if (iir & I915_BSD_USER_INTERRUPT)
4214 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
4215
4216 if (iir & I915_MASTER_ERROR_INTERRUPT)
4217 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4218
4219 if (hotplug_status)
4220 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4221
4222 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4223 } while (0);
4224
 pmu_irq_stats(dev_priv, ret);
4226
4227 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4228
4229 return ret;
4230}
4231
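/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */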
4239void intel_irq_init(struct drm_i915_private *dev_priv)
4240{
4241 struct drm_device *dev = &dev_priv->drm;
4242 int i;
4243
4244 INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4245 for (i = 0; i < MAX_L3_SLICES; ++i)
4246 dev_priv->l3_parity.remap_info[i] = NULL;
4247
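 /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */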
4249 if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
4250 dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
4251
4252 if (!HAS_DISPLAY(dev_priv))
4253 return;
4254
4255 intel_hpd_init_pins(dev_priv);
4256
4257 intel_hpd_init_work(dev_priv);
4258
4259 dev->vblank_disable_immediate = true;
4260
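 /*
  * Most platforms treat the display irq block as an always-on
  * power domain. vlv/chv can disable it at runtime and need
  * special care to avoid writing any of the display block registers
  * outside of the power domain. We defer setting up the display irqs
  * in this case to the runtime pm.
  */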
4267 dev_priv->display_irqs_enabled = true;
4268 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4269 dev_priv->display_irqs_enabled = false;
4270
4271 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4272
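 /*
  * If we have MST support, we want to avoid doing short HPD IRQ storm
  * detection, as short HPD storms will occur as a natural part of
  * sideband messaging with MST.
  * On older platforms however, IRQ storms can occur with both long and
  * short pulses, as seen on some G4x systems.
  */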
4278 dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4279
4280 if (HAS_GMCH(dev_priv)) {
4281 if (I915_HAS_HOTPLUG(dev_priv))
4282 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4283 } else {
4284 if (HAS_PCH_DG1(dev_priv))
4285 dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
4286 else if (INTEL_GEN(dev_priv) >= 11)
4287 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4288 else if (IS_GEN9_LP(dev_priv))
4289 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4290 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4291 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4292 else
4293 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4294 }
4295}
4296
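/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the irq support.
 */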
4303void intel_irq_fini(struct drm_i915_private *i915)
4304{
4305 int i;
4306
4307 for (i = 0; i < MAX_L3_SLICES; ++i)
4308 kfree(i915->l3_parity.remap_info[i]);
4309}
4310
4311static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4312{
4313 if (HAS_GMCH(dev_priv)) {
4314 if (IS_CHERRYVIEW(dev_priv))
4315 return cherryview_irq_handler;
4316 else if (IS_VALLEYVIEW(dev_priv))
4317 return valleyview_irq_handler;
4318 else if (IS_GEN(dev_priv, 4))
4319 return i965_irq_handler;
4320 else if (IS_GEN(dev_priv, 3))
4321 return i915_irq_handler;
4322 else
4323 return i8xx_irq_handler;
4324 } else {
4325 if (HAS_MASTER_UNIT_IRQ(dev_priv))
4326 return dg1_irq_handler;
4327 if (INTEL_GEN(dev_priv) >= 11)
4328 return gen11_irq_handler;
4329 else if (INTEL_GEN(dev_priv) >= 8)
4330 return gen8_irq_handler;
4331 else
4332 return ilk_irq_handler;
4333 }
4334}
4335
4336static void intel_irq_reset(struct drm_i915_private *dev_priv)
4337{
4338 if (HAS_GMCH(dev_priv)) {
4339 if (IS_CHERRYVIEW(dev_priv))
4340 cherryview_irq_reset(dev_priv);
4341 else if (IS_VALLEYVIEW(dev_priv))
4342 valleyview_irq_reset(dev_priv);
4343 else if (IS_GEN(dev_priv, 4))
4344 i965_irq_reset(dev_priv);
4345 else if (IS_GEN(dev_priv, 3))
4346 i915_irq_reset(dev_priv);
4347 else
4348 i8xx_irq_reset(dev_priv);
4349 } else {
 if (HAS_MASTER_UNIT_IRQ(dev_priv))
 dg1_irq_reset(dev_priv);
 else if (INTEL_GEN(dev_priv) >= 11)
 gen11_irq_reset(dev_priv);
4352 else if (INTEL_GEN(dev_priv) >= 8)
4353 gen8_irq_reset(dev_priv);
4354 else
4355 ilk_irq_reset(dev_priv);
4356 }
4357}
4358
4359static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4360{
4361 if (HAS_GMCH(dev_priv)) {
4362 if (IS_CHERRYVIEW(dev_priv))
4363 cherryview_irq_postinstall(dev_priv);
4364 else if (IS_VALLEYVIEW(dev_priv))
4365 valleyview_irq_postinstall(dev_priv);
4366 else if (IS_GEN(dev_priv, 4))
4367 i965_irq_postinstall(dev_priv);
4368 else if (IS_GEN(dev_priv, 3))
4369 i915_irq_postinstall(dev_priv);
4370 else
4371 i8xx_irq_postinstall(dev_priv);
4372 } else {
 if (HAS_MASTER_UNIT_IRQ(dev_priv))
 dg1_irq_postinstall(dev_priv);
 else if (INTEL_GEN(dev_priv) >= 11)
 gen11_irq_postinstall(dev_priv);
4375 else if (INTEL_GEN(dev_priv) >= 8)
4376 gen8_irq_postinstall(dev_priv);
4377 else
4378 ilk_irq_postinstall(dev_priv);
4379 }
4380}
4381
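/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence interrupt setup is finished here, while hotplug handling is
 * only enabled later.
 */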
4393int intel_irq_install(struct drm_i915_private *dev_priv)
4394{
4395 int irq = dev_priv->drm.pdev->irq;
4396 int ret;
4397
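 /*
  * We enable some interrupt sources in our postinstall hooks, so mark
  * interrupts as enabled _before_ actually enabling them to avoid
  * special cases in our ordering checks.
  */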
4403 dev_priv->runtime_pm.irqs_enabled = true;
4404
4405 dev_priv->drm.irq_enabled = true;
4406
4407 intel_irq_reset(dev_priv);
4408
4409 ret = request_irq(irq, intel_irq_handler(dev_priv),
4410 IRQF_SHARED, DRIVER_NAME, dev_priv);
4411 if (ret < 0) {
4412 dev_priv->drm.irq_enabled = false;
4413 return ret;
4414 }
4415
4416 intel_irq_postinstall(dev_priv);
4417
4418 return ret;
4419}
4420
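/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources associated with the registration.
 */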
4428void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4429{
4430 int irq = dev_priv->drm.pdev->irq;
4431
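 /*
  * FIXME we can get called twice during driver probe
  * error handling as well as during driver remove due to
  * intel_modeset_driver_remove() calling us out of sequence.
  * Would be nice if it didn't do that...
  */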
4438 if (!dev_priv->drm.irq_enabled)
4439 return;
4440
4441 dev_priv->drm.irq_enabled = false;
4442
4443 intel_irq_reset(dev_priv);
4444
4445 free_irq(irq, dev_priv);
4446
4447 intel_hpd_cancel_work(dev_priv);
4448 dev_priv->runtime_pm.irqs_enabled = false;
4449}
4450
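/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */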
4458void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4459{
4460 intel_irq_reset(dev_priv);
4461 dev_priv->runtime_pm.irqs_enabled = false;
4462 intel_synchronize_irq(dev_priv);
4463}
4464
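/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */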
4472void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4473{
4474 dev_priv->runtime_pm.irqs_enabled = true;
4475 intel_irq_reset(dev_priv);
4476 intel_irq_postinstall(dev_priv);
4477}
4478
4479bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4480{
4485 return dev_priv->runtime_pm.irqs_enabled;
4486}
4487
4488void intel_synchronize_irq(struct drm_i915_private *i915)
4489{
4490 synchronize_irq(i915->drm.pdev->irq);
4491}
4492