#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEN9_LP(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (INTEL_GEN(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (INTEL_GEN(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (HAS_PCH_DG1(dev_priv))
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
362void ilk_update_display_irq(struct drm_i915_private *dev_priv,
363 u32 interrupt_mask,
364 u32 enabled_irq_mask)
365{
366 u32 new_val;
367
368 lockdep_assert_held(&dev_priv->irq_lock);
369 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
370
371 new_val = dev_priv->irq_mask;
372 new_val &= ~interrupt_mask;
373 new_val |= (~enabled_irq_mask & interrupt_mask);
374
375 if (new_val != dev_priv->irq_mask &&
376 !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
377 dev_priv->irq_mask = new_val;
378 intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
379 intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
380 }
381}
382

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
389static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
390 u32 interrupt_mask,
391 u32 enabled_irq_mask)
392{
393 u32 new_val;
394 u32 old_val;
395
396 lockdep_assert_held(&dev_priv->irq_lock);
397
398 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
399
400 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
401 return;
402
403 old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
404
405 new_val = old_val;
406 new_val &= ~interrupt_mask;
407 new_val |= (~enabled_irq_mask & interrupt_mask);
408
409 if (new_val != old_val) {
410 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
411 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
412 }
413}
414

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
422void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
423 enum pipe pipe,
424 u32 interrupt_mask,
425 u32 enabled_irq_mask)
426{
427 u32 new_val;
428
429 lockdep_assert_held(&dev_priv->irq_lock);
430
431 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
432
433 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
434 return;
435
436 new_val = dev_priv->de_irq_mask[pipe];
437 new_val &= ~interrupt_mask;
438 new_val |= (~enabled_irq_mask & interrupt_mask);
439
440 if (new_val != dev_priv->de_irq_mask[pipe]) {
441 dev_priv->de_irq_mask[pipe] = new_val;
442 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
443 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
444 }
445}
446

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
453void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
454 u32 interrupt_mask,
455 u32 enabled_irq_mask)
456{
457 u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
458 sdeimr &= ~interrupt_mask;
459 sdeimr |= (~enabled_irq_mask & interrupt_mask);
460
461 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
462
463 lockdep_assert_held(&dev_priv->irq_lock);
464
465 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
466 return;
467
468 intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
469 intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
470}
471
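/*
 * Translate the status bits currently requested in pipestat_irq_mask[pipe]
 * into the corresponding PIPESTAT enable bits.  The VLV/CHV sprite flip done
 * and PSR bits don't map 1:1, so they are special-cased below.
 */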
472u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
473 enum pipe pipe)
474{
475 u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
476 u32 enable_mask = status_mask << 16;
477
478 lockdep_assert_held(&dev_priv->irq_lock);
479
480 if (INTEL_GEN(dev_priv) < 5)
481 goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
487 if (drm_WARN_ON_ONCE(&dev_priv->drm,
488 status_mask & PIPE_A_PSR_STATUS_VLV))
489 return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
494 if (drm_WARN_ON_ONCE(&dev_priv->drm,
495 status_mask & PIPE_B_PSR_STATUS_VLV))
496 return 0;
497
498 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
499 SPRITE0_FLIP_DONE_INT_EN_VLV |
500 SPRITE1_FLIP_DONE_INT_EN_VLV);
501 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
502 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
503 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
504 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
505
506out:
507 drm_WARN_ONCE(&dev_priv->drm,
508 enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
509 status_mask & ~PIPESTAT_INT_STATUS_MASK,
510 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
511 pipe_name(pipe), enable_mask, status_mask);
512
513 return enable_mask;
514}
515
516void i915_enable_pipestat(struct drm_i915_private *dev_priv,
517 enum pipe pipe, u32 status_mask)
518{
519 i915_reg_t reg = PIPESTAT(pipe);
520 u32 enable_mask;
521
522 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
523 "pipe %c: status_mask=0x%x\n",
524 pipe_name(pipe), status_mask);
525
526 lockdep_assert_held(&dev_priv->irq_lock);
527 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
528
529 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
530 return;
531
532 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
533 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
534
535 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
536 intel_uncore_posting_read(&dev_priv->uncore, reg);
537}
538
539void i915_disable_pipestat(struct drm_i915_private *dev_priv,
540 enum pipe pipe, u32 status_mask)
541{
542 i915_reg_t reg = PIPESTAT(pipe);
543 u32 enable_mask;
544
545 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
546 "pipe %c: status_mask=0x%x\n",
547 pipe_name(pipe), status_mask);
548
549 lockdep_assert_held(&dev_priv->irq_lock);
550 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
551
552 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
553 return;
554
555 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
556 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
557
558 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
559 intel_uncore_posting_read(&dev_priv->uncore, reg);
560}
561
562static bool i915_has_asle(struct drm_i915_private *dev_priv)
563{
564 if (!dev_priv->opregion.asle)
565 return false;
566
567 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
568}
569

/*
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
574static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
575{
576 if (!i915_has_asle(dev_priv))
577 return;
578
579 spin_lock_irq(&dev_priv->irq_lock);
580
581 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
582 if (INTEL_GEN(dev_priv) >= 4)
583 i915_enable_pipestat(dev_priv, PIPE_A,
584 PIPE_LEGACY_BLC_EVENT_STATUS);
585
586 spin_unlock_irq(&dev_priv->irq_lock);
587}

/*
 * The hardware frame counter used by i915_get_vblank_counter() below
 * increments at the beginning of active video, so the pixel counter is
 * consulted as well to make the returned value increment at the start
 * of vblank instead, which is what the vblank core expects.
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * When the vblank core has been told there is no usable hardware
	 * counter (max_vblank_count == 0) it keeps a software counter
	 * instead, so just report 0 here.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}
719
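/*
 * Return the number of scanlines that have passed since the start of the
 * current frame, derived from the frame timestamp (PIPE_FRMTMSTMP) and the
 * free-running timestamp counter (IVB_TIMESTAMP_CTR) together with the
 * mode's pixel clock and horizontal total.
 */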
720static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
721{
722 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
723 struct drm_vblank_crtc *vblank =
724 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
725 const struct drm_display_mode *mode = &vblank->hwmode;
726 u32 htotal = mode->crtc_htotal;
727 u32 clock = mode->crtc_clock;
728 u32 scan_prev_time, scan_curr_time, scan_post_time;
729
730
731
732
733
734
735
736 do {
737
738
739
740
741
742 scan_prev_time = intel_de_read_fw(dev_priv,
743 PIPE_FRMTMSTMP(crtc->pipe));
744
745
746
747
748
749 scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
750
751 scan_post_time = intel_de_read_fw(dev_priv,
752 PIPE_FRMTMSTMP(crtc->pipe));
753 } while (scan_post_time != scan_prev_time);
754
755 return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
756 clock), 1000 * htotal);
757}

/*
 * On certain encoders and platforms the pipe scanline register will not
 * work to get the scanline, since the timings are driven from the PORT or
 * there are issues with scanline register updates.
 * This function uses the frame timestamp and current timestamp registers
 * to calculate the scanline instead.
 */
767static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
768{
769 struct drm_vblank_crtc *vblank =
770 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
771 const struct drm_display_mode *mode = &vblank->hwmode;
772 u32 vblank_start = mode->crtc_vblank_start;
773 u32 vtotal = mode->crtc_vtotal;
774 u32 scanline;
775
776 scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
777 scanline = min(scanline, vtotal - 1);
778 scanline = (scanline + vblank_start) % vtotal;
779
780 return scanline;
781}
782
783
784
785
786
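/*
 * Return the current scanline of @crtc, or -1 if the pipe is not active.
 * Callers hold uncore.lock, which is what makes the non-locking _fw
 * register reads below safe.
 */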
787static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
788{
789 struct drm_device *dev = crtc->base.dev;
790 struct drm_i915_private *dev_priv = to_i915(dev);
791 const struct drm_display_mode *mode;
792 struct drm_vblank_crtc *vblank;
793 enum pipe pipe = crtc->pipe;
794 int position, vtotal;
795
796 if (!crtc->active)
797 return -1;
798
799 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
800 mode = &vblank->hwmode;
801
802 if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
803 return __intel_get_crtc_scanline_from_timestamp(crtc);
804
805 vtotal = mode->crtc_vtotal;
806 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
807 vtotal /= 2;
808
809 if (IS_GEN(dev_priv, 2))
810 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
811 else
812 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
826 if (HAS_DDI(dev_priv) && !position) {
827 int i, temp;
828
829 for (i = 0; i < 100; i++) {
830 udelay(1);
831 temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
832 if (temp != position) {
833 position = temp;
834 break;
835 }
836 }
837 }
838
839
840
841
842
843 return (position + crtc->scanline_offset) % vtotal;
844}
845
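/*
 * i915_get_crtc_scanoutpos() backs intel_crtc_get_vblank_timestamp(): it
 * samples the current scanout position (vpos/hpos relative to the start of
 * vblank) along with optional timestamps taken just before and after the
 * register reads, so the vblank core can compute a precise vblank timestamp.
 */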
846static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
847 bool in_vblank_irq,
848 int *vpos, int *hpos,
849 ktime_t *stime, ktime_t *etime,
850 const struct drm_display_mode *mode)
851{
852 struct drm_device *dev = _crtc->dev;
853 struct drm_i915_private *dev_priv = to_i915(dev);
854 struct intel_crtc *crtc = to_intel_crtc(_crtc);
855 enum pipe pipe = crtc->pipe;
856 int position;
857 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
858 unsigned long irqflags;
859 bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
860 IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
861 crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
862
863 if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
864 drm_dbg(&dev_priv->drm,
865 "trying to get scanoutpos for disabled "
866 "pipe %c\n", pipe_name(pipe));
867 return false;
868 }
869
870 htotal = mode->crtc_htotal;
871 hsync_start = mode->crtc_hsync_start;
872 vtotal = mode->crtc_vtotal;
873 vbl_start = mode->crtc_vblank_start;
874 vbl_end = mode->crtc_vblank_end;
875
876 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
877 vbl_start = DIV_ROUND_UP(vbl_start, 2);
878 vbl_end /= 2;
879 vtotal /= 2;
880 }
881
882
883
884
885
886
887 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
888
889
890
891
892 if (stime)
893 *stime = ktime_get();
894
895 if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
896 int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
897
898 position = __intel_get_crtc_scanline(crtc);
899
900
901
902
903
904
905
906 if (position >= vbl_start && scanlines < position)
907 position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
908 } else if (use_scanline_counter) {
909
910
911
912 position = __intel_get_crtc_scanline(crtc);
913 } else {
914
915
916
917
918 position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
919
920
921 vbl_start *= htotal;
922 vbl_end *= htotal;
923 vtotal *= htotal;
924
925
926
927
928
929
930
931
932
933
934 if (position >= vtotal)
935 position = vtotal - 1;
936
937
938
939
940
941
942
943
944
945
946 position = (position + htotal - hsync_start) % vtotal;
947 }
948
949
950 if (etime)
951 *etime = ktime_get();
952
953
954
955 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
956
957
958
959
960
961
962
963 if (position >= vbl_start)
964 position -= vbl_end;
965 else
966 position += vtotal - vbl_end;
967
968 if (use_scanline_counter) {
969 *vpos = position;
970 *hpos = 0;
971 } else {
972 *vpos = position / htotal;
973 *hpos = position - (*vpos * htotal);
974 }
975
976 return true;
977}
978
979bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
980 ktime_t *vblank_time, bool in_vblank_irq)
981{
982 return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
983 crtc, max_error, vblank_time, in_vblank_irq,
984 i915_get_crtc_scanoutpos);
985}
986
987int intel_get_crtc_scanline(struct intel_crtc *crtc)
988{
989 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
990 unsigned long irqflags;
991 int position;
992
993 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
994 position = __intel_get_crtc_scanline(crtc);
995 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
996
997 return position;
998}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
1009static void ivb_parity_work(struct work_struct *work)
1010{
1011 struct drm_i915_private *dev_priv =
1012 container_of(work, typeof(*dev_priv), l3_parity.error_work);
1013 struct intel_gt *gt = &dev_priv->gt;
1014 u32 error_status, row, bank, subbank;
1015 char *parity_event[6];
1016 u32 misccpctl;
1017 u8 slice = 0;
1018
1019
1020
1021
1022
1023 mutex_lock(&dev_priv->drm.struct_mutex);
1024
1025
1026 if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
1027 goto out;
1028
1029 misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1030 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1031 intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1032
1033 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1034 i915_reg_t reg;
1035
1036 slice--;
1037 if (drm_WARN_ON_ONCE(&dev_priv->drm,
1038 slice >= NUM_L3_SLICES(dev_priv)))
1039 break;
1040
1041 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1042
1043 reg = GEN7_L3CDERRST1(slice);
1044
1045 error_status = intel_uncore_read(&dev_priv->uncore, reg);
1046 row = GEN7_PARITY_ERROR_ROW(error_status);
1047 bank = GEN7_PARITY_ERROR_BANK(error_status);
1048 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1049
1050 intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1051 intel_uncore_posting_read(&dev_priv->uncore, reg);
1052
1053 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1054 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1055 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1056 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1057 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1058 parity_event[5] = NULL;
1059
1060 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1061 KOBJ_CHANGE, parity_event);
1062
1063 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1064 slice, row, bank, subbank);
1065
1066 kfree(parity_event[4]);
1067 kfree(parity_event[3]);
1068 kfree(parity_event[2]);
1069 kfree(parity_event[1]);
1070 }
1071
1072 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
1073
1074out:
1075 drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1076 spin_lock_irq(>->irq_lock);
1077 gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
1078 spin_unlock_irq(>->irq_lock);
1079
1080 mutex_unlock(&dev_priv->drm.struct_mutex);
1081}
1082
1083static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1084{
1085 switch (pin) {
1086 case HPD_PORT_TC1:
1087 case HPD_PORT_TC2:
1088 case HPD_PORT_TC3:
1089 case HPD_PORT_TC4:
1090 case HPD_PORT_TC5:
1091 case HPD_PORT_TC6:
1092 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
1093 default:
1094 return false;
1095 }
1096}
1097
1098static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1099{
1100 switch (pin) {
1101 case HPD_PORT_A:
1102 return val & PORTA_HOTPLUG_LONG_DETECT;
1103 case HPD_PORT_B:
1104 return val & PORTB_HOTPLUG_LONG_DETECT;
1105 case HPD_PORT_C:
1106 return val & PORTC_HOTPLUG_LONG_DETECT;
1107 default:
1108 return false;
1109 }
1110}
1111
1112static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1113{
1114 switch (pin) {
1115 case HPD_PORT_A:
1116 case HPD_PORT_B:
1117 case HPD_PORT_C:
1118 case HPD_PORT_D:
1119 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
1120 default:
1121 return false;
1122 }
1123}
1124
1125static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1126{
1127 switch (pin) {
1128 case HPD_PORT_TC1:
1129 case HPD_PORT_TC2:
1130 case HPD_PORT_TC3:
1131 case HPD_PORT_TC4:
1132 case HPD_PORT_TC5:
1133 case HPD_PORT_TC6:
1134 return val & ICP_TC_HPD_LONG_DETECT(pin);
1135 default:
1136 return false;
1137 }
1138}
1139
1140static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1141{
1142 switch (pin) {
1143 case HPD_PORT_E:
1144 return val & PORTE_HOTPLUG_LONG_DETECT;
1145 default:
1146 return false;
1147 }
1148}
1149
1150static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1151{
1152 switch (pin) {
1153 case HPD_PORT_A:
1154 return val & PORTA_HOTPLUG_LONG_DETECT;
1155 case HPD_PORT_B:
1156 return val & PORTB_HOTPLUG_LONG_DETECT;
1157 case HPD_PORT_C:
1158 return val & PORTC_HOTPLUG_LONG_DETECT;
1159 case HPD_PORT_D:
1160 return val & PORTD_HOTPLUG_LONG_DETECT;
1161 default:
1162 return false;
1163 }
1164}
1165
1166static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1167{
1168 switch (pin) {
1169 case HPD_PORT_A:
1170 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1171 default:
1172 return false;
1173 }
1174}
1175
1176static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1177{
1178 switch (pin) {
1179 case HPD_PORT_B:
1180 return val & PORTB_HOTPLUG_LONG_DETECT;
1181 case HPD_PORT_C:
1182 return val & PORTC_HOTPLUG_LONG_DETECT;
1183 case HPD_PORT_D:
1184 return val & PORTD_HOTPLUG_LONG_DETECT;
1185 default:
1186 return false;
1187 }
1188}
1189
1190static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1191{
1192 switch (pin) {
1193 case HPD_PORT_B:
1194 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1195 case HPD_PORT_C:
1196 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1197 case HPD_PORT_D:
1198 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1199 default:
1200 return false;
1201 }
1202}
1203

/*
 * Gather the hpd pins that triggered from @hotplug_trigger, using the
 * platform's @hpd pin table, and record in @long_mask which of them saw a
 * long pulse according to @long_pulse_detect and the digital hotplug
 * control register value in @dig_hotplug_reg.
 */
1211static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1212 u32 *pin_mask, u32 *long_mask,
1213 u32 hotplug_trigger, u32 dig_hotplug_reg,
1214 const u32 hpd[HPD_NUM_PINS],
1215 bool long_pulse_detect(enum hpd_pin pin, u32 val))
1216{
1217 enum hpd_pin pin;
1218
1219 BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1220
1221 for_each_hpd_pin(pin) {
1222 if ((hpd[pin] & hotplug_trigger) == 0)
1223 continue;
1224
1225 *pin_mask |= BIT(pin);
1226
1227 if (long_pulse_detect(pin, dig_hotplug_reg))
1228 *long_mask |= BIT(pin);
1229 }
1230
1231 drm_dbg(&dev_priv->drm,
1232 "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1233 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1234
1235}
1236
1237static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
1238 const u32 hpd[HPD_NUM_PINS])
1239{
1240 struct intel_encoder *encoder;
1241 u32 enabled_irqs = 0;
1242
1243 for_each_intel_encoder(&dev_priv->drm, encoder)
1244 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
1245 enabled_irqs |= hpd[encoder->hpd_pin];
1246
1247 return enabled_irqs;
1248}
1249
1250static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
1251 const u32 hpd[HPD_NUM_PINS])
1252{
1253 struct intel_encoder *encoder;
1254 u32 hotplug_irqs = 0;
1255
1256 for_each_intel_encoder(&dev_priv->drm, encoder)
1257 hotplug_irqs |= hpd[encoder->hpd_pin];
1258
1259 return hotplug_irqs;
1260}
1261
1262static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
1263 hotplug_enables_func hotplug_enables)
1264{
1265 struct intel_encoder *encoder;
1266 u32 hotplug = 0;
1267
1268 for_each_intel_encoder(&i915->drm, encoder)
1269 hotplug |= hotplug_enables(i915, encoder->hpd_pin);
1270
1271 return hotplug;
1272}
1273
1274static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1275{
1276 wake_up_all(&dev_priv->gmbus_wait_queue);
1277}
1278
1279static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1280{
1281 wake_up_all(&dev_priv->gmbus_wait_queue);
1282}
1283
1284#if defined(CONFIG_DEBUG_FS)
1285static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1286 enum pipe pipe,
1287 u32 crc0, u32 crc1,
1288 u32 crc2, u32 crc3,
1289 u32 crc4)
1290{
1291 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1292 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1293 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1294
1295 trace_intel_pipe_crc(crtc, crcs);
1296
1297 spin_lock(&pipe_crc->lock);

	/*
	 * The first CRC generated after enabling the CRC source is known to
	 * be bogus (and on gen8+ the second one as well), so drop those
	 * entries instead of handing them to userspace.
	 */
1306 if (pipe_crc->skipped <= 0 ||
1307 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1308 pipe_crc->skipped++;
1309 spin_unlock(&pipe_crc->lock);
1310 return;
1311 }
1312 spin_unlock(&pipe_crc->lock);
1313
1314 drm_crtc_add_crc_entry(&crtc->base, true,
1315 drm_crtc_accurate_vblank_count(&crtc->base),
1316 crcs);
1317}
1318#else
1319static inline void
1320display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1321 enum pipe pipe,
1322 u32 crc0, u32 crc1,
1323 u32 crc2, u32 crc3,
1324 u32 crc4) {}
1325#endif
1326
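/* Complete the pending page flip on @pipe by sending its vblank event. */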
1327static void flip_done_handler(struct drm_i915_private *i915,
1328 enum pipe pipe)
1329{
1330 struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
1331 struct drm_crtc_state *crtc_state = crtc->base.state;
1332 struct drm_pending_vblank_event *e = crtc_state->event;
1333 struct drm_device *dev = &i915->drm;
1334 unsigned long irqflags;
1335
1336 spin_lock_irqsave(&dev->event_lock, irqflags);
1337
1338 crtc_state->event = NULL;
1339
1340 drm_crtc_send_vblank_event(&crtc->base, e);
1341
1342 spin_unlock_irqrestore(&dev->event_lock, irqflags);
1343}
1344
1345static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1346 enum pipe pipe)
1347{
1348 display_pipe_crc_irq_handler(dev_priv, pipe,
1349 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1350 0, 0, 0, 0);
1351}
1352
1353static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1354 enum pipe pipe)
1355{
1356 display_pipe_crc_irq_handler(dev_priv, pipe,
1357 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1358 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
1359 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
1360 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
1361 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
1362}
1363
1364static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1365 enum pipe pipe)
1366{
1367 u32 res1, res2;
1368
1369 if (INTEL_GEN(dev_priv) >= 3)
1370 res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
1371 else
1372 res1 = 0;
1373
1374 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1375 res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
1376 else
1377 res2 = 0;
1378
1379 display_pipe_crc_irq_handler(dev_priv, pipe,
1380 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
1381 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
1382 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
1383 res1, res2);
1384}
1385
1386static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1387{
1388 enum pipe pipe;
1389
1390 for_each_pipe(dev_priv, pipe) {
1391 intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
1392 PIPESTAT_INT_STATUS_MASK |
1393 PIPE_FIFO_UNDERRUN_STATUS);
1394
1395 dev_priv->pipestat_irq_mask[pipe] = 0;
1396 }
1397}
1398
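/*
 * Ack the per-pipe PIPESTAT interrupts: collect the status bits we asked for
 * into @pipe_stats and clear them in the hardware while preserving the
 * enable bits.
 */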
1399static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1400 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1401{
1402 enum pipe pipe;
1403
1404 spin_lock(&dev_priv->irq_lock);
1405
1406 if (!dev_priv->display_irqs_enabled) {
1407 spin_unlock(&dev_priv->irq_lock);
1408 return;
1409 }
1410
1411 for_each_pipe(dev_priv, pipe) {
1412 i915_reg_t reg;
1413 u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit).
		 * Hence we need to be careful that we only handle what we
		 * want to handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
1424 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1425
1426 switch (pipe) {
1427 default:
1428 case PIPE_A:
1429 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1430 break;
1431 case PIPE_B:
1432 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1433 break;
1434 case PIPE_C:
1435 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1436 break;
1437 }
1438 if (iir & iir_bit)
1439 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1440
1441 if (!status_mask)
1442 continue;
1443
1444 reg = PIPESTAT(pipe);
1445 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1446 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457 if (pipe_stats[pipe]) {
1458 intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1459 intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1460 }
1461 }
1462 spin_unlock(&dev_priv->irq_lock);
1463}
1464
1465static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1466 u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1467{
1468 enum pipe pipe;
1469
1470 for_each_pipe(dev_priv, pipe) {
1471 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1472 intel_handle_vblank(dev_priv, pipe);
1473
1474 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1475 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1476
1477 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1478 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1479 }
1480}
1481
1482static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1483 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1484{
1485 bool blc_event = false;
1486 enum pipe pipe;
1487
1488 for_each_pipe(dev_priv, pipe) {
1489 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1490 intel_handle_vblank(dev_priv, pipe);
1491
1492 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1493 blc_event = true;
1494
1495 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1496 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1497
1498 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1499 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1500 }
1501
1502 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1503 intel_opregion_asle_intr(dev_priv);
1504}
1505
1506static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1507 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1508{
1509 bool blc_event = false;
1510 enum pipe pipe;
1511
1512 for_each_pipe(dev_priv, pipe) {
1513 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1514 intel_handle_vblank(dev_priv, pipe);
1515
1516 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1517 blc_event = true;
1518
1519 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1520 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1521
1522 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1523 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1524 }
1525
1526 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1527 intel_opregion_asle_intr(dev_priv);
1528
1529 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1530 gmbus_irq_handler(dev_priv);
1531}
1532
1533static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1534 u32 pipe_stats[I915_MAX_PIPES])
1535{
1536 enum pipe pipe;
1537
1538 for_each_pipe(dev_priv, pipe) {
1539 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1540 intel_handle_vblank(dev_priv, pipe);
1541
1542 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1543 flip_done_handler(dev_priv, pipe);
1544
1545 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1546 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1547
1548 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1549 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1550 }
1551
1552 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1553 gmbus_irq_handler(dev_priv);
1554}
1555
1556static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1557{
1558 u32 hotplug_status = 0, hotplug_status_mask;
1559 int i;
1560
1561 if (IS_G4X(dev_priv) ||
1562 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1563 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1564 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1565 else
1566 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt bits in
	 * PORT_HOTPLUG_STAT, otherwise the edge-triggered IIR on i965/g4x
	 * won't notice that an interrupt is still pending.  We can't rely on
	 * PORT_HOTPLUG_EN for this, as toggling the enable bits can itself
	 * generate a new hotplug interrupt, hence the bounded retry loop
	 * below.
	 */
1577 for (i = 0; i < 10; i++) {
1578 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1579
1580 if (tmp == 0)
1581 return hotplug_status;
1582
1583 hotplug_status |= tmp;
1584 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1585 }
1586
1587 drm_WARN_ONCE(&dev_priv->drm, 1,
1588 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1589 intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1590
1591 return hotplug_status;
1592}
1593
1594static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1595 u32 hotplug_status)
1596{
1597 u32 pin_mask = 0, long_mask = 0;
1598 u32 hotplug_trigger;
1599
1600 if (IS_G4X(dev_priv) ||
1601 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1602 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1603 else
1604 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1605
1606 if (hotplug_trigger) {
1607 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1608 hotplug_trigger, hotplug_trigger,
1609 dev_priv->hotplug.hpd,
1610 i9xx_port_hotplug_long_detect);
1611
1612 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1613 }
1614
1615 if ((IS_G4X(dev_priv) ||
1616 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1617 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1618 dp_aux_irq_handler(dev_priv);
1619}
1620
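/*
 * Top level display + GT interrupt handler for VLV: mask the master
 * interrupt and VLV_IER, ack and record the GT, PM, pipe and hotplug
 * sources, then re-enable and dispatch to the individual handlers.
 */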
1621static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1622{
1623 struct drm_i915_private *dev_priv = arg;
1624 irqreturn_t ret = IRQ_NONE;
1625
1626 if (!intel_irqs_enabled(dev_priv))
1627 return IRQ_NONE;
1628
1629
1630 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1631
1632 do {
1633 u32 iir, gt_iir, pm_iir;
1634 u32 pipe_stats[I915_MAX_PIPES] = {};
1635 u32 hotplug_status = 0;
1636 u32 ier = 0;
1637
1638 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1639 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1640 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1641
1642 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1643 break;
1644
1645 ret = IRQ_HANDLED;

		/*
		 * Disable master interrupt generation and VLV_IER while this
		 * batch of IIR bits is acked and handled, so that anything
		 * that arrives in the meantime re-asserts the CPU interrupt
		 * once they are re-enabled below.
		 */
1660 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1661 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1662 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1663
1664 if (gt_iir)
1665 intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1666 if (pm_iir)
1667 intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1668
1669 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1670 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1671
1672
1673
1674 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1675
1676 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1677 I915_LPE_PIPE_B_INTERRUPT))
1678 intel_lpe_audio_irq_handler(dev_priv);
1679
1680
1681
1682
1683
1684 if (iir)
1685 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1686
1687 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1688 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1689
1690 if (gt_iir)
1691 gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
1692 if (pm_iir)
1693 gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
1694
1695 if (hotplug_status)
1696 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1697
1698 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1699 } while (0);
1700
1701 pmu_irq_stats(dev_priv, ret);
1702
1703 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1704
1705 return ret;
1706}
1707
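/*
 * Top level interrupt handler for CHV: like VLV, but the GT interrupts are
 * routed through the GEN8 master interrupt control register.
 */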
1708static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1709{
1710 struct drm_i915_private *dev_priv = arg;
1711 irqreturn_t ret = IRQ_NONE;
1712
1713 if (!intel_irqs_enabled(dev_priv))
1714 return IRQ_NONE;
1715
1716
1717 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1718
1719 do {
1720 u32 master_ctl, iir;
1721 u32 pipe_stats[I915_MAX_PIPES] = {};
1722 u32 hotplug_status = 0;
1723 u32 ier = 0;
1724
1725 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1726 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1727
1728 if (master_ctl == 0 && iir == 0)
1729 break;
1730
1731 ret = IRQ_HANDLED;

		/*
		 * Disable the master interrupt and VLV_IER while this batch
		 * of IIR bits is acked and handled, so that anything that
		 * arrives in the meantime re-asserts the CPU interrupt once
		 * they are re-enabled below.
		 */
1746 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1747 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1748 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1749
1750 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
1751
1752 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1753 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1754
1755
1756
1757 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1758
1759 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1760 I915_LPE_PIPE_B_INTERRUPT |
1761 I915_LPE_PIPE_C_INTERRUPT))
1762 intel_lpe_audio_irq_handler(dev_priv);
1763
1764
1765
1766
1767
1768 if (iir)
1769 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1770
1771 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1772 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1773
1774 if (hotplug_status)
1775 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1776
1777 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1778 } while (0);
1779
1780 pmu_irq_stats(dev_priv, ret);
1781
1782 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1783
1784 return ret;
1785}
1786
1787static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1788 u32 hotplug_trigger)
1789{
1790 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
1798 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1799 if (!hotplug_trigger) {
1800 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1801 PORTD_HOTPLUG_STATUS_MASK |
1802 PORTC_HOTPLUG_STATUS_MASK |
1803 PORTB_HOTPLUG_STATUS_MASK;
1804 dig_hotplug_reg &= ~mask;
1805 }
1806
1807 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1808 if (!hotplug_trigger)
1809 return;
1810
1811 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1812 hotplug_trigger, dig_hotplug_reg,
1813 dev_priv->hotplug.pch_hpd,
1814 pch_port_hotplug_long_detect);
1815
1816 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1817}
1818
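/*
 * South display engine (IBX PCH) interrupt handler: hotplug, AUX, GMBUS,
 * audio power, FDI and transcoder FIFO underrun events.
 */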
1819static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1820{
1821 enum pipe pipe;
1822 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1823
1824 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1825
1826 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1827 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1828 SDE_AUDIO_POWER_SHIFT);
1829 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1830 port_name(port));
1831 }
1832
1833 if (pch_iir & SDE_AUX_MASK)
1834 dp_aux_irq_handler(dev_priv);
1835
1836 if (pch_iir & SDE_GMBUS)
1837 gmbus_irq_handler(dev_priv);
1838
1839 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1840 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1841
1842 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1843 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1844
1845 if (pch_iir & SDE_POISON)
1846 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1847
1848 if (pch_iir & SDE_FDI_MASK) {
1849 for_each_pipe(dev_priv, pipe)
1850 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1851 pipe_name(pipe),
1852 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1853 }
1854
1855 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1856 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1857
1858 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1859 drm_dbg(&dev_priv->drm,
1860 "PCH transcoder CRC error interrupt\n");
1861
1862 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1863 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1864
1865 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1866 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1867}
1868
1869static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1870{
1871 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1872 enum pipe pipe;
1873
1874 if (err_int & ERR_INT_POISON)
1875 drm_err(&dev_priv->drm, "Poison interrupt\n");
1876
1877 for_each_pipe(dev_priv, pipe) {
1878 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1879 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1880
1881 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1882 if (IS_IVYBRIDGE(dev_priv))
1883 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1884 else
1885 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1886 }
1887 }
1888
1889 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1890}
1891
1892static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1893{
1894 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1895 enum pipe pipe;
1896
1897 if (serr_int & SERR_INT_POISON)
1898 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1899
1900 for_each_pipe(dev_priv, pipe)
1901 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1902 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1903
1904 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1905}
1906
1907static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1908{
1909 enum pipe pipe;
1910 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1911
1912 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1913
1914 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1915 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1916 SDE_AUDIO_POWER_SHIFT_CPT);
1917 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1918 port_name(port));
1919 }
1920
1921 if (pch_iir & SDE_AUX_MASK_CPT)
1922 dp_aux_irq_handler(dev_priv);
1923
1924 if (pch_iir & SDE_GMBUS_CPT)
1925 gmbus_irq_handler(dev_priv);
1926
1927 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1928 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1929
1930 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1931 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1932
1933 if (pch_iir & SDE_FDI_MASK_CPT) {
1934 for_each_pipe(dev_priv, pipe)
1935 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1936 pipe_name(pipe),
1937 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1938 }
1939
1940 if (pch_iir & SDE_ERROR_CPT)
1941 cpt_serr_int_handler(dev_priv);
1942}
1943
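/*
 * ICP+ south display interrupt handler: decode the DDI and Type-C hotplug
 * triggers and the GMBUS interrupt.
 */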
1944static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1945{
1946 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1947 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1948 u32 pin_mask = 0, long_mask = 0;
1949
1950 if (ddi_hotplug_trigger) {
1951 u32 dig_hotplug_reg;
1952
1953 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
1954 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1955
1956 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1957 ddi_hotplug_trigger, dig_hotplug_reg,
1958 dev_priv->hotplug.pch_hpd,
1959 icp_ddi_port_hotplug_long_detect);
1960 }
1961
1962 if (tc_hotplug_trigger) {
1963 u32 dig_hotplug_reg;
1964
1965 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
1966 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
1967
1968 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1969 tc_hotplug_trigger, dig_hotplug_reg,
1970 dev_priv->hotplug.pch_hpd,
1971 icp_tc_port_hotplug_long_detect);
1972 }
1973
1974 if (pin_mask)
1975 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1976
1977 if (pch_iir & SDE_GMBUS_ICP)
1978 gmbus_irq_handler(dev_priv);
1979}
1980
1981static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1982{
1983 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1984 ~SDE_PORTE_HOTPLUG_SPT;
1985 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1986 u32 pin_mask = 0, long_mask = 0;
1987
1988 if (hotplug_trigger) {
1989 u32 dig_hotplug_reg;
1990
1991 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1992 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1993
1994 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1995 hotplug_trigger, dig_hotplug_reg,
1996 dev_priv->hotplug.pch_hpd,
1997 spt_port_hotplug_long_detect);
1998 }
1999
2000 if (hotplug2_trigger) {
2001 u32 dig_hotplug_reg;
2002
2003 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
2004 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2005
2006 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2007 hotplug2_trigger, dig_hotplug_reg,
2008 dev_priv->hotplug.pch_hpd,
2009 spt_port_hotplug2_long_detect);
2010 }
2011
2012 if (pin_mask)
2013 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2014
2015 if (pch_iir & SDE_GMBUS_CPT)
2016 gmbus_irq_handler(dev_priv);
2017}
2018
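/*
 * DP-A (eDP) hotplug handler for ILK/IVB: sample DIGITAL_PORT_HOTPLUG_CNTRL
 * for the pulse duration bits and hand the trigger to the generic hpd
 * decoding.
 */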
2019static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2020 u32 hotplug_trigger)
2021{
2022 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2023
2024 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
2025 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2026
2027 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2028 hotplug_trigger, dig_hotplug_reg,
2029 dev_priv->hotplug.hpd,
2030 ilk_port_hotplug_long_detect);
2031
2032 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2033}
2034
2035static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2036 u32 de_iir)
2037{
2038 enum pipe pipe;
2039 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2040
2041 if (hotplug_trigger)
2042 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2043
2044 if (de_iir & DE_AUX_CHANNEL_A)
2045 dp_aux_irq_handler(dev_priv);
2046
2047 if (de_iir & DE_GSE)
2048 intel_opregion_asle_intr(dev_priv);
2049
2050 if (de_iir & DE_POISON)
2051 drm_err(&dev_priv->drm, "Poison interrupt\n");
2052
2053 for_each_pipe(dev_priv, pipe) {
2054 if (de_iir & DE_PIPE_VBLANK(pipe))
2055 intel_handle_vblank(dev_priv, pipe);
2056
2057 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2058 flip_done_handler(dev_priv, pipe);
2059
2060 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2061 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2062
2063 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2064 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2065 }
2066
2067
2068 if (de_iir & DE_PCH_EVENT) {
2069 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2070
2071 if (HAS_PCH_CPT(dev_priv))
2072 cpt_irq_handler(dev_priv, pch_iir);
2073 else
2074 ibx_irq_handler(dev_priv, pch_iir);
2075
2076
2077 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2078 }
2079
2080 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2081 gen5_rps_irq_handler(&dev_priv->gt.rps);
2082}
2083
2084static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2085 u32 de_iir)
2086{
2087 enum pipe pipe;
2088 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2089
2090 if (hotplug_trigger)
2091 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2092
2093 if (de_iir & DE_ERR_INT_IVB)
2094 ivb_err_int_handler(dev_priv);
2095
2096 if (de_iir & DE_EDP_PSR_INT_HSW) {
2097 u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
2098
2099 intel_psr_irq_handler(dev_priv, psr_iir);
2100 intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
2101 }
2102
2103 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2104 dp_aux_irq_handler(dev_priv);
2105
2106 if (de_iir & DE_GSE_IVB)
2107 intel_opregion_asle_intr(dev_priv);
2108
2109 for_each_pipe(dev_priv, pipe) {
2110 if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2111 intel_handle_vblank(dev_priv, pipe);
2112
2113 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2114 flip_done_handler(dev_priv, pipe);
2115 }
2116
2117
2118 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2119 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2120
2121 cpt_irq_handler(dev_priv, pch_iir);
2122
2123
2124 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2125 }
2126}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
2136static irqreturn_t ilk_irq_handler(int irq, void *arg)
2137{
2138 struct drm_i915_private *i915 = arg;
2139 void __iomem * const regs = i915->uncore.regs;
2140 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2141 irqreturn_t ret = IRQ_NONE;
2142
2143 if (unlikely(!intel_irqs_enabled(i915)))
2144 return IRQ_NONE;
2145
2146
2147 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2148
2149
2150 de_ier = raw_reg_read(regs, DEIER);
2151 raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2152
2153
2154
2155
2156
2157
2158 if (!HAS_PCH_NOP(i915)) {
2159 sde_ier = raw_reg_read(regs, SDEIER);
2160 raw_reg_write(regs, SDEIER, 0);
2161 }
2162
2163
2164
2165 gt_iir = raw_reg_read(regs, GTIIR);
2166 if (gt_iir) {
2167 raw_reg_write(regs, GTIIR, gt_iir);
2168 if (INTEL_GEN(i915) >= 6)
2169 gen6_gt_irq_handler(&i915->gt, gt_iir);
2170 else
2171 gen5_gt_irq_handler(&i915->gt, gt_iir);
2172 ret = IRQ_HANDLED;
2173 }
2174
2175 de_iir = raw_reg_read(regs, DEIIR);
2176 if (de_iir) {
2177 raw_reg_write(regs, DEIIR, de_iir);
2178 if (INTEL_GEN(i915) >= 7)
2179 ivb_display_irq_handler(i915, de_iir);
2180 else
2181 ilk_display_irq_handler(i915, de_iir);
2182 ret = IRQ_HANDLED;
2183 }
2184
2185 if (INTEL_GEN(i915) >= 6) {
2186 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2187 if (pm_iir) {
2188 raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2189 gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
2190 ret = IRQ_HANDLED;
2191 }
2192 }
2193
2194 raw_reg_write(regs, DEIER, de_ier);
2195 if (sde_ier)
2196 raw_reg_write(regs, SDEIER, sde_ier);
2197
2198 pmu_irq_stats(i915, ret);
2199
2200
2201 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2202
2203 return ret;
2204}
2205
2206static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2207 u32 hotplug_trigger)
2208{
2209 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2210
2211 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2212 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2213
2214 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2215 hotplug_trigger, dig_hotplug_reg,
2216 dev_priv->hotplug.hpd,
2217 bxt_port_hotplug_long_detect);
2218
2219 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2220}
2221
2222static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2223{
2224 u32 pin_mask = 0, long_mask = 0;
2225 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2226 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2227
2228 if (trigger_tc) {
2229 u32 dig_hotplug_reg;
2230
2231 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
2232 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2233
2234 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2235 trigger_tc, dig_hotplug_reg,
2236 dev_priv->hotplug.hpd,
2237 gen11_port_hotplug_long_detect);
2238 }
2239
2240 if (trigger_tbt) {
2241 u32 dig_hotplug_reg;
2242
2243 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
2244 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2245
2246 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2247 trigger_tbt, dig_hotplug_reg,
2248 dev_priv->hotplug.hpd,
2249 gen11_port_hotplug_long_detect);
2250 }
2251
2252 if (pin_mask)
2253 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2254 else
2255 drm_err(&dev_priv->drm,
2256 "Unexpected DE HPD interrupt 0x%08x\n", iir);
2257}
2258
2259static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2260{
2261 u32 mask;
2262
2263 if (INTEL_GEN(dev_priv) >= 12)
2264 return TGL_DE_PORT_AUX_DDIA |
2265 TGL_DE_PORT_AUX_DDIB |
2266 TGL_DE_PORT_AUX_DDIC |
2267 TGL_DE_PORT_AUX_USBC1 |
2268 TGL_DE_PORT_AUX_USBC2 |
2269 TGL_DE_PORT_AUX_USBC3 |
2270 TGL_DE_PORT_AUX_USBC4 |
2271 TGL_DE_PORT_AUX_USBC5 |
2272 TGL_DE_PORT_AUX_USBC6;
2273
2274
2275 mask = GEN8_AUX_CHANNEL_A;
2276 if (INTEL_GEN(dev_priv) >= 9)
2277 mask |= GEN9_AUX_CHANNEL_B |
2278 GEN9_AUX_CHANNEL_C |
2279 GEN9_AUX_CHANNEL_D;
2280
2281 if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2282 mask |= CNL_AUX_CHANNEL_F;
2283
2284 if (IS_GEN(dev_priv, 11))
2285 mask |= ICL_AUX_CHANNEL_E;
2286
2287 return mask;
2288}
2289
2290static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2291{
2292 if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
2293 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2294 else if (INTEL_GEN(dev_priv) >= 11)
2295 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2296 else if (INTEL_GEN(dev_priv) >= 9)
2297 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2298 else
2299 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2300}
2301
2302static void
2303gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2304{
2305 bool found = false;
2306
2307 if (iir & GEN8_DE_MISC_GSE) {
2308 intel_opregion_asle_intr(dev_priv);
2309 found = true;
2310 }
2311
2312 if (iir & GEN8_DE_EDP_PSR) {
2313 u32 psr_iir;
2314 i915_reg_t iir_reg;
2315
2316 if (INTEL_GEN(dev_priv) >= 12)
2317 iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
2318 else
2319 iir_reg = EDP_PSR_IIR;
2320
2321 psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
2322 intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
2323
2324 if (psr_iir)
2325 found = true;
2326
2327 intel_psr_irq_handler(dev_priv, psr_iir);
2328 }
2329
2330 if (!found)
2331 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2332}
2333
2334static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2335 u32 te_trigger)
2336{
2337 enum pipe pipe = INVALID_PIPE;
2338 enum transcoder dsi_trans;
2339 enum port port;
2340 u32 val, tmp;
2341
2342
2343
2344
2345
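/*
 * In case of dual link, TE comes from DSI_1; check whether dual link
 * (port sync mode) is enabled on the DSI_0 transcoder.
 */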
2346 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2347 val &= PORT_SYNC_MODE_ENABLE;
2348
2349
2350
2351
2352
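/* if dual link is enabled, read the DSI_0 transcoder registers */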
2353 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2354 PORT_A : PORT_B;
2355 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2356
2357
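/* check that the DSI transcoder is configured in command mode */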
2358 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2359 val = val & OP_MODE_MASK;
2360
2361 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2362 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2363 return;
2364 }
2365
2366
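/* get the pipe for handling the VBLANK event */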
2367 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2368 switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2369 case TRANS_DDI_EDP_INPUT_A_ON:
2370 pipe = PIPE_A;
2371 break;
2372 case TRANS_DDI_EDP_INPUT_B_ONOFF:
2373 pipe = PIPE_B;
2374 break;
2375 case TRANS_DDI_EDP_INPUT_C_ONOFF:
2376 pipe = PIPE_C;
2377 break;
2378 default:
2379 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2380 return;
2381 }
2382
2383 intel_handle_vblank(dev_priv, pipe);
2384
2385
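/* clear the TE bit in the DSI IIR */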
2386 port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2387 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2388 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2389}
2390
2391static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2392{
2393 if (INTEL_GEN(i915) >= 9)
2394 return GEN9_PIPE_PLANE1_FLIP_DONE;
2395 else
2396 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2397}
2398
2399static irqreturn_t
2400gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2401{
2402 irqreturn_t ret = IRQ_NONE;
2403 u32 iir;
2404 enum pipe pipe;
2405
2406 if (master_ctl & GEN8_DE_MISC_IRQ) {
2407 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2408 if (iir) {
2409 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2410 ret = IRQ_HANDLED;
2411 gen8_de_misc_irq_handler(dev_priv, iir);
2412 } else {
2413 drm_err(&dev_priv->drm,
2414 "The master control interrupt lied (DE MISC)!\n");
2415 }
2416 }
2417
2418 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2419 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2420 if (iir) {
2421 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2422 ret = IRQ_HANDLED;
2423 gen11_hpd_irq_handler(dev_priv, iir);
2424 } else {
2425 drm_err(&dev_priv->drm,
2426 "The master control interrupt lied, (DE HPD)!\n");
2427 }
2428 }
2429
2430 if (master_ctl & GEN8_DE_PORT_IRQ) {
2431 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2432 if (iir) {
2433 bool found = false;
2434
2435 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2436 ret = IRQ_HANDLED;
2437
2438 if (iir & gen8_de_port_aux_mask(dev_priv)) {
2439 dp_aux_irq_handler(dev_priv);
2440 found = true;
2441 }
2442
2443 if (IS_GEN9_LP(dev_priv)) {
2444 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2445
2446 if (hotplug_trigger) {
2447 bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2448 found = true;
2449 }
2450 } else if (IS_BROADWELL(dev_priv)) {
2451 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2452
2453 if (hotplug_trigger) {
2454 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2455 found = true;
2456 }
2457 }
2458
2459 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2460 gmbus_irq_handler(dev_priv);
2461 found = true;
2462 }
2463
2464 if (INTEL_GEN(dev_priv) >= 11) {
2465 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2466
2467 if (te_trigger) {
2468 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2469 found = true;
2470 }
2471 }
2472
2473 if (!found)
2474 drm_err(&dev_priv->drm,
2475 "Unexpected DE Port interrupt\n");
2476 } else
2478 drm_err(&dev_priv->drm,
2479 "The master control interrupt lied (DE PORT)!\n");
2480 }
2481
2482 for_each_pipe(dev_priv, pipe) {
2483 u32 fault_errors;
2484
2485 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2486 continue;
2487
2488 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2489 if (!iir) {
2490 drm_err(&dev_priv->drm,
2491 "The master control interrupt lied (DE PIPE)!\n");
2492 continue;
2493 }
2494
2495 ret = IRQ_HANDLED;
2496 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2497
2498 if (iir & GEN8_PIPE_VBLANK)
2499 intel_handle_vblank(dev_priv, pipe);
2500
2501 if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2502 flip_done_handler(dev_priv, pipe);
2503
2504 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2505 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2506
2507 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2508 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2509
2510 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2511 if (fault_errors)
2512 drm_err(&dev_priv->drm,
2513 "Fault errors on pipe %c: 0x%08x\n",
2514 pipe_name(pipe),
2515 fault_errors);
2516 }
2517
2518 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2519 master_ctl & GEN8_DE_PCH_IRQ) {
2520
2521
2522
2523
2524
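/* acknowledge SDEIIR and fan out to the matching PCH interrupt handler */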
2525 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2526 if (iir) {
2527 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2528 ret = IRQ_HANDLED;
2529
2530 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2531 icp_irq_handler(dev_priv, iir);
2532 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2533 spt_irq_handler(dev_priv, iir);
2534 else
2535 cpt_irq_handler(dev_priv, iir);
2536 } else {
2537
2538
2539
2540
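/*
 * The master bit claimed a PCH interrupt but SDEIIR was empty;
 * this can happen spuriously, so only log it at debug level.
 */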
2541 drm_dbg(&dev_priv->drm,
2542 "The master control interrupt lied (SDE)!\n");
2543 }
2544 }
2545
2546 return ret;
2547}
2548
2549static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2550{
2551 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2552
2553
2554
2555
2556
2557
2558
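/*
 * Now with master disabled, get a sample of the level indications for
 * this interrupt. Indications will be cleared on related acks.
 * New indications can and will light up during processing, and will
 * generate a new interrupt after the master is re-enabled.
 */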
2559 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2560}
2561
2562static inline void gen8_master_intr_enable(void __iomem * const regs)
2563{
2564 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2565}
2566
2567static irqreturn_t gen8_irq_handler(int irq, void *arg)
2568{
2569 struct drm_i915_private *dev_priv = arg;
2570 void __iomem * const regs = dev_priv->uncore.regs;
2571 u32 master_ctl;
2572
2573 if (!intel_irqs_enabled(dev_priv))
2574 return IRQ_NONE;
2575
2576 master_ctl = gen8_master_intr_disable(regs);
2577 if (!master_ctl) {
2578 gen8_master_intr_enable(regs);
2579 return IRQ_NONE;
2580 }
2581
2582
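/* find, clear, then process each GT interrupt source */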
2583 gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
2584
2585
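/* IRQs are synced during runtime_suspend, we don't require a wakeref */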
2586 if (master_ctl & ~GEN8_GT_IRQS) {
2587 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2588 gen8_de_irq_handler(dev_priv, master_ctl);
2589 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2590 }
2591
2592 gen8_master_intr_enable(regs);
2593
2594 pmu_irq_stats(dev_priv, IRQ_HANDLED);
2595
2596 return IRQ_HANDLED;
2597}
2598
2599static u32
2600gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
2601{
2602 void __iomem * const regs = gt->uncore->regs;
2603 u32 iir;
2604
2605 if (!(master_ctl & GEN11_GU_MISC_IRQ))
2606 return 0;
2607
2608 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2609 if (likely(iir))
2610 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2611
2612 return iir;
2613}
2614
2615static void
2616gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2617{
2618 if (iir & GEN11_GU_MISC_GSE)
2619 intel_opregion_asle_intr(gt->i915);
2620}
2621
2622static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2623{
2624 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2625
2626
2627
2628
2629
2630
2631
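/*
 * Now with master disabled, get a sample of the level indications for
 * this interrupt. Indications will be cleared on related acks.
 * New indications can and will light up during processing, and will
 * generate a new interrupt after the master is re-enabled.
 */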
2632 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2633}
2634
2635static inline void gen11_master_intr_enable(void __iomem * const regs)
2636{
2637 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2638}
2639
2640static void
2641gen11_display_irq_handler(struct drm_i915_private *i915)
2642{
2643 void __iomem * const regs = i915->uncore.regs;
2644 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2645
2646 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2647
2648
2649
2650
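/* disable display interrupts while the DE handler runs, then re-enable them */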
2651 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2652 gen8_de_irq_handler(i915, disp_ctl);
2653 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2654 GEN11_DISPLAY_IRQ_ENABLE);
2655
2656 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2657}
2658
2659static __always_inline irqreturn_t
2660__gen11_irq_handler(struct drm_i915_private * const i915,
2661 u32 (*intr_disable)(void __iomem * const regs),
2662 void (*intr_enable)(void __iomem * const regs))
2663{
2664 void __iomem * const regs = i915->uncore.regs;
2665 struct intel_gt *gt = &i915->gt;
2666 u32 master_ctl;
2667 u32 gu_misc_iir;
2668
2669 if (!intel_irqs_enabled(i915))
2670 return IRQ_NONE;
2671
2672 master_ctl = intr_disable(regs);
2673 if (!master_ctl) {
2674 intr_enable(regs);
2675 return IRQ_NONE;
2676 }
2677
2678
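/* find, clear, then process each GT interrupt source */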
2679 gen11_gt_irq_handler(gt, master_ctl);
2680
2681
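/* IRQs are synced during runtime_suspend, we don't require a wakeref */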
2682 if (master_ctl & GEN11_DISPLAY_IRQ)
2683 gen11_display_irq_handler(i915);
2684
2685 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2686
2687 intr_enable(regs);
2688
2689 gen11_gu_misc_irq_handler(gt, gu_misc_iir);
2690
2691 pmu_irq_stats(i915, IRQ_HANDLED);
2692
2693 return IRQ_HANDLED;
2694}
2695
2696static irqreturn_t gen11_irq_handler(int irq, void *arg)
2697{
2698 return __gen11_irq_handler(arg,
2699 gen11_master_intr_disable,
2700 gen11_master_intr_enable);
2701}
2702
2703static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
2704{
2705 u32 val;
2706
2707
2708 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
2709
2710
2711 val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
2712 if (unlikely(!val))
2713 return 0;
2714
2715 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
2716
2717
2718
2719
2720
2721
2722 val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
2723 if (unlikely(!val))
2724 return 0;
2725
2726 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
2727
2728 return val;
2729}
2730
2731static inline void dg1_master_intr_enable(void __iomem * const regs)
2732{
2733 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
2734}
2735
2736static irqreturn_t dg1_irq_handler(int irq, void *arg)
2737{
2738 return __gen11_irq_handler(arg,
2739 dg1_master_intr_disable_and_ack,
2740 dg1_master_intr_enable);
2741}
2742
2743
2744
2745
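/*
 * Called from drm generic code, passed a 'crtc', which we use as a pipe
 * index.
 */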
2746int i8xx_enable_vblank(struct drm_crtc *crtc)
2747{
2748 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2749 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2750 unsigned long irqflags;
2751
2752 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2753 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2754 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2755
2756 return 0;
2757}
2758
2759int i915gm_enable_vblank(struct drm_crtc *crtc)
2760{
2761 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2762
2763
2764
2765
2766
2767
2768
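/*
 * Vblank interrupts fail to wake the device up from C2+.
 * Disabling render clock gating during C-states avoids the problem.
 * There is a small power cost, so we only do this while vblank
 * interrupts are actually enabled.
 */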
2769 if (dev_priv->vblank_enabled++ == 0)
2770 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2771
2772 return i8xx_enable_vblank(crtc);
2773}
2774
2775int i965_enable_vblank(struct drm_crtc *crtc)
2776{
2777 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2778 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2779 unsigned long irqflags;
2780
2781 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2782 i915_enable_pipestat(dev_priv, pipe,
2783 PIPE_START_VBLANK_INTERRUPT_STATUS);
2784 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2785
2786 return 0;
2787}
2788
2789int ilk_enable_vblank(struct drm_crtc *crtc)
2790{
2791 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2792 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2793 unsigned long irqflags;
2794 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2795 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2796
2797 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2798 ilk_enable_display_irq(dev_priv, bit);
2799 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2800
2801
2802
2803
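/*
 * The frame counter can get stuck when PSR is active, as no frames
 * are generated; resynchronize the vblank counter.
 */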
2804 if (HAS_PSR(dev_priv))
2805 drm_crtc_vblank_restore(crtc);
2806
2807 return 0;
2808}
2809
2810static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2811 bool enable)
2812{
2813 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2814 enum port port;
2815 u32 tmp;
2816
2817 if (!(intel_crtc->mode_flags &
2818 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2819 return false;
2820
2821
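/* the TE comes from port B when TE1 is in use, otherwise from port A */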
2822 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2823 port = PORT_B;
2824 else
2825 port = PORT_A;
2826
2827 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
2828 if (enable)
2829 tmp &= ~DSI_TE_EVENT;
2830 else
2831 tmp |= DSI_TE_EVENT;
2832
2833 intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
2834
2835 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2836 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2837
2838 return true;
2839}
2840
2841int bdw_enable_vblank(struct drm_crtc *crtc)
2842{
2843 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2845 enum pipe pipe = intel_crtc->pipe;
2846 unsigned long irqflags;
2847
2848 if (gen11_dsi_configure_te(intel_crtc, true))
2849 return 0;
2850
2851 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2852 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2853 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2854
2855
2856
2857
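/*
 * The frame counter can get stuck when PSR is active, as no frames
 * are generated; resynchronize the vblank counter.
 */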
2858 if (HAS_PSR(dev_priv))
2859 drm_crtc_vblank_restore(crtc);
2860
2861 return 0;
2862}
2863
2864
2865
2866
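/*
 * Called from drm generic code, passed a 'crtc', which we use as a pipe
 * index.
 */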
2867void i8xx_disable_vblank(struct drm_crtc *crtc)
2868{
2869 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2870 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2871 unsigned long irqflags;
2872
2873 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2874 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2875 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2876}
2877
2878void i915gm_disable_vblank(struct drm_crtc *crtc)
2879{
2880 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2881
2882 i8xx_disable_vblank(crtc);
2883
2884 if (--dev_priv->vblank_enabled == 0)
2885 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2886}
2887
2888void i965_disable_vblank(struct drm_crtc *crtc)
2889{
2890 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2891 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2892 unsigned long irqflags;
2893
2894 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2895 i915_disable_pipestat(dev_priv, pipe,
2896 PIPE_START_VBLANK_INTERRUPT_STATUS);
2897 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2898}
2899
2900void ilk_disable_vblank(struct drm_crtc *crtc)
2901{
2902 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2903 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2904 unsigned long irqflags;
2905 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2906 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2907
2908 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2909 ilk_disable_display_irq(dev_priv, bit);
2910 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2911}
2912
2913void bdw_disable_vblank(struct drm_crtc *crtc)
2914{
2915 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2916 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2917 enum pipe pipe = intel_crtc->pipe;
2918 unsigned long irqflags;
2919
2920 if (gen11_dsi_configure_te(intel_crtc, false))
2921 return;
2922
2923 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2924 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2925 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2926}
2927
2928static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2929{
2930 struct intel_uncore *uncore = &dev_priv->uncore;
2931
2932 if (HAS_PCH_NOP(dev_priv))
2933 return;
2934
2935 GEN3_IRQ_RESET(uncore, SDE);
2936
2937 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2938 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2939}
2940
2941static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2942{
2943 struct intel_uncore *uncore = &dev_priv->uncore;
2944
2945 if (IS_CHERRYVIEW(dev_priv))
2946 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2947 else
2948 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
2949
2950 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2951 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
2952
2953 i9xx_pipestat_irq_reset(dev_priv);
2954
2955 GEN3_IRQ_RESET(uncore, VLV_);
2956 dev_priv->irq_mask = ~0u;
2957}
2958
2959static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2960{
2961 struct intel_uncore *uncore = &dev_priv->uncore;
2962
2963 u32 pipestat_mask;
2964 u32 enable_mask;
2965 enum pipe pipe;
2966
2967 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2968
2969 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2970 for_each_pipe(dev_priv, pipe)
2971 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2972
2973 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2974 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2975 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2976 I915_LPE_PIPE_A_INTERRUPT |
2977 I915_LPE_PIPE_B_INTERRUPT;
2978
2979 if (IS_CHERRYVIEW(dev_priv))
2980 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2981 I915_LPE_PIPE_C_INTERRUPT;
2982
2983 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2984
2985 dev_priv->irq_mask = ~enable_mask;
2986
2987 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2988}
2989
2990
2991
2992static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2993{
2994 struct intel_uncore *uncore = &dev_priv->uncore;
2995
2996 GEN3_IRQ_RESET(uncore, DE);
2997 dev_priv->irq_mask = ~0u;
2998
2999 if (IS_GEN(dev_priv, 7))
3000 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3001
3002 if (IS_HASWELL(dev_priv)) {
3003 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3004 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3005 }
3006
3007 gen5_gt_irq_reset(&dev_priv->gt);
3008
3009 ibx_irq_reset(dev_priv);
3010}
3011
3012static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3013{
3014 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3015 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3016
3017 gen5_gt_irq_reset(&dev_priv->gt);
3018
3019 spin_lock_irq(&dev_priv->irq_lock);
3020 if (dev_priv->display_irqs_enabled)
3021 vlv_display_irq_reset(dev_priv);
3022 spin_unlock_irq(&dev_priv->irq_lock);
3023}
3024
3025static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3026{
3027 struct intel_uncore *uncore = &dev_priv->uncore;
3028 enum pipe pipe;
3029
3030 gen8_master_intr_disable(dev_priv->uncore.regs);
3031
3032 gen8_gt_irq_reset(&dev_priv->gt);
3033
3034 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3035 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3036
3037 for_each_pipe(dev_priv, pipe)
3038 if (intel_display_power_is_enabled(dev_priv,
3039 POWER_DOMAIN_PIPE(pipe)))
3040 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3041
3042 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3043 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3044 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3045
3046 if (HAS_PCH_SPLIT(dev_priv))
3047 ibx_irq_reset(dev_priv);
3048}
3049
3050static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3051{
3052 struct intel_uncore *uncore = &dev_priv->uncore;
3053 enum pipe pipe;
3054 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3055 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3056
3057 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3058
3059 if (INTEL_GEN(dev_priv) >= 12) {
3060 enum transcoder trans;
3061
3062 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3063 enum intel_display_power_domain domain;
3064
3065 domain = POWER_DOMAIN_TRANSCODER(trans);
3066 if (!intel_display_power_is_enabled(dev_priv, domain))
3067 continue;
3068
3069 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3070 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3071 }
3072 } else {
3073 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3074 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3075 }
3076
3077 for_each_pipe(dev_priv, pipe)
3078 if (intel_display_power_is_enabled(dev_priv,
3079 POWER_DOMAIN_PIPE(pipe)))
3080 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3081
3082 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3083 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3084 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3085
3086 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3087 GEN3_IRQ_RESET(uncore, SDE);
3088
3089
3090 if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
3091 (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
3092 INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
3093 intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
3094 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
3095 intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
3096 SBCLK_RUN_REFCLK_DIS, 0);
3097 }
3098}
3099
3100static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3101{
3102 struct intel_uncore *uncore = &dev_priv->uncore;
3103
3104 if (HAS_MASTER_UNIT_IRQ(dev_priv))
3105 dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
3106 else
3107 gen11_master_intr_disable(dev_priv->uncore.regs);
3108
3109 gen11_gt_irq_reset(&dev_priv->gt);
3110 gen11_display_irq_reset(dev_priv);
3111
3112 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3113 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3114}
3115
3116void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3117 u8 pipe_mask)
3118{
3119 struct intel_uncore *uncore = &dev_priv->uncore;
3120 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
3121 gen8_de_pipe_flip_done_mask(dev_priv);
3122 enum pipe pipe;
3123
3124 spin_lock_irq(&dev_priv->irq_lock);
3125
3126 if (!intel_irqs_enabled(dev_priv)) {
3127 spin_unlock_irq(&dev_priv->irq_lock);
3128 return;
3129 }
3130
3131 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3132 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3133 dev_priv->de_irq_mask[pipe],
3134 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3135
3136 spin_unlock_irq(&dev_priv->irq_lock);
3137}
3138
3139void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3140 u8 pipe_mask)
3141{
3142 struct intel_uncore *uncore = &dev_priv->uncore;
3143 enum pipe pipe;
3144
3145 spin_lock_irq(&dev_priv->irq_lock);
3146
3147 if (!intel_irqs_enabled(dev_priv)) {
3148 spin_unlock_irq(&dev_priv->irq_lock);
3149 return;
3150 }
3151
3152 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3153 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3154
3155 spin_unlock_irq(&dev_priv->irq_lock);
3156
3157
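/* make sure we're done processing display irqs */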
3158 intel_synchronize_irq(dev_priv);
3159}
3160
3161static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3162{
3163 struct intel_uncore *uncore = &dev_priv->uncore;
3164
3165 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
3166 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3167
3168 gen8_gt_irq_reset(&dev_priv->gt);
3169
3170 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3171
3172 spin_lock_irq(&dev_priv->irq_lock);
3173 if (dev_priv->display_irqs_enabled)
3174 vlv_display_irq_reset(dev_priv);
3175 spin_unlock_irq(&dev_priv->irq_lock);
3176}
3177
3178static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
3179 enum hpd_pin pin)
3180{
3181 switch (pin) {
3182 case HPD_PORT_A:
3183
3184
3185
3186
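/*
 * When CPU and PCH are on the same package, port A HPD must be
 * enabled in both the north and south hotplug logic.
 */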
3187 return HAS_PCH_LPT_LP(i915) ?
3188 PORTA_HOTPLUG_ENABLE : 0;
3189 case HPD_PORT_B:
3190 return PORTB_HOTPLUG_ENABLE |
3191 PORTB_PULSE_DURATION_2ms;
3192 case HPD_PORT_C:
3193 return PORTC_HOTPLUG_ENABLE |
3194 PORTC_PULSE_DURATION_2ms;
3195 case HPD_PORT_D:
3196 return PORTD_HOTPLUG_ENABLE |
3197 PORTD_PULSE_DURATION_2ms;
3198 default:
3199 return 0;
3200 }
3201}
3202
3203static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3204{
3205 u32 hotplug;
3206
3207
3208
3209
3210
3211
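/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the DisplayPort spec).
 * The pulse duration bits are reserved on LPT+.
 */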
3212 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3213 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3214 PORTB_HOTPLUG_ENABLE |
3215 PORTC_HOTPLUG_ENABLE |
3216 PORTD_HOTPLUG_ENABLE |
3217 PORTB_PULSE_DURATION_MASK |
3218 PORTC_PULSE_DURATION_MASK |
3219 PORTD_PULSE_DURATION_MASK);
3220 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
3221 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3222}
3223
3224static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3225{
3226 u32 hotplug_irqs, enabled_irqs;
3227
3228 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3229 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3230
3231 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3232
3233 ibx_hpd_detection_setup(dev_priv);
3234}
3235
3236static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
3237 enum hpd_pin pin)
3238{
3239 switch (pin) {
3240 case HPD_PORT_A:
3241 case HPD_PORT_B:
3242 case HPD_PORT_C:
3243 case HPD_PORT_D:
3244 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
3245 default:
3246 return 0;
3247 }
3248}
3249
3250static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
3251 enum hpd_pin pin)
3252{
3253 switch (pin) {
3254 case HPD_PORT_TC1:
3255 case HPD_PORT_TC2:
3256 case HPD_PORT_TC3:
3257 case HPD_PORT_TC4:
3258 case HPD_PORT_TC5:
3259 case HPD_PORT_TC6:
3260 return ICP_TC_HPD_ENABLE(pin);
3261 default:
3262 return 0;
3263 }
3264}
3265
3266static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3267{
3268 u32 hotplug;
3269
3270 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
3271 hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3272 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
3273 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
3274 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
3275 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
3276 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
3277}
3278
3279static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3280{
3281 u32 hotplug;
3282
3283 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
3284 hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3285 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3286 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3287 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
3288 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
3289 ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
3290 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
3291 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
3292}
3293
3294static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3295{
3296 u32 hotplug_irqs, enabled_irqs;
3297
3298 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3299 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3300
3301 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3302 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3303
3304 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3305
3306 icp_ddi_hpd_detection_setup(dev_priv);
3307 icp_tc_hpd_detection_setup(dev_priv);
3308}
3309
3310static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
3311 enum hpd_pin pin)
3312{
3313 switch (pin) {
3314 case HPD_PORT_TC1:
3315 case HPD_PORT_TC2:
3316 case HPD_PORT_TC3:
3317 case HPD_PORT_TC4:
3318 case HPD_PORT_TC5:
3319 case HPD_PORT_TC6:
3320 return GEN11_HOTPLUG_CTL_ENABLE(pin);
3321 default:
3322 return 0;
3323 }
3324}
3325
3326static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3327{
3328 u32 val;
3329
3330 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3331 val |= (INVERT_DDIA_HPD |
3332 INVERT_DDIB_HPD |
3333 INVERT_DDIC_HPD |
3334 INVERT_DDID_HPD);
3335 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3336
3337 icp_hpd_irq_setup(dev_priv);
3338}
3339
3340static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3341{
3342 u32 hotplug;
3343
3344 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
3345 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3346 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3347 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3348 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3349 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3350 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3351 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3352 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
3353}
3354
3355static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3356{
3357 u32 hotplug;
3358
3359 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
3360 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3361 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3362 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3363 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3364 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3365 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3366 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3367 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
3368}
3369
3370static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3371{
3372 u32 hotplug_irqs, enabled_irqs;
3373 u32 val;
3374
3375 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3376 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3377
3378 val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3379 val &= ~hotplug_irqs;
3380 val |= ~enabled_irqs & hotplug_irqs;
3381 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
3382 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3383
3384 gen11_tc_hpd_detection_setup(dev_priv);
3385 gen11_tbt_hpd_detection_setup(dev_priv);
3386
3387 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3388 icp_hpd_irq_setup(dev_priv);
3389}
3390
3391static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3392 enum hpd_pin pin)
3393{
3394 switch (pin) {
3395 case HPD_PORT_A:
3396 return PORTA_HOTPLUG_ENABLE;
3397 case HPD_PORT_B:
3398 return PORTB_HOTPLUG_ENABLE;
3399 case HPD_PORT_C:
3400 return PORTC_HOTPLUG_ENABLE;
3401 case HPD_PORT_D:
3402 return PORTD_HOTPLUG_ENABLE;
3403 default:
3404 return 0;
3405 }
3406}
3407
3408static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3409 enum hpd_pin pin)
3410{
3411 switch (pin) {
3412 case HPD_PORT_E:
3413 return PORTE_HOTPLUG_ENABLE;
3414 default:
3415 return 0;
3416 }
3417}
3418
3419static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3420{
3421 u32 val, hotplug;
3422
3423
3424 if (HAS_PCH_CNP(dev_priv)) {
3425 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3426 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3427 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3428 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3429 }
3430
3431
3432 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3433 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3434 PORTB_HOTPLUG_ENABLE |
3435 PORTC_HOTPLUG_ENABLE |
3436 PORTD_HOTPLUG_ENABLE);
3437 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
3438 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3439
3440 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
3441 hotplug &= ~PORTE_HOTPLUG_ENABLE;
3442 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
3443 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
3444}
3445
3446static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3447{
3448 u32 hotplug_irqs, enabled_irqs;
3449
3450 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3451 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3452
3453 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3454 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3455
3456 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3457
3458 spt_hpd_detection_setup(dev_priv);
3459}
3460
3461static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3462 enum hpd_pin pin)
3463{
3464 switch (pin) {
3465 case HPD_PORT_A:
3466 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3467 DIGITAL_PORTA_PULSE_DURATION_2ms;
3468 default:
3469 return 0;
3470 }
3471}
3472
3473static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3474{
3475 u32 hotplug;
3476
3477
3478
3479
3480
3481
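/*
 * Enable digital hotplug on the CPU, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the DisplayPort spec).
 * The pulse duration bits are reserved on HSW+.
 */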
3482 hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
3483 hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
3484 DIGITAL_PORTA_PULSE_DURATION_MASK);
3485 hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
3486 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3487}
3488
3489static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3490{
3491 u32 hotplug_irqs, enabled_irqs;
3492
3493 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3494 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3495
3496 if (INTEL_GEN(dev_priv) >= 8)
3497 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3498 else
3499 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3500
3501 ilk_hpd_detection_setup(dev_priv);
3502
3503 ibx_hpd_irq_setup(dev_priv);
3504}
3505
3506static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3507 enum hpd_pin pin)
3508{
3509 u32 hotplug;
3510
3511 switch (pin) {
3512 case HPD_PORT_A:
3513 hotplug = PORTA_HOTPLUG_ENABLE;
3514 if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3515 hotplug |= BXT_DDIA_HPD_INVERT;
3516 return hotplug;
3517 case HPD_PORT_B:
3518 hotplug = PORTB_HOTPLUG_ENABLE;
3519 if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3520 hotplug |= BXT_DDIB_HPD_INVERT;
3521 return hotplug;
3522 case HPD_PORT_C:
3523 hotplug = PORTC_HOTPLUG_ENABLE;
3524 if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3525 hotplug |= BXT_DDIC_HPD_INVERT;
3526 return hotplug;
3527 default:
3528 return 0;
3529 }
3530}
3531
3532static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3533{
3534 u32 hotplug;
3535
3536 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3537 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3538 PORTB_HOTPLUG_ENABLE |
3539 PORTC_HOTPLUG_ENABLE |
3540 BXT_DDIA_HPD_INVERT |
3541 BXT_DDIB_HPD_INVERT |
3542 BXT_DDIC_HPD_INVERT);
3543 hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
3544 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3545}
3546
3547static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3548{
3549 u32 hotplug_irqs, enabled_irqs;
3550
3551 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3552 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3553
3554 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3555
3556 bxt_hpd_detection_setup(dev_priv);
3557}
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
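/*
 * SDEIER is also touched by the interrupt handler to work around missed
 * PCH interrupts. Hence we can't update it after the interrupt handler
 * is enabled - instead we unconditionally enable all PCH interrupt
 * sources here, but then only unmask them as needed with SDEIMR.
 */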
3570static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3571{
3572 struct intel_uncore *uncore = &dev_priv->uncore;
3573 u32 mask;
3574
3575 if (HAS_PCH_NOP(dev_priv))
3576 return;
3577
3578 if (HAS_PCH_IBX(dev_priv))
3579 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3580 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3581 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3582 else
3583 mask = SDE_GMBUS_CPT;
3584
3585 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3586}
3587
3588static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3589{
3590 struct intel_uncore *uncore = &dev_priv->uncore;
3591 u32 display_mask, extra_mask;
3592
3593 if (INTEL_GEN(dev_priv) >= 7) {
3594 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3595 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3596 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3597 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3598 DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3599 DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3600 DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3601 DE_DP_A_HOTPLUG_IVB);
3602 } else {
3603 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3604 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3605 DE_PIPEA_CRC_DONE | DE_POISON);
3606 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3607 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3608 DE_PLANE_FLIP_DONE(PLANE_A) |
3609 DE_PLANE_FLIP_DONE(PLANE_B) |
3610 DE_DP_A_HOTPLUG);
3611 }
3612
3613 if (IS_HASWELL(dev_priv)) {
3614 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3615 display_mask |= DE_EDP_PSR_INT_HSW;
3616 }
3617
3618 if (IS_IRONLAKE_M(dev_priv))
3619 extra_mask |= DE_PCU_EVENT;
3620
3621 dev_priv->irq_mask = ~display_mask;
3622
3623 ibx_irq_postinstall(dev_priv);
3624
3625 gen5_gt_irq_postinstall(&dev_priv->gt);
3626
3627 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3628 display_mask | extra_mask);
3629}
3630
3631void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3632{
3633 lockdep_assert_held(&dev_priv->irq_lock);
3634
3635 if (dev_priv->display_irqs_enabled)
3636 return;
3637
3638 dev_priv->display_irqs_enabled = true;
3639
3640 if (intel_irqs_enabled(dev_priv)) {
3641 vlv_display_irq_reset(dev_priv);
3642 vlv_display_irq_postinstall(dev_priv);
3643 }
3644}
3645
3646void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3647{
3648 lockdep_assert_held(&dev_priv->irq_lock);
3649
3650 if (!dev_priv->display_irqs_enabled)
3651 return;
3652
3653 dev_priv->display_irqs_enabled = false;
3654
3655 if (intel_irqs_enabled(dev_priv))
3656 vlv_display_irq_reset(dev_priv);
3657}
3658
3659
3660static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3661{
3662 gen5_gt_irq_postinstall(&dev_priv->gt);
3663
3664 spin_lock_irq(&dev_priv->irq_lock);
3665 if (dev_priv->display_irqs_enabled)
3666 vlv_display_irq_postinstall(dev_priv);
3667 spin_unlock_irq(&dev_priv->irq_lock);
3668
3669 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3670 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3671}
3672
3673static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3674{
3675 struct intel_uncore *uncore = &dev_priv->uncore;
3676
3677 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3678 GEN8_PIPE_CDCLK_CRC_DONE;
3679 u32 de_pipe_enables;
3680 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3681 u32 de_port_enables;
3682 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3683 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3684 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3685 enum pipe pipe;
3686
3687 if (INTEL_GEN(dev_priv) <= 10)
3688 de_misc_masked |= GEN8_DE_MISC_GSE;
3689
3690 if (IS_GEN9_LP(dev_priv))
3691 de_port_masked |= BXT_DE_PORT_GMBUS;
3692
3693 if (INTEL_GEN(dev_priv) >= 11) {
3694 enum port port;
3695
3696 if (intel_bios_is_dsi_present(dev_priv, &port))
3697 de_port_masked |= DSI0_TE | DSI1_TE;
3698 }
3699
3700 de_pipe_enables = de_pipe_masked |
3701 GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
3702 gen8_de_pipe_flip_done_mask(dev_priv);
3703
3704 de_port_enables = de_port_masked;
3705 if (IS_GEN9_LP(dev_priv))
3706 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3707 else if (IS_BROADWELL(dev_priv))
3708 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3709
3710 if (INTEL_GEN(dev_priv) >= 12) {
3711 enum transcoder trans;
3712
3713 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3714 enum intel_display_power_domain domain;
3715
3716 domain = POWER_DOMAIN_TRANSCODER(trans);
3717 if (!intel_display_power_is_enabled(dev_priv, domain))
3718 continue;
3719
3720 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3721 }
3722 } else {
3723 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3724 }
3725
3726 for_each_pipe(dev_priv, pipe) {
3727 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3728
3729 if (intel_display_power_is_enabled(dev_priv,
3730 POWER_DOMAIN_PIPE(pipe)))
3731 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3732 dev_priv->de_irq_mask[pipe],
3733 de_pipe_enables);
3734 }
3735
3736 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3737 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3738
3739 if (INTEL_GEN(dev_priv) >= 11) {
3740 u32 de_hpd_masked = 0;
3741 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3742 GEN11_DE_TBT_HOTPLUG_MASK;
3743
3744 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3745 de_hpd_enables);
3746 }
3747}
3748
3749static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3750{
3751 if (HAS_PCH_SPLIT(dev_priv))
3752 ibx_irq_postinstall(dev_priv);
3753
3754 gen8_gt_irq_postinstall(&dev_priv->gt);
3755 gen8_de_irq_postinstall(dev_priv);
3756
3757 gen8_master_intr_enable(dev_priv->uncore.regs);
3758}
3759
3760static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3761{
3762 struct intel_uncore *uncore = &dev_priv->uncore;
3763 u32 mask = SDE_GMBUS_ICP;
3764
3765 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3766}
3767
3768static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3769{
3770 struct intel_uncore *uncore = &dev_priv->uncore;
3771 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3772
3773 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3774 icp_irq_postinstall(dev_priv);
3775
3776 gen11_gt_irq_postinstall(&dev_priv->gt);
3777 gen8_de_irq_postinstall(dev_priv);
3778
3779 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3780
3781 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
3782
3783 if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
3784 dg1_master_intr_enable(uncore->regs);
3785 intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
3786 } else {
3787 gen11_master_intr_enable(uncore->regs);
3788 intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3789 }
3790}
3791
3792static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3793{
3794 gen8_gt_irq_postinstall(&dev_priv->gt);
3795
3796 spin_lock_irq(&dev_priv->irq_lock);
3797 if (dev_priv->display_irqs_enabled)
3798 vlv_display_irq_postinstall(dev_priv);
3799 spin_unlock_irq(&dev_priv->irq_lock);
3800
3801 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3802 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3803}
3804
3805static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3806{
3807 struct intel_uncore *uncore = &dev_priv->uncore;
3808
3809 i9xx_pipestat_irq_reset(dev_priv);
3810
3811 GEN2_IRQ_RESET(uncore);
3812 dev_priv->irq_mask = ~0u;
3813}
3814
3815static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3816{
3817 struct intel_uncore *uncore = &dev_priv->uncore;
3818 u16 enable_mask;
3819
3820 intel_uncore_write16(uncore,
3821 EMR,
3822 ~(I915_ERROR_PAGE_TABLE |
3823 I915_ERROR_MEMORY_REFRESH));
3824
3825
3826 dev_priv->irq_mask =
3827 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3828 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3829 I915_MASTER_ERROR_INTERRUPT);
3830
3831 enable_mask =
3832 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3833 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3834 I915_MASTER_ERROR_INTERRUPT |
3835 I915_USER_INTERRUPT;
3836
3837 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
3838
3839
3840
3841 spin_lock_irq(&dev_priv->irq_lock);
3842 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3843 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3844 spin_unlock_irq(&dev_priv->irq_lock);
3845}
3846
3847static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3848 u16 *eir, u16 *eir_stuck)
3849{
3850 struct intel_uncore *uncore = &i915->uncore;
3851 u16 emr;
3852
3853 *eir = intel_uncore_read16(uncore, EIR);
3854
3855 if (*eir)
3856 intel_uncore_write16(uncore, EIR, *eir);
3857
3858 *eir_stuck = intel_uncore_read16(uncore, EIR);
3859 if (*eir_stuck == 0)
3860 return;
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
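/*
 * Toggle all EMR bits to make sure we get an edge in the ISR master
 * error bit if we don't manage to clear all the EIR bits. Some EIR bits
 * can't be cleared except by handling the underlying error, so keep
 * those masked.
 */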
3872 emr = intel_uncore_read16(uncore, EMR);
3873 intel_uncore_write16(uncore, EMR, 0xffff);
3874 intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3875}
3876
3877static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3878 u16 eir, u16 eir_stuck)
3879{
3880 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
3881
3882 if (eir_stuck)
3883 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3884 eir_stuck);
3885}
3886
3887static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3888 u32 *eir, u32 *eir_stuck)
3889{
3890 u32 emr;
3891
3892 *eir = intel_uncore_read(&dev_priv->uncore, EIR);
3893
3894 intel_uncore_write(&dev_priv->uncore, EIR, *eir);
3895
3896 *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
3897 if (*eir_stuck == 0)
3898 return;
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
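/*
 * Toggle all EMR bits to make sure we get an edge in the ISR master
 * error bit if we don't manage to clear all the EIR bits. Some EIR bits
 * can't be cleared except by handling the underlying error, so keep
 * those masked.
 */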
3910 emr = intel_uncore_read(&dev_priv->uncore, EMR);
3911 intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
3912 intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
3913}
3914
3915static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3916 u32 eir, u32 eir_stuck)
3917{
3918 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
3919
3920 if (eir_stuck)
3921 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3922 eir_stuck);
3923}
3924
3925static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3926{
3927 struct drm_i915_private *dev_priv = arg;
3928 irqreturn_t ret = IRQ_NONE;
3929
3930 if (!intel_irqs_enabled(dev_priv))
3931 return IRQ_NONE;
3932
3933
3934 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3935
3936 do {
3937 u32 pipe_stats[I915_MAX_PIPES] = {};
3938 u16 eir = 0, eir_stuck = 0;
3939 u16 iir;
3940
3941 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
3942 if (iir == 0)
3943 break;
3944
3945 ret = IRQ_HANDLED;
3946
3947
3948
3949 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3950
3951 if (iir & I915_MASTER_ERROR_INTERRUPT)
3952 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3953
3954 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
3955
3956 if (iir & I915_USER_INTERRUPT)
3957 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
3958
3959 if (iir & I915_MASTER_ERROR_INTERRUPT)
3960 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
3961
3962 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3963 } while (0);
3964
3965 pmu_irq_stats(dev_priv, ret);
3966
3967 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3968
3969 return ret;
3970}
3971
3972static void i915_irq_reset(struct drm_i915_private *dev_priv)
3973{
3974 struct intel_uncore *uncore = &dev_priv->uncore;
3975
3976 if (I915_HAS_HOTPLUG(dev_priv)) {
3977 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3978 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
3979 }
3980
3981 i9xx_pipestat_irq_reset(dev_priv);
3982
3983 GEN3_IRQ_RESET(uncore, GEN2_);
3984 dev_priv->irq_mask = ~0u;
3985}
3986
3987static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
3988{
3989 struct intel_uncore *uncore = &dev_priv->uncore;
3990 u32 enable_mask;
3991
3992 intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
3993 I915_ERROR_MEMORY_REFRESH));
3994
3995
3996 dev_priv->irq_mask =
3997 ~(I915_ASLE_INTERRUPT |
3998 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3999 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4000 I915_MASTER_ERROR_INTERRUPT);
4001
4002 enable_mask =
4003 I915_ASLE_INTERRUPT |
4004 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4005 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4006 I915_MASTER_ERROR_INTERRUPT |
4007 I915_USER_INTERRUPT;
4008
4009 if (I915_HAS_HOTPLUG(dev_priv)) {
4010
4011 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4012
4013 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4014 }
4015
4016 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4017
4018
4019
4020 spin_lock_irq(&dev_priv->irq_lock);
4021 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4022 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4023 spin_unlock_irq(&dev_priv->irq_lock);
4024
4025 i915_enable_asle_pipestat(dev_priv);
4026}
4027
4028static irqreturn_t i915_irq_handler(int irq, void *arg)
4029{
4030 struct drm_i915_private *dev_priv = arg;
4031 irqreturn_t ret = IRQ_NONE;
4032
4033 if (!intel_irqs_enabled(dev_priv))
4034 return IRQ_NONE;
4035
4036
4037 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4038
4039 do {
4040 u32 pipe_stats[I915_MAX_PIPES] = {};
4041 u32 eir = 0, eir_stuck = 0;
4042 u32 hotplug_status = 0;
4043 u32 iir;
4044
4045 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4046 if (iir == 0)
4047 break;
4048
4049 ret = IRQ_HANDLED;
4050
4051 if (I915_HAS_HOTPLUG(dev_priv) &&
4052 iir & I915_DISPLAY_PORT_INTERRUPT)
4053 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4054
4055
4056
4057 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4058
4059 if (iir & I915_MASTER_ERROR_INTERRUPT)
4060 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4061
4062 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4063
4064 if (iir & I915_USER_INTERRUPT)
4065 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
4066
4067 if (iir & I915_MASTER_ERROR_INTERRUPT)
4068 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4069
4070 if (hotplug_status)
4071 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4072
4073 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4074 } while (0);
4075
4076 pmu_irq_stats(dev_priv, ret);
4077
4078 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4079
4080 return ret;
4081}
4082
4083static void i965_irq_reset(struct drm_i915_private *dev_priv)
4084{
4085 struct intel_uncore *uncore = &dev_priv->uncore;
4086
4087 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4088 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4089
4090 i9xx_pipestat_irq_reset(dev_priv);
4091
4092 GEN3_IRQ_RESET(uncore, GEN2_);
4093 dev_priv->irq_mask = ~0u;
4094}
4095
4096static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4097{
4098 struct intel_uncore *uncore = &dev_priv->uncore;
4099 u32 enable_mask;
4100 u32 error_mask;
4101
4102
4103
4104
4105
4106 if (IS_G4X(dev_priv)) {
4107 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4108 GM45_ERROR_MEM_PRIV |
4109 GM45_ERROR_CP_PRIV |
4110 I915_ERROR_MEMORY_REFRESH);
4111 } else {
4112 error_mask = ~(I915_ERROR_PAGE_TABLE |
4113 I915_ERROR_MEMORY_REFRESH);
4114 }
4115 intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
4116
4117
4118 dev_priv->irq_mask =
4119 ~(I915_ASLE_INTERRUPT |
4120 I915_DISPLAY_PORT_INTERRUPT |
4121 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4122 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4123 I915_MASTER_ERROR_INTERRUPT);
4124
4125 enable_mask =
4126 I915_ASLE_INTERRUPT |
4127 I915_DISPLAY_PORT_INTERRUPT |
4128 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4129 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4130 I915_MASTER_ERROR_INTERRUPT |
4131 I915_USER_INTERRUPT;
4132
4133 if (IS_G4X(dev_priv))
4134 enable_mask |= I915_BSD_USER_INTERRUPT;
4135
4136 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4137
4138
4139
4140 spin_lock_irq(&dev_priv->irq_lock);
4141 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4142 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4143 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4144 spin_unlock_irq(&dev_priv->irq_lock);
4145
4146 i915_enable_asle_pipestat(dev_priv);
4147}
4148
4149static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4150{
4151 u32 hotplug_en;
4152
4153 lockdep_assert_held(&dev_priv->irq_lock);
4154
4155
4156
4157 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4158
4159
4160
4161
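/*
 * Programming the CRT detection parameters tends to generate a spurious
 * hotplug event about three seconds later, so just do it once.
 */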
4162 if (IS_G4X(dev_priv))
4163 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4164 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4165
4166
4167 i915_hotplug_interrupt_update_locked(dev_priv,
4168 HOTPLUG_INT_EN_MASK |
4169 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4170 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4171 hotplug_en);
4172}
4173
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

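/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support, including work items and
 * the per-platform hotplug setup hooks. It does not install the interrupt
 * handler itself; that is done later by intel_irq_install().
 */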
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irq bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_work(dev_priv);

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_DG1(dev_priv))
			dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
			dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}

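/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function frees the resources allocated by intel_irq_init().
 */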
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (HAS_MASTER_UNIT_IRQ(dev_priv))
			return dg1_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

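/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */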
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ we actually enable them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

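/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees the
 * IRQ resources acquired by intel_irq_install().
 */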
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME: we can get called more than once during driver probe error
	 * handling and during driver remove, so bail out if the IRQ was
	 * never installed (or has already been uninstalled).
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

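/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */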
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

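/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */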
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * This flag tracks both driver load/unload and runtime pm / system
	 * suspend, so it is the only thing we need to check here.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}