linux/drivers/gpu/drm/i915/i915_irq.c
<<
>>
Prefs
   1/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
   2 */
   3/*
   4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
   5 * All Rights Reserved.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 */
  28
  29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  30
  31#include <linux/circ_buf.h>
  32#include <linux/slab.h>
  33#include <linux/sysrq.h>
  34
  35#include <drm/drm_drv.h>
  36#include <drm/drm_irq.h>
  37#include <drm/i915_drm.h>
  38
  39#include "display/intel_display_types.h"
  40#include "display/intel_fifo_underrun.h"
  41#include "display/intel_hotplug.h"
  42#include "display/intel_lpe_audio.h"
  43#include "display/intel_psr.h"
  44
  45#include "gt/intel_gt.h"
  46#include "gt/intel_gt_irq.h"
  47#include "gt/intel_gt_pm_irq.h"
  48#include "gt/intel_rps.h"
  49
  50#include "i915_drv.h"
  51#include "i915_irq.h"
  52#include "i915_trace.h"
  53#include "intel_pm.h"
  54
  55/**
  56 * DOC: interrupt handling
  57 *
  58 * These functions provide the basic support for enabling and disabling the
  59 * interrupt handling support. There's a lot more functionality in i915_irq.c
  60 * and related files, but that will be described in separate chapters.
  61 */
  62
/* Per-pin "is this a long pulse?" classifier, selected per platform. */
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

/*
 * Per-platform tables mapping each hotplug pin (HPD_*) to the interrupt
 * bit(s) that platform uses for it. A missing entry means the pin has no
 * hotplug interrupt bit on that platform.
 */

/* ILK: only DP A hotplug lives in the north display interrupt register. */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

/* IVB: DP A hotplug uses a different bit than ILK. */
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

/* BDW: DP A hotplug bit in the GEN8 DE port interrupt register. */
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

/* IBX south display (PCH) hotplug bits. */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

/* CPT south display hotplug bits. */
static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

/* SPT: adds port A and E hotplug; B-D reuse the CPT bits. */
static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

/* i915-era hotplug *enable* bits (PORT_HOTPLUG_EN). */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

/* G4X hotplug *status* bits (SDVO status bits differ from i915). */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* i915 hotplug *status* bits. */
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* GEN11: each TC pin carries both the TC and the TBT hotplug bit. */
static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

/* GEN12: six TC ports; pin numbering shifted relative to gen11. */
static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
};

/* ICP PCH: two DDI ports plus four type-C ports. */
static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};

/* TGP PCH: three DDI ports plus six type-C ports. */
static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
 171
/*
 * gen3_irq_reset - quiesce one IMR/IER/IIR interrupt register triplet.
 * @uncore: uncore handle used for the MMIO accesses
 * @imr/@iir/@ier: the three registers of the triplet
 *
 * Masks everything, disables delivery and drains any latched events.
 * The write order (IMR first, IER, then IIR) matters: nothing new may
 * be latched while IIR is being cleared.
 */
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	/* Mask all interrupt sources first. */
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	/* Disable interrupt delivery entirely. */
	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}
 186
/*
 * gen2_irq_reset - quiesce the single 16-bit gen2 interrupt register set.
 * @uncore: uncore handle used for the MMIO accesses
 *
 * 16-bit counterpart of gen3_irq_reset(): mask all sources, disable
 * delivery, then drain IIR twice since it can hold two queued events.
 */
void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
 200
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	/* Nothing pending: the reset path did its job. */
	if (val == 0)
		return;

	/*
	 * Stale events survived the reset - warn loudly, then clear IIR
	 * twice since it can queue up two events (see gen3_irq_reset()).
	 */
	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
 218
/*
 * 16-bit counterpart of gen3_assert_iir_is_zero(): warn if GEN2_IIR has
 * stale events after a reset, then drain it twice.
 *
 * NOTE(review): @val is a u16 but is printed with %08x; harmless after
 * integer promotion, just wider than necessary.
 */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
 233
/*
 * gen3_irq_init - arm one IMR/IER/IIR triplet.
 * @uncore: uncore handle used for the MMIO accesses
 * @imr/@imr_val: interrupt mask register and the value to program
 * @ier/@ier_val: interrupt enable register and the value to program
 * @iir: identity register, asserted to be clean before enabling
 *
 * IER is written before IMR so delivery is enabled before sources are
 * unmasked; the posting read flushes the IMR write.
 */
void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}
 245
/*
 * gen2_irq_init - arm the 16-bit gen2 interrupt registers.
 * @uncore: uncore handle used for the MMIO accesses
 * @imr_val: value for GEN2_IMR
 * @ier_val: value for GEN2_IER
 *
 * 16-bit counterpart of gen3_irq_init(); same IER-before-IMR ordering.
 */
void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
 255
 256/* For display hotplug interrupt */
 257static inline void
 258i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
 259                                     u32 mask,
 260                                     u32 bits)
 261{
 262        u32 val;
 263
 264        lockdep_assert_held(&dev_priv->irq_lock);
 265        WARN_ON(bits & ~mask);
 266
 267        val = I915_READ(PORT_HOTPLUG_EN);
 268        val &= ~mask;
 269        val |= bits;
 270        I915_WRITE(PORT_HOTPLUG_EN, val);
 271}
 272
/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	/* Take irq_lock ourselves, then delegate to the locked variant. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
 293
 294/**
 295 * ilk_update_display_irq - update DEIMR
 296 * @dev_priv: driver private
 297 * @interrupt_mask: mask of interrupt bits to update
 298 * @enabled_irq_mask: mask of interrupt bits to enable
 299 */
 300void ilk_update_display_irq(struct drm_i915_private *dev_priv,
 301                            u32 interrupt_mask,
 302                            u32 enabled_irq_mask)
 303{
 304        u32 new_val;
 305
 306        lockdep_assert_held(&dev_priv->irq_lock);
 307
 308        WARN_ON(enabled_irq_mask & ~interrupt_mask);
 309
 310        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 311                return;
 312
 313        new_val = dev_priv->irq_mask;
 314        new_val &= ~interrupt_mask;
 315        new_val |= (~enabled_irq_mask & interrupt_mask);
 316
 317        if (new_val != dev_priv->irq_mask) {
 318                dev_priv->irq_mask = new_val;
 319                I915_WRITE(DEIMR, dev_priv->irq_mask);
 320                POSTING_READ(DEIMR);
 321        }
 322}
 323
 324/**
 325 * bdw_update_port_irq - update DE port interrupt
 326 * @dev_priv: driver private
 327 * @interrupt_mask: mask of interrupt bits to update
 328 * @enabled_irq_mask: mask of interrupt bits to enable
 329 */
 330static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
 331                                u32 interrupt_mask,
 332                                u32 enabled_irq_mask)
 333{
 334        u32 new_val;
 335        u32 old_val;
 336
 337        lockdep_assert_held(&dev_priv->irq_lock);
 338
 339        WARN_ON(enabled_irq_mask & ~interrupt_mask);
 340
 341        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 342                return;
 343
 344        old_val = I915_READ(GEN8_DE_PORT_IMR);
 345
 346        new_val = old_val;
 347        new_val &= ~interrupt_mask;
 348        new_val |= (~enabled_irq_mask & interrupt_mask);
 349
 350        if (new_val != old_val) {
 351                I915_WRITE(GEN8_DE_PORT_IMR, new_val);
 352                POSTING_READ(GEN8_DE_PORT_IMR);
 353        }
 354}
 355
 356/**
 357 * bdw_update_pipe_irq - update DE pipe interrupt
 358 * @dev_priv: driver private
 359 * @pipe: pipe whose interrupt to update
 360 * @interrupt_mask: mask of interrupt bits to update
 361 * @enabled_irq_mask: mask of interrupt bits to enable
 362 */
 363void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
 364                         enum pipe pipe,
 365                         u32 interrupt_mask,
 366                         u32 enabled_irq_mask)
 367{
 368        u32 new_val;
 369
 370        lockdep_assert_held(&dev_priv->irq_lock);
 371
 372        WARN_ON(enabled_irq_mask & ~interrupt_mask);
 373
 374        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 375                return;
 376
 377        new_val = dev_priv->de_irq_mask[pipe];
 378        new_val &= ~interrupt_mask;
 379        new_val |= (~enabled_irq_mask & interrupt_mask);
 380
 381        if (new_val != dev_priv->de_irq_mask[pipe]) {
 382                dev_priv->de_irq_mask[pipe] = new_val;
 383                I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
 384                POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
 385        }
 386}
 387
 388/**
 389 * ibx_display_interrupt_update - update SDEIMR
 390 * @dev_priv: driver private
 391 * @interrupt_mask: mask of interrupt bits to update
 392 * @enabled_irq_mask: mask of interrupt bits to enable
 393 */
 394void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 395                                  u32 interrupt_mask,
 396                                  u32 enabled_irq_mask)
 397{
 398        u32 sdeimr = I915_READ(SDEIMR);
 399        sdeimr &= ~interrupt_mask;
 400        sdeimr |= (~enabled_irq_mask & interrupt_mask);
 401
 402        WARN_ON(enabled_irq_mask & ~interrupt_mask);
 403
 404        lockdep_assert_held(&dev_priv->irq_lock);
 405
 406        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 407                return;
 408
 409        I915_WRITE(SDEIMR, sdeimr);
 410        POSTING_READ(SDEIMR);
 411}
 412
/*
 * i915_pipestat_enable_mask - derive PIPESTAT enable bits for a pipe.
 * @dev_priv: driver private
 * @pipe: which pipe's cached status mask to translate
 *
 * The enable bits normally sit exactly 16 bits above their status bits,
 * so the baseline is a simple shift; on gen5+ (VLV/CHV bits) a few
 * enable bits do not line up and are patched in by hand below.
 * Returns 0 if an unsupported PSR status bit is set.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16; /* enable bits live 16 above status */

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Pre-gen5 the shift is exact; skip the fixups. */
	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* These enable bits are NOT at status << 16; fix them up manually. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	/* Catch bits that stray outside the architectural masks. */
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
 453
/*
 * i915_enable_pipestat - enable PIPESTAT status interrupt(s) for a pipe.
 * @dev_priv: driver private
 * @pipe: target pipe
 * @status_mask: PIPESTAT status bits to enable
 *
 * Updates the cached per-pipe mask, then writes the recomputed enable
 * bits together with the status bits (the latter presumably acting as
 * write-to-clear for stale events - mirrors i915_disable_pipestat()).
 * Caller must hold dev_priv->irq_lock.
 */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	/* Already enabled - nothing to do. */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
 476
/*
 * i915_disable_pipestat - disable PIPESTAT status interrupt(s) for a pipe.
 * @dev_priv: driver private
 * @pipe: target pipe
 * @status_mask: PIPESTAT status bits to disable
 *
 * Inverse of i915_enable_pipestat(): clears the bits from the cached
 * mask and rewrites the register with the remaining enable bits plus
 * the status bits being dropped.
 * Caller must hold dev_priv->irq_lock.
 */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	/* Already disabled - nothing to do. */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
 499
 500static bool i915_has_asle(struct drm_i915_private *dev_priv)
 501{
 502        if (!dev_priv->opregion.asle)
 503                return false;
 504
 505        return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
 506}
 507
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 *
 * Enables the legacy backlight (BLC) event on pipe B, and additionally
 * on pipe A for gen4+. No-op when the platform has no usable ASLE.
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
 526
 527/*
 528 * This timing diagram depicts the video signal in and
 529 * around the vertical blanking period.
 530 *
 531 * Assumptions about the fictitious mode used in this example:
 532 *  vblank_start >= 3
 533 *  vsync_start = vblank_start + 1
 534 *  vsync_end = vblank_start + 2
 535 *  vtotal = vblank_start + 3
 536 *
 537 *           start of vblank:
 538 *           latch double buffered registers
 539 *           increment frame counter (ctg+)
 540 *           generate start of vblank interrupt (gen4+)
 541 *           |
 542 *           |          frame start:
 543 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 544 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 545 *           |          |
 546 *           |          |  start of vsync:
 547 *           |          |  generate vsync interrupt
 548 *           |          |  |
 549 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 550 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 551 * ----va---> <-----------------vb--------------------> <--------va-------------
 552 *       |          |       <----vs----->                     |
 553 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 554 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 555 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 556 *       |          |                                         |
 557 *       last visible pixel                                   first visible pixel
 558 *                  |                                         increment frame counter (gen3/4)
 559 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 560 *
 561 * x  = horizontal active
 562 * _  = horizontal blanking
 563 * hs = horizontal sync
 564 * va = vertical active
 565 * vb = vertical blanking
 566 * vs = vertical sync
 567 * vbs = vblank_start (number)
 568 *
 569 * Summary:
 570 * - most events happen at the start of horizontal sync
 571 * - frame start happens at the start of horizontal blank, 1-4 lines
 572 *   (depending on PIPECONF settings) after the start of vblank
 573 * - gen3/4 pixel and frame counter are synchronized with the start
 574 *   of horizontal active on the first line of vertical active
 575 */
 576
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	/* Interlaced modes program half the vertical timings. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/* uncore.lock guards the raw (_FW) reads below. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 * The result is truncated to the counter's 24-bit width.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
 645
 646u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
 647{
 648        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 649        enum pipe pipe = to_intel_crtc(crtc)->pipe;
 650
 651        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
 652}
 653
/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	/*
	 * Lines scanned since last vblank = elapsed time * pixel clock /
	 * line length. The 1000 factor suggests crtc_clock is in kHz -
	 * NOTE(review): matches drm mode conventions, confirm if reused.
	 */
	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	/* Clamp to the last line, then bias by vblank_start modulo vtotal. */
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
 704
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	/* Inactive pipe: no meaningful scanline. */
	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	/* Some encoder/platform combos can't use PIPEDSL; see helper. */
	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	/* Gen2 has a narrower scanline field in PIPEDSL. */
	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squak at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		/* Re-read for up to 100us waiting for a non-zero value. */
		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
 764
/*
 * i915_get_crtc_scanoutpos - sample the current scanout position of a crtc
 * @dev: drm device
 * @index: drm crtc index
 * @in_vblank_irq: part of the drm hook signature (unused in this body)
 * @vpos: returns vertical position relative to vblank end
 * @hpos: returns horizontal position (0 when only a scanline counter exists)
 * @stime: optional system timestamp taken just before the register query
 * @etime: optional system timestamp taken just after the register query
 * @mode: hardware mode timings used to decode the raw counter
 *
 * While in vblank the returned position is negative, counting up towards
 * 0 at vbl_end; outside vblank it is positive, counting up from vbl_end.
 * Returns false if the pipe is disabled (crtc_clock == 0), true otherwise.
 */
bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
			      bool in_vblank_irq, int *vpos, int *hpos,
			      ktime_t *stime, ktime_t *etime,
			      const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	/*
	 * gen2, g4x and gen5+ use the scanline counter; everything else
	 * reads the frame pixel counter. A mode can also force the
	 * scanline counter via its private flags.
	 */
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	/* Convert the frame timings to per-field timings for interlace. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		/* Split the pixel count back into line and pixel. */
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
 881
 882int intel_get_crtc_scanline(struct intel_crtc *crtc)
 883{
 884        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 885        unsigned long irqflags;
 886        int position;
 887
 888        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 889        position = __intel_get_crtc_scanline(crtc);
 890        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 891
 892        return position;
 893}
 894
/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	/* Disable DOP clock gating for the duration of the register access. */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	/* Drain every slice the irq handler flagged as having an error. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		/* ffs() is 1-based; convert to a 0-based slice index. */
		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Ack the error and re-arm detection for this slice. */
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		/*
		 * NOTE(review): kasprintf() results are not NULL-checked; an
		 * allocation failure would terminate the env list early.
		 */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore DOP clock gating to its previous state. */
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	/* Every flagged slice must have been consumed above. */
	WARN_ON(dev_priv->l3_parity.which_slice);
	/* Re-enable the parity interrupt now that all errors are handled. */
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
 976
 977static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 978{
 979        switch (pin) {
 980        case HPD_PORT_C:
 981                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
 982        case HPD_PORT_D:
 983                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
 984        case HPD_PORT_E:
 985                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
 986        case HPD_PORT_F:
 987                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
 988        default:
 989                return false;
 990        }
 991}
 992
 993static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 994{
 995        switch (pin) {
 996        case HPD_PORT_D:
 997                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
 998        case HPD_PORT_E:
 999                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1000        case HPD_PORT_F:
1001                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1002        case HPD_PORT_G:
1003                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1004        case HPD_PORT_H:
1005                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
1006        case HPD_PORT_I:
1007                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
1008        default:
1009                return false;
1010        }
1011}
1012
1013static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1014{
1015        switch (pin) {
1016        case HPD_PORT_A:
1017                return val & PORTA_HOTPLUG_LONG_DETECT;
1018        case HPD_PORT_B:
1019                return val & PORTB_HOTPLUG_LONG_DETECT;
1020        case HPD_PORT_C:
1021                return val & PORTC_HOTPLUG_LONG_DETECT;
1022        default:
1023                return false;
1024        }
1025}
1026
1027static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1028{
1029        switch (pin) {
1030        case HPD_PORT_A:
1031                return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
1032        case HPD_PORT_B:
1033                return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
1034        case HPD_PORT_C:
1035                return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
1036        default:
1037                return false;
1038        }
1039}
1040
1041static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1042{
1043        switch (pin) {
1044        case HPD_PORT_C:
1045                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1046        case HPD_PORT_D:
1047                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1048        case HPD_PORT_E:
1049                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1050        case HPD_PORT_F:
1051                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1052        default:
1053                return false;
1054        }
1055}
1056
1057static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1058{
1059        switch (pin) {
1060        case HPD_PORT_D:
1061                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1062        case HPD_PORT_E:
1063                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1064        case HPD_PORT_F:
1065                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1066        case HPD_PORT_G:
1067                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1068        case HPD_PORT_H:
1069                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
1070        case HPD_PORT_I:
1071                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
1072        default:
1073                return false;
1074        }
1075}
1076
1077static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1078{
1079        switch (pin) {
1080        case HPD_PORT_E:
1081                return val & PORTE_HOTPLUG_LONG_DETECT;
1082        default:
1083                return false;
1084        }
1085}
1086
1087static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1088{
1089        switch (pin) {
1090        case HPD_PORT_A:
1091                return val & PORTA_HOTPLUG_LONG_DETECT;
1092        case HPD_PORT_B:
1093                return val & PORTB_HOTPLUG_LONG_DETECT;
1094        case HPD_PORT_C:
1095                return val & PORTC_HOTPLUG_LONG_DETECT;
1096        case HPD_PORT_D:
1097                return val & PORTD_HOTPLUG_LONG_DETECT;
1098        default:
1099                return false;
1100        }
1101}
1102
1103static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1104{
1105        switch (pin) {
1106        case HPD_PORT_A:
1107                return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1108        default:
1109                return false;
1110        }
1111}
1112
1113static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1114{
1115        switch (pin) {
1116        case HPD_PORT_B:
1117                return val & PORTB_HOTPLUG_LONG_DETECT;
1118        case HPD_PORT_C:
1119                return val & PORTC_HOTPLUG_LONG_DETECT;
1120        case HPD_PORT_D:
1121                return val & PORTD_HOTPLUG_LONG_DETECT;
1122        default:
1123                return false;
1124        }
1125}
1126
1127static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1128{
1129        switch (pin) {
1130        case HPD_PORT_B:
1131                return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1132        case HPD_PORT_C:
1133                return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1134        case HPD_PORT_D:
1135                return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1136        default:
1137                return false;
1138        }
1139}
1140
1141/*
1142 * Get a bit mask of pins that have triggered, and which ones may be long.
1143 * This can be called multiple times with the same masks to accumulate
1144 * hotplug detection results from several registers.
1145 *
1146 * Note that the caller is expected to zero out the masks initially.
1147 */
1148static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1149                               u32 *pin_mask, u32 *long_mask,
1150                               u32 hotplug_trigger, u32 dig_hotplug_reg,
1151                               const u32 hpd[HPD_NUM_PINS],
1152                               bool long_pulse_detect(enum hpd_pin pin, u32 val))
1153{
1154        enum hpd_pin pin;
1155
1156        BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1157
1158        for_each_hpd_pin(pin) {
1159                if ((hpd[pin] & hotplug_trigger) == 0)
1160                        continue;
1161
1162                *pin_mask |= BIT(pin);
1163
1164                if (long_pulse_detect(pin, dig_hotplug_reg))
1165                        *long_mask |= BIT(pin);
1166        }
1167
1168        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1169                         hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1170
1171}
1172
/* GMBUS transfer completed: wake anyone sleeping on the GMBUS wait queue. */
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1177
/*
 * DP AUX transfer completed. AUX waiters share gmbus_wait_queue, so
 * waking that queue covers them as well.
 */
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1182
#if defined(CONFIG_DEBUG_FS)
/*
 * Forward one CRC sample for @pipe to the drm CRC infrastructure,
 * discarding the first sample(s) after enabling since the hardware
 * produces bogus values there (see comment below). pipe_crc->lock
 * serializes access to the skipped counter.
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
/* No-op stub when debugfs (and thus the CRC interface) is compiled out. */
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif
1225
1226
/* Only one CRC result register is read here; the remaining slots are zero. */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}
1234
/* Read all five IVB-style CRC result registers and forward the sample. */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
1245
1246static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1247                                      enum pipe pipe)
1248{
1249        u32 res1, res2;
1250
1251        if (INTEL_GEN(dev_priv) >= 3)
1252                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1253        else
1254                res1 = 0;
1255
1256        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1257                res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1258        else
1259                res2 = 0;
1260
1261        display_pipe_crc_irq_handler(dev_priv, pipe,
1262                                     I915_READ(PIPE_CRC_RES_RED(pipe)),
1263                                     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1264                                     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1265                                     res1, res2);
1266}
1267
1268static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1269{
1270        enum pipe pipe;
1271
1272        for_each_pipe(dev_priv, pipe) {
1273                I915_WRITE(PIPESTAT(pipe),
1274                           PIPESTAT_INT_STATUS_MASK |
1275                           PIPE_FIFO_UNDERRUN_STATUS);
1276
1277                dev_priv->pipestat_irq_mask[pipe] = 0;
1278        }
1279}
1280
/*
 * Read and ack the PIPESTAT registers for every pipe whose event bit is
 * set in @iir, returning the latched status bits in @pipe_stats. FIFO
 * underrun status is always collected since it doesn't raise an
 * interrupt by itself. Takes irq_lock to stay consistent with the
 * display irq enable/disable paths.
 */
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	/* Nothing to collect while display irqs are disabled. */
	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		/* Only look at the enabled status bits when IIR flagged the pipe. */
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}
1346
1347static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1348                                      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1349{
1350        enum pipe pipe;
1351
1352        for_each_pipe(dev_priv, pipe) {
1353                if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1354                        drm_handle_vblank(&dev_priv->drm, pipe);
1355
1356                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1357                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1358
1359                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1360                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1361        }
1362}
1363
1364static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1365                                      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1366{
1367        bool blc_event = false;
1368        enum pipe pipe;
1369
1370        for_each_pipe(dev_priv, pipe) {
1371                if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1372                        drm_handle_vblank(&dev_priv->drm, pipe);
1373
1374                if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1375                        blc_event = true;
1376
1377                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1378                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1379
1380                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1381                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1382        }
1383
1384        if (blc_event || (iir & I915_ASLE_INTERRUPT))
1385                intel_opregion_asle_intr(dev_priv);
1386}
1387
1388static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1389                                      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1390{
1391        bool blc_event = false;
1392        enum pipe pipe;
1393
1394        for_each_pipe(dev_priv, pipe) {
1395                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1396                        drm_handle_vblank(&dev_priv->drm, pipe);
1397
1398                if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1399                        blc_event = true;
1400
1401                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1402                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1403
1404                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1405                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1406        }
1407
1408        if (blc_event || (iir & I915_ASLE_INTERRUPT))
1409                intel_opregion_asle_intr(dev_priv);
1410
1411        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1412                gmbus_irq_handler(dev_priv);
1413}
1414
1415static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1416                                            u32 pipe_stats[I915_MAX_PIPES])
1417{
1418        enum pipe pipe;
1419
1420        for_each_pipe(dev_priv, pipe) {
1421                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1422                        drm_handle_vblank(&dev_priv->drm, pipe);
1423
1424                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1425                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1426
1427                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1428                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1429        }
1430
1431        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1432                gmbus_irq_handler(dev_priv);
1433}
1434
/*
 * Read and clear PORT_HOTPLUG_STAT, returning the accumulated hotplug
 * status bits. Retries a bounded number of times because new events can
 * be latched while we are clearing.
 */
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	/* g4x/vlv/chv additionally report DP AUX completion here. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		/* Write back the accumulated bits to ack everything seen. */
		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}
1472
1473static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1474                                 u32 hotplug_status)
1475{
1476        u32 pin_mask = 0, long_mask = 0;
1477
1478        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1479            IS_CHERRYVIEW(dev_priv)) {
1480                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1481
1482                if (hotplug_trigger) {
1483                        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1484                                           hotplug_trigger, hotplug_trigger,
1485                                           hpd_status_g4x,
1486                                           i9xx_port_hotplug_long_detect);
1487
1488                        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1489                }
1490
1491                if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1492                        dp_aux_irq_handler(dev_priv);
1493        } else {
1494                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1495
1496                if (hotplug_trigger) {
1497                        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1498                                           hotplug_trigger, hotplug_trigger,
1499                                           hpd_status_i915,
1500                                           i9xx_port_hotplug_long_detect);
1501                        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1502                }
1503        }
1504}
1505
/*
 * valleyview_irq_handler - top level interrupt handler for VLV
 * @irq: irq number (unused beyond the standard handler signature)
 * @arg: struct drm_i915_private pointer registered with the irq
 *
 * Returns IRQ_HANDLED when any of GTIIR/GEN6_PMIIR/VLV_IIR had bits set,
 * IRQ_NONE otherwise.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		/* Ack GT/PM interrupts now; their handlers run further below. */
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Restore VLV_IER and master enable before running handlers. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1590
/*
 * Top-level IRQ handler for Cherryview: a gen8-style GT master interrupt
 * (GEN8_MASTER_IRQ) combined with VLV-style display interrupt delivery
 * through VLV_IIR/VLV_IER.  All sources are acked first (with the master
 * and VLV_IER disabled), then processed, so a new edge is guaranteed even
 * if more events arrive while we are handling these.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* do { } while (0): single pass with early 'break' on a spurious IRQ */
	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 gt_iir[4];
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		/* Nothing pending on either the GT or display side: spurious */
		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		/* Ack GT interrupts into gt_iir[]; handled after re-enable below */
		gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Restore VLV_IER and the master control to re-arm the edge */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		/* Now process everything that was acked above */
		gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1670
1671static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1672                                u32 hotplug_trigger,
1673                                const u32 hpd[HPD_NUM_PINS])
1674{
1675        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1676
1677        /*
1678         * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1679         * unless we touch the hotplug register, even if hotplug_trigger is
1680         * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1681         * errors.
1682         */
1683        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1684        if (!hotplug_trigger) {
1685                u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1686                        PORTD_HOTPLUG_STATUS_MASK |
1687                        PORTC_HOTPLUG_STATUS_MASK |
1688                        PORTB_HOTPLUG_STATUS_MASK;
1689                dig_hotplug_reg &= ~mask;
1690        }
1691
1692        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1693        if (!hotplug_trigger)
1694                return;
1695
1696        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
1697                           dig_hotplug_reg, hpd,
1698                           pch_port_hotplug_long_detect);
1699
1700        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1701}
1702
1703static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1704{
1705        enum pipe pipe;
1706        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1707
1708        ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1709
1710        if (pch_iir & SDE_AUDIO_POWER_MASK) {
1711                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1712                               SDE_AUDIO_POWER_SHIFT);
1713                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1714                                 port_name(port));
1715        }
1716
1717        if (pch_iir & SDE_AUX_MASK)
1718                dp_aux_irq_handler(dev_priv);
1719
1720        if (pch_iir & SDE_GMBUS)
1721                gmbus_irq_handler(dev_priv);
1722
1723        if (pch_iir & SDE_AUDIO_HDCP_MASK)
1724                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1725
1726        if (pch_iir & SDE_AUDIO_TRANS_MASK)
1727                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1728
1729        if (pch_iir & SDE_POISON)
1730                DRM_ERROR("PCH poison interrupt\n");
1731
1732        if (pch_iir & SDE_FDI_MASK)
1733                for_each_pipe(dev_priv, pipe)
1734                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1735                                         pipe_name(pipe),
1736                                         I915_READ(FDI_RX_IIR(pipe)));
1737
1738        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1739                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1740
1741        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1742                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1743
1744        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1745                intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1746
1747        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1748                intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1749}
1750
1751static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1752{
1753        u32 err_int = I915_READ(GEN7_ERR_INT);
1754        enum pipe pipe;
1755
1756        if (err_int & ERR_INT_POISON)
1757                DRM_ERROR("Poison interrupt\n");
1758
1759        for_each_pipe(dev_priv, pipe) {
1760                if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1761                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1762
1763                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1764                        if (IS_IVYBRIDGE(dev_priv))
1765                                ivb_pipe_crc_irq_handler(dev_priv, pipe);
1766                        else
1767                                hsw_pipe_crc_irq_handler(dev_priv, pipe);
1768                }
1769        }
1770
1771        I915_WRITE(GEN7_ERR_INT, err_int);
1772}
1773
1774static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1775{
1776        u32 serr_int = I915_READ(SERR_INT);
1777        enum pipe pipe;
1778
1779        if (serr_int & SERR_INT_POISON)
1780                DRM_ERROR("PCH poison interrupt\n");
1781
1782        for_each_pipe(dev_priv, pipe)
1783                if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1784                        intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1785
1786        I915_WRITE(SERR_INT, serr_int);
1787}
1788
1789static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1790{
1791        enum pipe pipe;
1792        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1793
1794        ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
1795
1796        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1797                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1798                               SDE_AUDIO_POWER_SHIFT_CPT);
1799                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1800                                 port_name(port));
1801        }
1802
1803        if (pch_iir & SDE_AUX_MASK_CPT)
1804                dp_aux_irq_handler(dev_priv);
1805
1806        if (pch_iir & SDE_GMBUS_CPT)
1807                gmbus_irq_handler(dev_priv);
1808
1809        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1810                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1811
1812        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1813                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1814
1815        if (pch_iir & SDE_FDI_MASK_CPT)
1816                for_each_pipe(dev_priv, pipe)
1817                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1818                                         pipe_name(pipe),
1819                                         I915_READ(FDI_RX_IIR(pipe)));
1820
1821        if (pch_iir & SDE_ERROR_CPT)
1822                cpt_serr_int_handler(dev_priv);
1823}
1824
1825static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1826{
1827        u32 ddi_hotplug_trigger, tc_hotplug_trigger;
1828        u32 pin_mask = 0, long_mask = 0;
1829        bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
1830        const u32 *pins;
1831
1832        if (HAS_PCH_TGP(dev_priv)) {
1833                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1834                tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
1835                tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
1836                pins = hpd_tgp;
1837        } else if (HAS_PCH_JSP(dev_priv)) {
1838                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1839                tc_hotplug_trigger = 0;
1840                pins = hpd_tgp;
1841        } else if (HAS_PCH_MCC(dev_priv)) {
1842                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1843                tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
1844                tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
1845                pins = hpd_icp;
1846        } else {
1847                WARN(!HAS_PCH_ICP(dev_priv),
1848                     "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
1849
1850                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1851                tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
1852                tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
1853                pins = hpd_icp;
1854        }
1855
1856        if (ddi_hotplug_trigger) {
1857                u32 dig_hotplug_reg;
1858
1859                dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
1860                I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1861
1862                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1863                                   ddi_hotplug_trigger,
1864                                   dig_hotplug_reg, pins,
1865                                   icp_ddi_port_hotplug_long_detect);
1866        }
1867
1868        if (tc_hotplug_trigger) {
1869                u32 dig_hotplug_reg;
1870
1871                dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
1872                I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
1873
1874                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1875                                   tc_hotplug_trigger,
1876                                   dig_hotplug_reg, pins,
1877                                   tc_port_hotplug_long_detect);
1878        }
1879
1880        if (pin_mask)
1881                intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1882
1883        if (pch_iir & SDE_GMBUS_ICP)
1884                gmbus_irq_handler(dev_priv);
1885}
1886
1887static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1888{
1889        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1890                ~SDE_PORTE_HOTPLUG_SPT;
1891        u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1892        u32 pin_mask = 0, long_mask = 0;
1893
1894        if (hotplug_trigger) {
1895                u32 dig_hotplug_reg;
1896
1897                dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1898                I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1899
1900                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1901                                   hotplug_trigger, dig_hotplug_reg, hpd_spt,
1902                                   spt_port_hotplug_long_detect);
1903        }
1904
1905        if (hotplug2_trigger) {
1906                u32 dig_hotplug_reg;
1907
1908                dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1909                I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1910
1911                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1912                                   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
1913                                   spt_port_hotplug2_long_detect);
1914        }
1915
1916        if (pin_mask)
1917                intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1918
1919        if (pch_iir & SDE_GMBUS_CPT)
1920                gmbus_irq_handler(dev_priv);
1921}
1922
1923static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1924                                u32 hotplug_trigger,
1925                                const u32 hpd[HPD_NUM_PINS])
1926{
1927        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1928
1929        dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1930        I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
1931
1932        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
1933                           dig_hotplug_reg, hpd,
1934                           ilk_port_hotplug_long_detect);
1935
1936        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1937}
1938
/*
 * Handle Ironlake/Sandybridge display-engine interrupts (DEIIR contents in
 * @de_iir): DP A hotplug, AUX, opregion ASLE, per-pipe vblank/underrun/CRC,
 * chained PCH (SDEIIR) events, and the gen5 PCU event.
 */
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&dev_priv->gt.rps);
}
1984
/*
 * Handle Ivybridge/Haswell display-engine interrupts (DEIIR contents in
 * @de_iir): DP A hotplug, error interrupts, eDP PSR, AUX, ASLE, per-pipe
 * vblank, and chained PCH (SDEIIR) events.
 */
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		/* Handle first, then write back to clear EDP_PSR_IIR */
		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
2025
2026/*
2027 * To handle irqs with the minimum potential races with fresh interrupts, we:
2028 * 1 - Disable Master Interrupt Control.
2029 * 2 - Find the source(s) of the interrupt.
2030 * 3 - Clear the Interrupt Identity bits (IIR).
2031 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2032 * 5 - Re-enable Master Interrupt Control.
2033 */
/*
 * Top-level IRQ handler for Ironlake through Haswell.  Follows the
 * disable-master / ack / handle / re-enable scheme described in the
 * comment above.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		else
			gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	/* PM interrupts (GEN6_PMIIR) only exist from gen6 onwards */
	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
		}
	}

	/* Restore the master and south interrupt enables saved above */
	I915_WRITE(DEIER, de_ier);
	if (!HAS_PCH_NOP(dev_priv))
		I915_WRITE(SDEIER, sde_ier);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
2100
2101static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2102                                u32 hotplug_trigger,
2103                                const u32 hpd[HPD_NUM_PINS])
2104{
2105        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2106
2107        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2108        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2109
2110        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2111                           dig_hotplug_reg, hpd,
2112                           bxt_port_hotplug_long_detect);
2113
2114        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2115}
2116
2117static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2118{
2119        u32 pin_mask = 0, long_mask = 0;
2120        u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2121        u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2122        long_pulse_detect_func long_pulse_detect;
2123        const u32 *hpd;
2124
2125        if (INTEL_GEN(dev_priv) >= 12) {
2126                long_pulse_detect = gen12_port_hotplug_long_detect;
2127                hpd = hpd_gen12;
2128        } else {
2129                long_pulse_detect = gen11_port_hotplug_long_detect;
2130                hpd = hpd_gen11;
2131        }
2132
2133        if (trigger_tc) {
2134                u32 dig_hotplug_reg;
2135
2136                dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2137                I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2138
2139                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2140                                   dig_hotplug_reg, hpd, long_pulse_detect);
2141        }
2142
2143        if (trigger_tbt) {
2144                u32 dig_hotplug_reg;
2145
2146                dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2147                I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2148
2149                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2150                                   dig_hotplug_reg, hpd, long_pulse_detect);
2151        }
2152
2153        if (pin_mask)
2154                intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2155        else
2156                DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2157}
2158
2159static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2160{
2161        u32 mask;
2162
2163        if (INTEL_GEN(dev_priv) >= 12)
2164                return TGL_DE_PORT_AUX_DDIA |
2165                        TGL_DE_PORT_AUX_DDIB |
2166                        TGL_DE_PORT_AUX_DDIC |
2167                        TGL_DE_PORT_AUX_USBC1 |
2168                        TGL_DE_PORT_AUX_USBC2 |
2169                        TGL_DE_PORT_AUX_USBC3 |
2170                        TGL_DE_PORT_AUX_USBC4 |
2171                        TGL_DE_PORT_AUX_USBC5 |
2172                        TGL_DE_PORT_AUX_USBC6;
2173
2174
2175        mask = GEN8_AUX_CHANNEL_A;
2176        if (INTEL_GEN(dev_priv) >= 9)
2177                mask |= GEN9_AUX_CHANNEL_B |
2178                        GEN9_AUX_CHANNEL_C |
2179                        GEN9_AUX_CHANNEL_D;
2180
2181        if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2182                mask |= CNL_AUX_CHANNEL_F;
2183
2184        if (IS_GEN(dev_priv, 11))
2185                mask |= ICL_AUX_CHANNEL_E;
2186
2187        return mask;
2188}
2189
2190static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2191{
2192        if (INTEL_GEN(dev_priv) >= 11)
2193                return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2194        else if (INTEL_GEN(dev_priv) >= 9)
2195                return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2196        else
2197                return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2198}
2199
/*
 * Handle gen8+ DE misc interrupts: opregion ASLE (GSE) and eDP PSR.
 * Complains if the IIR had no bit we recognize (or the PSR IIR was empty).
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir;
		i915_reg_t iir_reg;

		/* Gen12 moved the PSR IIR to a per-transcoder register */
		if (INTEL_GEN(dev_priv) >= 12)
			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
		else
			iir_reg = EDP_PSR_IIR;

		/* Read-then-write-back to ack the PSR IIR bits */
		psr_iir = I915_READ(iir_reg);
		I915_WRITE(iir_reg, psr_iir);

		if (psr_iir)
			found = true;

		intel_psr_irq_handler(dev_priv, psr_iir);
	}

	if (!found)
		DRM_ERROR("Unexpected DE Misc interrupt\n");
}
2231
/*
 * Handle all gen8+ display-engine interrupts selected by @master_ctl:
 * DE misc, gen11+ DE HPD, DE port (AUX/hotplug/GMBUS), per-pipe events,
 * and chained PCH (SDEIIR) interrupts.  Each IIR is acked (written back)
 * before its events are processed.
 */
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			/* Port hotplug lives in the DE port IIR on BXT/BDW */
			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}
2363
/* Disable the gen8 master interrupt and return the pending sources. */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
2376
/* Re-enable the gen8 master interrupt after handling. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
2381
/*
 * Top-level IRQ handler for gen8-gen10: disable the master interrupt,
 * ack GT sources, handle display sources under an RPM wakeref, re-enable
 * the master, then process the previously-acked GT sources.
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;
	u32 gt_iir[4];

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		/* Spurious: nothing pending, just re-arm and bail */
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt */
	gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	/* GT sources were acked above; handle them outside the master window */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);

	return IRQ_HANDLED;
}
2414
/*
 * Ack (read then write back to clear) the GU_MISC IIR if the master control
 * flags a pending GU_MISC interrupt. Returns the latched IIR bits, or 0.
 */
static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}
2430
/*
 * Process GU_MISC bits previously acked by gen11_gu_misc_irq_ack();
 * GSE is routed to the OpRegion ASLE handler.
 */
static void
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(gt->i915);
}
2437
/*
 * Disable top-level gen11 interrupt delivery and return a snapshot of the
 * pending source bits. Pair with gen11_master_intr_enable().
 */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	/* Clearing GEN11_GFX_MSTR_IRQ gates all downstream sources. */
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
2450
/* Re-arm top-level interrupt delivery after gen11_master_intr_disable(). */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
2455
/*
 * Handle the display portion of a gen11 interrupt: gate display interrupt
 * delivery, dispatch to the gen8 DE handler (the register format matches),
 * then re-enable. Runs with rpm wakeref asserts suppressed, as IRQs are
 * synced during runtime suspend.
 */
static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
2474
/*
 * Common gen11+ top-level IRQ flow, parameterized on the master
 * disable/enable pair so platform variants can share the body.
 * Marked __always_inline so each caller gets direct calls to its
 * intr_disable/intr_enable callbacks.
 */
static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
		    u32 (*intr_disable)(void __iomem * const regs),
		    void (*intr_enable)(void __iomem * const regs))
{
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = intr_disable(regs);
	if (!master_ctl) {
		/* Spurious interrupt: nothing pending, just re-arm. */
		intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt. */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	/* Ack GU_MISC before re-enabling; handle it afterwards. */
	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	return IRQ_HANDLED;
}
2509
2510static irqreturn_t gen11_irq_handler(int irq, void *arg)
2511{
2512        return __gen11_irq_handler(arg,
2513                                   gen11_master_intr_disable,
2514                                   gen11_master_intr_enable);
2515}
2516
2517/* Called from drm generic code, passed 'crtc' which
2518 * we use as a pipe index
2519 */
2520int i8xx_enable_vblank(struct drm_crtc *crtc)
2521{
2522        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2523        enum pipe pipe = to_intel_crtc(crtc)->pipe;
2524        unsigned long irqflags;
2525
2526        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2527        i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2528        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2529
2530        return 0;
2531}
2532
/*
 * i915gm variant: keeps a refcount of enabled vblanks so render clock
 * gating can be disabled only while any vblank interrupt is in use.
 */
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}
2548
2549int i965_enable_vblank(struct drm_crtc *crtc)
2550{
2551        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2552        enum pipe pipe = to_intel_crtc(crtc)->pipe;
2553        unsigned long irqflags;
2554
2555        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2556        i915_enable_pipestat(dev_priv, pipe,
2557                             PIPE_START_VBLANK_INTERRUPT_STATUS);
2558        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2559
2560        return 0;
2561}
2562
/* Enable the DE vblank interrupt; IVB+ (gen7) uses a different bit layout. */
int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}
2583
/* Enable the gen8+ per-pipe vblank interrupt for this crtc's pipe. */
int bdw_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}
2602
2603/* Called from drm generic code, passed 'crtc' which
2604 * we use as a pipe index
2605 */
2606void i8xx_disable_vblank(struct drm_crtc *crtc)
2607{
2608        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2609        enum pipe pipe = to_intel_crtc(crtc)->pipe;
2610        unsigned long irqflags;
2611
2612        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2613        i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2614        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2615}
2616
/*
 * i915gm variant: drop the vblank refcount taken in i915gm_enable_vblank()
 * and re-enable render clock gating once the last user is gone.
 */
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->vblank_enabled == 0)
		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
2626
2627void i965_disable_vblank(struct drm_crtc *crtc)
2628{
2629        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2630        enum pipe pipe = to_intel_crtc(crtc)->pipe;
2631        unsigned long irqflags;
2632
2633        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2634        i915_disable_pipestat(dev_priv, pipe,
2635                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2636        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2637}
2638
/* Disable the DE vblank interrupt; mirrors ilk_enable_vblank(). */
void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	/* IVB+ (gen7) uses a different vblank bit layout. */
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2651
2652void bdw_disable_vblank(struct drm_crtc *crtc)
2653{
2654        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2655        enum pipe pipe = to_intel_crtc(crtc)->pipe;
2656        unsigned long irqflags;
2657
2658        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2659        bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2660        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2661}
2662
/* Reset the south display engine (PCH) interrupt registers. */
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Nothing to program when the PCH is a no-op. */
	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	/* CPT/LPT additionally latch south errors in SERR_INT; clear them. */
	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}
2675
2676/*
2677 * SDEIER is also touched by the interrupt handler to work around missed PCH
2678 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2679 * instead we unconditionally enable all PCH interrupt sources here, but then
2680 * only unmask them as needed with SDEIMR.
2681 *
2682 * This function needs to be called before interrupts are enabled.
2683 */
2684static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
2685{
2686        if (HAS_PCH_NOP(dev_priv))
2687                return;
2688
2689        WARN_ON(I915_READ(SDEIER) != 0);
2690        I915_WRITE(SDEIER, 0xffffffff);
2691        POSTING_READ(SDEIER);
2692}
2693
/*
 * Reset all VLV/CHV display interrupt state: GT/display fault status,
 * hotplug, pipestats and the VLV_ IMR/IER/IIR block. Caller holds irq_lock
 * (see callers using i915_hotplug_interrupt_update_locked()).
 */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Clear any latched display-pipe invalid-GTT fault indications. */
	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	/* Write back the latched hotplug status bits to clear them. */
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	/* All sources masked; vlv_display_irq_postinstall() checks this. */
	dev_priv->irq_mask = ~0u;
}
2711
/*
 * Enable the VLV/CHV display interrupt sources: CRC pipestats on every
 * pipe, GMBUS on pipe A, and the per-pipe event + LPE audio interrupts
 * (plus pipe C variants on CHV).
 */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* vlv_display_irq_reset() must have run first (it sets ~0u). */
	WARN_ON(dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
2742
2743/* drm_dma.h hooks
2744*/
/* Reset display engine, GT, and PCH interrupts for ILK-HSW. */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	/* Gen7 latches display errors in GEN7_ERR_INT; clear them. */
	if (IS_GEN(dev_priv, 7))
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	/* HSW has the eDP PSR interrupt pair; mask and clear it. */
	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(&dev_priv->gt);

	ibx_irq_reset(dev_priv);
}
2762
/* Reset VLV interrupts: master enable first, then GT, then display. */
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	/* Display irqs may be kept off while in power-saving states. */
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
2775
/* Reset all gen8 interrupt registers: master, GT, PSR, pipes, misc, PCH. */
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	/* Only touch pipe registers whose power well is currently up. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}
2800
/* Reset the gen11+ display interrupt registers (DE, HPD, PSR, south). */
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		/* Gen12 has per-transcoder PSR interrupt registers. */
		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			/* Skip transcoders whose power well is down. */
			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	/* Only touch pipe registers whose power well is currently up. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}
2838
/* Full gen11 interrupt reset: master off first, then GT, display, misc. */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}
2851
/*
 * Re-program per-pipe interrupt registers for the pipes in @pipe_mask after
 * their power well has been turned on (the registers lose state when the
 * well is off). Vblank and FIFO-underrun are enabled in IER on top of the
 * currently unmasked sources.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	/* Nothing to restore while driver interrupts are disabled. */
	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}
2874
/*
 * Quiesce per-pipe interrupts for the pipes in @pipe_mask before their
 * power well is turned off, then wait for any in-flight handler to finish.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	/* Already quiesced if driver interrupts are disabled. */
	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}
2896
/* Reset CHV interrupts: master off, then GT, PCU, and (if on) display. */
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(&dev_priv->gt);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	/* Display irqs may be kept off while in power-saving states. */
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
2913
2914static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
2915                                  const u32 hpd[HPD_NUM_PINS])
2916{
2917        struct intel_encoder *encoder;
2918        u32 enabled_irqs = 0;
2919
2920        for_each_intel_encoder(&dev_priv->drm, encoder)
2921                if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
2922                        enabled_irqs |= hpd[encoder->hpd_pin];
2923
2924        return enabled_irqs;
2925}
2926
/* Program the PCH hotplug detection logic for ports A-D. */
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
2951
/* Unmask enabled PCH hotplug irqs and program the detection logic. */
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	/* IBX and CPT+ use different SDE hotplug bit layouts/pin tables. */
	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}
2968
/*
 * Enable ICP+ south hotplug detection for the given DDI and Type-C port
 * masks; the TC register is only touched when a TC mask is requested.
 */
static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
				    u32 ddi_hotplug_enable_mask,
				    u32 tc_hotplug_enable_mask)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ddi_hotplug_enable_mask;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	if (tc_hotplug_enable_mask) {
		hotplug = I915_READ(SHOTPLUG_CTL_TC);
		hotplug |= tc_hotplug_enable_mask;
		I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
	}
}
2985
/*
 * Common ICP-family hotplug setup: program the south filter, unmask the
 * enabled irqs from @pins within the DDI/TC masks, and enable detection.
 */
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
			      u32 sde_ddi_mask, u32 sde_tc_mask,
			      u32 ddi_enable_mask, u32 tc_enable_mask,
			      const u32 *pins)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = sde_ddi_mask | sde_tc_mask;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);

	I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
}
3002
3003/*
3004 * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
3005 * equivalent of SDE.
3006 */
3007static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3008{
3009        icp_hpd_irq_setup(dev_priv,
3010                          SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
3011                          ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
3012                          hpd_icp);
3013}
3014
3015/*
3016 * JSP behaves exactly the same as MCC above except that port C is mapped to
3017 * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
3018 * masks & tables rather than ICP's masks & tables.
3019 */
3020static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3021{
3022        icp_hpd_irq_setup(dev_priv,
3023                          SDE_DDI_MASK_TGP, 0,
3024                          TGP_DDI_HPD_ENABLE_MASK, 0,
3025                          hpd_tgp);
3026}
3027
/* Enable north (CPU-side) hotplug detection for TC1-4 and TBT1-4. */
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}
3046
/*
 * Gen11+ hotplug setup: unmask the enabled TC/TBT irqs in the north HPD
 * IMR, program detection, then hand the south side to the ICP/TGP helper.
 */
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	const u32 *hpd;
	u32 val;

	/* Gen12 uses a different pin->irq-bit table than gen11. */
	hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
				  TGP_DDI_HPD_ENABLE_MASK,
				  TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
				  ICP_DDI_HPD_ENABLE_MASK,
				  ICP_TC_HPD_ENABLE_MASK, hpd_icp);
}
3073
/* Enable SPT/CNP PCH hotplug detection for ports A-E. */
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* Port E lives in a second hotplug control register. */
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
3098
/* Unmask enabled SPT hotplug irqs and program PCH detection logic. */
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	/* CNP+ also needs the south hotplug filter count programmed. */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}
3113
/* Program CPU-side (north) digital port A hotplug detection. */
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
3129
/*
 * Unmask the CPU-side DP-A hotplug irq (bit layout differs per gen),
 * program north detection, then do the PCH-side setup as well.
 */
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}
3155
/*
 * Enable BXT hotplug detection on DDI A-C and apply per-port HPD polarity
 * inversion according to the VBT, but only for ports in @enabled_irqs.
 */
static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3186
/* Set up BXT hotplug detection considering every hotplug-capable port. */
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}
3191
3192static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3193{
3194        u32 hotplug_irqs, enabled_irqs;
3195
3196        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3197        hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3198
3199        bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3200
3201        __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3202}
3203
/*
 * Unmask the PCH interrupt sources we always want (GMBUS, AUX, poison
 * where available) and program hotplug detection. IER was already set by
 * ibx_irq_pre_postinstall(); here only SDEIMR is written.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}
3227
/*
 * Install the ILK-HSW display interrupt configuration: display_mask is
 * always unmasked in IMR, extra_mask is only enabled in IER (can be
 * unmasked later, e.g. for vblanks). Also brings up GT, hotplug and PCH
 * interrupts.
 */
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	/* IVB+ (gen7) uses different DE bit definitions than ILK/SNB. */
	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	/* Must run before interrupts are enabled (see its comment). */
	ibx_irq_pre_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev_priv);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}
3277
3278void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3279{
3280        lockdep_assert_held(&dev_priv->irq_lock);
3281
3282        if (dev_priv->display_irqs_enabled)
3283                return;
3284
3285        dev_priv->display_irqs_enabled = true;
3286
3287        if (intel_irqs_enabled(dev_priv)) {
3288                vlv_display_irq_reset(dev_priv);
3289                vlv_display_irq_postinstall(dev_priv);
3290        }
3291}
3292
3293void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3294{
3295        lockdep_assert_held(&dev_priv->irq_lock);
3296
3297        if (!dev_priv->display_irqs_enabled)
3298                return;
3299
3300        dev_priv->display_irqs_enabled = false;
3301
3302        if (intel_irqs_enabled(dev_priv))
3303                vlv_display_irq_reset(dev_priv);
3304}
3305
3306
/*
 * VLV postinstall: GT interrupts first, then the display irq block
 * (only if display irqs are currently enabled — vlv can power that
 * block down), and finally ungate the master interrupt.
 */
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Master enable last; the posting read flushes the write to hw. */
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);
}
3319
/*
 * Gen8+ display engine interrupt postinstall: per-pipe, port, misc and
 * (gen11+) hotplug interrupt registers. Pipes whose power well is down
 * are skipped here — only their software mask is recorded.
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	/* On gen11+ the GSE interrupt is handled via GU_MISC instead
	 * (see gen11_irq_postinstall()). */
	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	/* vblank/underrun are enabled in IER but stay masked in IMR;
	 * they get unmasked on demand. */
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		/* gen12+ has per-transcoder PSR IIRs; only check the
		 * ones whose power domain is currently up. */
		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}
3402
/*
 * Gen8/9 top-level postinstall: PCH pre-setup first, then GT and
 * display engine, PCH unmasking, and finally the master irq enable.
 */
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}
3416
/*
 * Enable south display (ICP-family PCH) interrupts: GMBUS plus the
 * per-PCH-variant DDI/TC hotplug detection setup.
 */
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask = SDE_GMBUS_ICP;

	/* IER should still be zero from the irq reset; enable everything
	 * and let IMR do the masking. */
	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	/* IIR must be clear before unmasking, or we could miss an edge. */
	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	/* The DDI/TC hotplug enable masks differ per PCH variant. */
	if (HAS_PCH_TGP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
					TGP_TC_HPD_ENABLE_MASK);
	else if (HAS_PCH_JSP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
	else if (HAS_PCH_MCC(dev_priv))
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE(PORT_TC1));
	else
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE_MASK);
}
3440
/*
 * Gen11+ top-level postinstall: PCH (if any), GT, display engine,
 * GU_MISC (which carries the GSE interrupt on gen11+), then the
 * display and master interrupt enables.
 */
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	/* Master enable last; the posting read flushes it to hw. */
	gen11_master_intr_enable(uncore->regs);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);
}
3459
/*
 * CHV postinstall: gen8-style GT irqs plus the vlv-style display irq
 * block (only if display irqs are currently enabled), then ungate the
 * gen8 master interrupt.
 */
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Master enable last; the posting read flushes the write to hw. */
	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}
3472
/* Reset all gen2 interrupt state: pipestat logic plus the GEN2 IMR/IER/IIR. */
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
}
3481
/*
 * Gen2 postinstall: program the error mask, unmask the always-on
 * sources in the (16 bit) IMR/IER, and enable the per-pipe CRC
 * interrupts via pipestat.
 */
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	/* Only report page table and memory refresh errors. */
	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	/* I915_USER_INTERRUPT is enabled in IER but left masked in IMR;
	 * presumably unmasked on demand by the engine irq code — verify. */
	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3513
/*
 * Latch and clear the (16 bit) gen2 error status. *eir gets the raw EIR
 * value; bits that survive the write-to-clear are returned in *eir_stuck
 * and masked off in EMR so they stop re-asserting the master error bit.
 */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	/* EIR is write-to-clear. */
	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
3543
3544static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3545                                   u16 eir, u16 eir_stuck)
3546{
3547        DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
3548
3549        if (eir_stuck)
3550                DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
3551}
3552
/*
 * 32 bit counterpart of i8xx_error_irq_ack(): latch and clear EIR,
 * return the raw value in *eir, and mask off any stuck bits in EMR,
 * reporting them via *eir_stuck.
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	/* EIR is write-to-clear. */
	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}
3580
3581static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3582                                   u32 eir, u32 eir_stuck)
3583{
3584        DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
3585
3586        if (eir_stuck)
3587                DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
3588}
3589
/*
 * Top-level interrupt handler for gen2. Single IIR pass (the
 * do { } while (0) runs once): all status (pipestat, EIR, IIR) is
 * latched and cleared first, and only then are the handlers run on the
 * saved copies.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Ack IIR before running the handlers. */
		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3634
/*
 * Reset gen3 interrupt state: hotplug (when present), pipestat logic,
 * and the GEN2_ IMR/IER/IIR registers.
 */
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* Clear sticky hotplug status by writing it back. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}
3648
/*
 * Gen3 postinstall: program the error mask, unmask the always-on
 * sources (plus hotplug when present), and enable the CRC and ASLE
 * pipestat interrupts.
 */
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	/* Only report page table and memory refresh errors. */
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	/* I915_USER_INTERRUPT is enabled in IER but left masked in IMR;
	 * presumably unmasked on demand by the engine irq code — verify. */
	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
3689
/*
 * Top-level interrupt handler for gen3. Single IIR pass: latch and
 * clear all status (hotplug, pipestat, EIR, IIR) first, then run the
 * handlers on the saved copies.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Ack IIR before running the handlers. */
		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3742
/*
 * Reset gen4 interrupt state: hotplug (unconditional on gen4),
 * pipestat logic, and the GEN2_ IMR/IER/IIR registers.
 */
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* Clear sticky hotplug status by writing it back. */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}
3754
/*
 * Gen4 postinstall: program the error mask (GM45 has extra error
 * sources), unmask the always-on interrupt sources, and enable the
 * GMBUS/CRC/ASLE pipestat interrupts. G4X additionally enables the
 * BSD ring user interrupt.
 */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	/* User interrupts are enabled in IER but left masked in IMR;
	 * presumably unmasked on demand by the engine irq code — verify. */
	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
3807
/*
 * Program CRT/HDMI/DP hotplug detection for gen3/4. Caller must hold
 * irq_lock (checked via lockdep below).
 */
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
3832
/*
 * Top-level interrupt handler for gen4. Single IIR pass: latch and
 * clear all status (hotplug, pipestat, EIR, IIR) first, then run the
 * handlers on the saved copies. Signals both the render (RCS0) and,
 * when flagged, BSD (VCS0) engine breadcrumbs.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Ack IIR before running the handlers. */
		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3887
3888/**
3889 * intel_irq_init - initializes irq support
3890 * @dev_priv: i915 device instance
3891 *
3892 * This function initializes all the irq support including work items, timers
3893 * and all the vtables. It does not setup the interrupt itself though.
3894 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	/* Remap tables are freed in intel_irq_fini(). */
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	/* Pick the per-platform hotplug setup hook; GMCH parts without
	 * hotplug leave it unset. */
	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_JSP(dev_priv))
			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
		else if (HAS_PCH_MCC(dev_priv))
			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}
3949
3950/**
3951 * intel_irq_fini - deinitializes IRQ support
3952 * @i915: i915 device instance
3953 *
3954 * This function deinitializes all the IRQ support.
3955 */
3956void intel_irq_fini(struct drm_i915_private *i915)
3957{
3958        int i;
3959
3960        for (i = 0; i < MAX_L3_SLICES; ++i)
3961                kfree(i915->l3_parity.remap_info[i]);
3962}
3963
3964static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
3965{
3966        if (HAS_GMCH(dev_priv)) {
3967                if (IS_CHERRYVIEW(dev_priv))
3968                        return cherryview_irq_handler;
3969                else if (IS_VALLEYVIEW(dev_priv))
3970                        return valleyview_irq_handler;
3971                else if (IS_GEN(dev_priv, 4))
3972                        return i965_irq_handler;
3973                else if (IS_GEN(dev_priv, 3))
3974                        return i915_irq_handler;
3975                else
3976                        return i8xx_irq_handler;
3977        } else {
3978                if (INTEL_GEN(dev_priv) >= 11)
3979                        return gen11_irq_handler;
3980                else if (INTEL_GEN(dev_priv) >= 8)
3981                        return gen8_irq_handler;
3982                else
3983                        return ilk_irq_handler;
3984        }
3985}
3986
/*
 * Dispatch to the per-platform irq reset routine; platform selection
 * matches intel_irq_handler().
 */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_reset(dev_priv);
	else
		i8xx_irq_reset(dev_priv);
}
4009
/*
 * Dispatch to the per-platform irq postinstall routine; platform
 * selection matches intel_irq_handler().
 */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}
4032
4033/**
4034 * intel_irq_install - enables the hardware interrupt
4035 * @dev_priv: i915 device instance
4036 *
4037 * This function enables the hardware interrupt handling, but leaves the hotplug
4038 * handling still disabled. It is called after intel_irq_init().
4039 *
4040 * In the driver load and resume code we need working interrupts in a few places
4041 * but don't want to deal with the hassle of concurrent probe and hotplug
4042 * workers. Hence the split into this two-stage approach.
4043 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	/* Quiesce all interrupt sources before hooking up the handler. */
	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		/* NOTE(review): runtime_pm.irqs_enabled stays true on this
		 * failure path — verify callers treat install failure as
		 * fatal. */
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	/* Unmask/enable the wanted sources; interrupts may fire from here. */
	intel_irq_postinstall(dev_priv);

	return ret;
}
4071
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	/* Mask/disable all interrupt sources before releasing the line. */
	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	/* Hotplug work may re-arm interrupts; stop it after the line is gone. */
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}
4101
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	/* Mask/disable all interrupt sources in hardware first ... */
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	/* ... then wait for any in-flight handler on another CPU to finish. */
	intel_synchronize_irq(dev_priv);
}
4115
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	/*
	 * Flag interrupts as enabled before the postinstall hooks turn any
	 * sources on, mirroring the ordering used in intel_irq_install().
	 */
	dev_priv->runtime_pm.irqs_enabled = true;
	/* Start from a clean slate, then re-enable the per-platform sources. */
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
4129
/* Report whether driver interrupt handling is currently enabled. */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}
4138
/* Wait for any currently executing handler of our IRQ line to complete. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}
4143