linux/drivers/gpu/drm/i915/i915_irq.c
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
        [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
        [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
        [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
        [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
        [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
        [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
        [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
        [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
        [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
        I915_WRITE16(type##IMR, 0xffff); \
        POSTING_READ16(type##IMR); \
        I915_WRITE16(type##IER, 0); \
        I915_WRITE16(type##IIR, 0xffff); \
        POSTING_READ16(type##IIR); \
        I915_WRITE16(type##IIR, 0xffff); \
        POSTING_READ16(type##IIR); \
} while (0)
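
/*
 * Usage sketch for the reset macros above (hypothetical call sites; the
 * argument is the register name prefix, which the macro token-pastes
 * into the IMR/IER/IIR register names):
 *
 *   GEN8_IRQ_RESET_NDX(GT, 0);   // GEN8_GT_IMR(0)/GEN8_GT_IER(0)/GEN8_GT_IIR(0)
 *   GEN3_IRQ_RESET(DE);          // DEIMR/DEIER/DEIIR
 *
 * IIR is cleared twice with posting reads in between because it can
 * queue up two events, per the comment above GEN8_IRQ_RESET_NDX.
 */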

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg)
{
        u32 val = I915_READ(reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(reg), val);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg)
{
        u16 val = I915_READ16(reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%04x\n",
             i915_mmio_reg_offset(reg), val);
        I915_WRITE16(reg, 0xffff);
        POSTING_READ16(reg);
        I915_WRITE16(reg, 0xffff);
        POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
        gen3_assert_iir_is_zero(dev_priv, type##IIR); \
        I915_WRITE(type##IER, (ier_val)); \
        I915_WRITE(type##IMR, (imr_val)); \
        POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
        gen2_assert_iir_is_zero(dev_priv, type##IIR); \
        I915_WRITE16(type##IER, (ier_val)); \
        I915_WRITE16(type##IMR, (imr_val)); \
        POSTING_READ16(type##IMR); \
} while (0)
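
/*
 * Usage sketch (hypothetical call site): the init macros first assert
 * that IIR is clear, then program IER and IMR. E.g. unmasking only the
 * bits in @mask on the south display engine while keeping every source
 * enabled in IER:
 *
 *   GEN3_IRQ_INIT(SDE, ~mask, 0xffffffff);
 */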

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     uint32_t mask,
                                     uint32_t bits)
{
        uint32_t val;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);

        val = I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 *
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent these read-modify-write cycles
 * from interfering, the bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * already held, it acquires the lock itself. A non-locking version is
 * also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   uint32_t mask,
                                   uint32_t bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}
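
/*
 * Example (sketch): turn off every hotplug detection bit by updating
 * the full mask to all-zero enable bits:
 *
 *   i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
 */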

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
                         const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
                                const unsigned int bank,
                                const unsigned int bit)
{
        void __iomem * const regs = i915->regs;
        u32 dw;

        lockdep_assert_held(&i915->irq_lock);

        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
                /*
                 * According to the BSpec, DW_IIR bits cannot be cleared without
                 * first servicing the Selector & Shared IIR registers.
                 */
                gen11_gt_engine_identity(i915, bank, bit);

                /*
                 * We locked GT INT DW by reading it. If we want to (try
                 * to) recover from this successfully, we need to clear
                 * our bit, otherwise we are locking the register for
                 * everybody.
                 */
                raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

                return true;
        }

        return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                            uint32_t interrupt_mask,
                            uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}
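
/*
 * A bit set in DEIMR masks (disables) the corresponding interrupt, so
 * enabling an interrupt means clearing its IMR bit; hence new_val above
 * is built from the inverted enable mask. Sketch of unmasking a single
 * event:
 *
 *   ilk_update_display_irq(dev_priv, DE_AUX_CHANNEL_A, DE_AUX_CHANNEL_A);
 */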

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
        POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

        return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 11)
                return GEN11_GPM_WGBOXPERF_INTR_MASK;
        else if (INTEL_GEN(dev_priv) >= 8)
                return GEN8_GT_IMR(2);
        else
                return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 11)
                return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
        else if (INTEL_GEN(dev_priv) >= 8)
                return GEN8_GT_IER(2);
        else
                return GEN6_PMIER;
}
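
/*
 * Note: gen6_pm_iir() above has no gen11 case because on gen11 the PM
 * interrupt identity is delivered through the banked GT IIR mechanism
 * (see gen11_reset_one_iir()) rather than a dedicated IIR register.
 */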

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        new_val = dev_priv->pm_imr;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_imr) {
                dev_priv->pm_imr = new_val;
                I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
                POSTING_READ(gen6_pm_imr(dev_priv));
        }
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
        i915_reg_t reg = gen6_pm_iir(dev_priv);

        lockdep_assert_held(&dev_priv->irq_lock);

        I915_WRITE(reg, reset_mask);
        I915_WRITE(reg, reset_mask);
        POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier |= enable_mask;
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        gen6_unmask_pm_irq(dev_priv, enable_mask);
        /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier &= ~disable_mask;
        __gen6_mask_pm_irq(dev_priv, disable_mask);
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        /* a barrier is missing here, but we don't really need one */
}
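
/*
 * Note the ordering in the two helpers above: enabling grows IER before
 * unmasking IMR (whose posting read acts as the barrier), while
 * disabling masks IMR first so nothing fires while IER shrinks.
 */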

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
                ;

        dev_priv->gt_pm.rps.pm_iir = 0;

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
        dev_priv->gt_pm.rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON_ONCE(rps->pm_iir);

        if (INTEL_GEN(dev_priv) >= 11)
                WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
        else
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

        rps->interrupts_enabled = true;
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (!READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        rps->interrupts_enabled = false;

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

        gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        /* Now that we will not be generating any more work, flush any
         * outstanding tasks. As we are called on the RPS idle path,
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
        cancel_work_sync(&rps->work);
        if (INTEL_GEN(dev_priv) >= 11)
                gen11_reset_rps_interrupts(dev_priv);
        else
                gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        if (!dev_priv->guc.interrupts_enabled) {
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
                                       dev_priv->pm_guc_events);
                dev_priv->guc.interrupts_enabled = true;
                gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
        }
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->guc.interrupts_enabled = false;

        gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                uint32_t interrupt_mask,
                                uint32_t enabled_irq_mask)
{
        uint32_t new_val;
        uint32_t old_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        old_val = I915_READ(GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                I915_WRITE(GEN8_DE_PORT_IMR, new_val);
                POSTING_READ(GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                         enum pipe pipe,
                         uint32_t interrupt_mask,
                         uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  uint32_t interrupt_mask,
                                  uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

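/*
 * PIPESTAT packs the interrupt enable bits in the high 16 bits and the
 * corresponding status bits in the low 16 bits of the same register,
 * hence the status_mask << 16 below (cf. PIPESTAT_INT_ENABLE_MASK and
 * PIPESTAT_INT_STATUS_MASK).
 */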
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
                              enum pipe pipe)
{
        u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
        u32 enable_mask = status_mask << 16;

        lockdep_assert_held(&dev_priv->irq_lock);

        if (INTEL_GEN(dev_priv) < 5)
                goto out;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
        WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                  status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                  pipe_name(pipe), enable_mask, status_mask);

        return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
                          enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
                           enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_GEN(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
        unsigned long irqflags;

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vbl_start = DIV_ROUND_UP(vbl_start, 2);

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ_FW(low_frame);
                high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
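        /*
         * That is, the gen3/4 hardware counter increments at the start
         * of active, whereas the value reported here should increment
         * at vblank start like the ctg+ counter (see the diagram
         * above); adding 1 once the pixel counter has passed vbl_start
         * makes up the difference.
         */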
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT, or there are
 * issues with scanline register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_vblank_crtc *vblank =
                &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        const struct drm_display_mode *mode = &vblank->hwmode;
        u32 vblank_start = mode->crtc_vblank_start;
        u32 vtotal = mode->crtc_vtotal;
        u32 htotal = mode->crtc_htotal;
        u32 clock = mode->crtc_clock;
        u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

        /*
         * To avoid the race condition where we might cross into the
         * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * during the same frame.
         */
        do {
                /*
                 * This field provides read back of the display
                 * pipe frame time stamp. The time stamp value
                 * is sampled at every start of vertical blank.
                 */
                scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

                /*
                 * The TIMESTAMP_CTR register has the current
                 * time stamp value.
                 */
                scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

                scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
        } while (scan_post_time != scan_prev_time);

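        /*
         * Time since the last vblank start, scaled by the pixel clock
         * and divided by htotal, yields scanlines since vblank start;
         * the result is then rotated by vblank_start below so that 0
         * lands on the first active line.
         */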
        scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
                                        clock), 1000 * htotal);
        scanline = min(scanline, vtotal - 1);
        scanline = (scanline + vblank_start) % vtotal;

        return scanline;
}

/* Use I915_READ_FW only for fast reads of the display block; no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode;
        struct drm_vblank_crtc *vblank;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        if (!crtc->active)
                return -1;

        vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        mode = &vblank->hwmode;

        if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
                return __intel_get_crtc_scanline_from_timestamp(crtc);

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN2(dev_priv))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank.  So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (HAS_DDI(dev_priv) && !position) {
                int i, temp;

                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                     bool in_vblank_irq, int *vpos, int *hpos,
                                     ktime_t *stime, ktime_t *etime,
                                     const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                pipe);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        unsigned long irqflags;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return false;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
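        /*
         * Worked example with hypothetical 1080p-like timings
         * (vbl_start = 1080, vbl_end = vtotal = 1125): a raw position
         * of 1100 is inside vblank and reported as 1100 - 1125 = -25,
         * counting up to 0 at vbl_end; a raw position of 500 is outside
         * vblank and reported unchanged.
         */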
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev_priv, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}

static void notify_ring(struct intel_engine_cs *engine)
{
        const u32 seqno = intel_engine_get_seqno(engine);
        struct i915_request *rq = NULL;
        struct task_struct *tsk = NULL;
        struct intel_wait *wait;

        if (unlikely(!engine->breadcrumbs.irq_armed))
                return;

        rcu_read_lock();

        spin_lock(&engine->breadcrumbs.irq_lock);
        wait = engine->breadcrumbs.irq_wait;
        if (wait) {
                /*
                 * We use a callback from the dma-fence to submit
                 * requests after waiting on our own requests. To
                 * ensure minimum delay in queuing the next request to
                 * hardware, signal the fence now rather than wait for
                 * the signaler to be woken up. We still wake up the
                 * waiter in order to handle the irq-seqno coherency
                 * issues (we may receive the interrupt before the
                 * seqno is written, see __i915_request_irq_complete())
                 * and to handle coalescing of multiple seqno updates
                 * and many waiters.
                 */
                if (i915_seqno_passed(seqno, wait->seqno)) {
                        struct i915_request *waiter = wait->request;

                        if (waiter &&
                            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                      &waiter->fence.flags) &&
                            intel_wait_check_request(wait, waiter))
                                rq = i915_request_get(waiter);

                        tsk = wait->tsk;
                } else {
                        if (engine->irq_seqno_barrier &&
                            i915_seqno_passed(seqno, wait->seqno - 1)) {
                                set_bit(ENGINE_IRQ_BREADCRUMB,
                                        &engine->irq_posted);
                                tsk = wait->tsk;
                        }
                }

                engine->breadcrumbs.irq_count++;
        } else {
                if (engine->breadcrumbs.irq_armed)
                        __intel_engine_disarm_breadcrumbs(engine);
        }
        spin_unlock(&engine->breadcrumbs.irq_lock);

        if (rq) {
                spin_lock(&rq->lock);
                dma_fence_signal_locked(&rq->fence);
                GEM_BUG_ON(!i915_request_completed(rq));
                spin_unlock(&rq->lock);

                i915_request_put(rq);
        }

        if (tsk && tsk->state & TASK_NORMAL)
                wake_up_process(tsk);

        rcu_read_unlock();

        trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->ktime = ktime_get_raw();
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        const struct intel_rps_ei *prev = &rps->ei;
        struct intel_rps_ei now;
        u32 events = 0;

        if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);

        if (prev->ktime) {
                u64 time, c0;
                u32 render, media;

                time = ktime_us_delta(now.ktime, prev->ktime);

                time *= dev_priv->czclk_freq;

                /* Workload can be split between render + media,
                 * e.g. SwapBuffers being blitted in X after being rendered in
                 * mesa. To account for this we need to combine both engines
                 * into our activity counter.
                 */
                render = now.render_c0 - prev->render_c0;
                media = now.media_c0 - prev->media_c0;
                c0 = max(render, media);
                c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

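                /*
                 * The comparisons below are c0 > time * threshold, i.e.
                 * busyness c0 / time measured against the up/down
                 * threshold percentage, rearranged to avoid a division.
                 */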
                if (c0 > time * rps->power.up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
                else if (c0 < time * rps->power.down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }

        rps->ei = now;
        return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, gt_pm.rps.work);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        bool client_boost = false;
        int new_delay, adj, min, max;
        u32 pm_iir = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        if (rps->interrupts_enabled) {
                pm_iir = fetch_and_zero(&rps->pm_iir);
                client_boost = atomic_read(&rps->num_waiters);
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
                goto out;

        mutex_lock(&dev_priv->pcu_lock);

        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

        adj = rps->last_adj;
        new_delay = rps->cur_freq;
        min = rps->min_freq_softlimit;
        max = rps->max_freq_softlimit;
        if (client_boost)
                max = rps->max_freq;
        if (client_boost && new_delay < rps->boost_freq) {
                new_delay = rps->boost_freq;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

                if (new_delay >= rps->max_freq_softlimit)
                        adj = 0;
        } else if (client_boost) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (rps->cur_freq > rps->efficient_freq)
                        new_delay = rps->efficient_freq;
                else if (rps->cur_freq > rps->min_freq_softlimit)
                        new_delay = rps->min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

                if (new_delay <= rps->min_freq_softlimit)
                        adj = 0;
        } else { /* unknown event */
                adj = 0;
        }

        rps->last_adj = adj;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);

        if (intel_set_rps(dev_priv, new_delay)) {
                DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
                rps->last_adj = 0;
        }

        mutex_unlock(&dev_priv->pcu_lock);

out:
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        spin_lock_irq(&dev_priv->irq_lock);
        if (rps->interrupts_enabled)
                gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
1372 */
1373static void ivybridge_parity_work(struct work_struct *work)
1374{
1375        struct drm_i915_private *dev_priv =
1376                container_of(work, typeof(*dev_priv), l3_parity.error_work);
1377        u32 error_status, row, bank, subbank;
1378        char *parity_event[6];
1379        uint32_t misccpctl;
1380        uint8_t slice = 0;
1381
1382        /* We must turn off DOP level clock gating to access the L3 registers.
1383         * In order to prevent a get/put style interface, acquire struct mutex
1384         * any time we access those registers.
1385         */
1386        mutex_lock(&dev_priv->drm.struct_mutex);
1387
1388        /* If we've screwed up tracking, just let the interrupt fire again */
1389        if (WARN_ON(!dev_priv->l3_parity.which_slice))
1390                goto out;
1391
1392        misccpctl = I915_READ(GEN7_MISCCPCTL);
1393        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1394        POSTING_READ(GEN7_MISCCPCTL);
1395
1396        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1397                i915_reg_t reg;
1398
1399                slice--;
1400                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1401                        break;
1402
1403                dev_priv->l3_parity.which_slice &= ~(1<<slice);
1404
1405                reg = GEN7_L3CDERRST1(slice);
1406
1407                error_status = I915_READ(reg);
1408                row = GEN7_PARITY_ERROR_ROW(error_status);
1409                bank = GEN7_PARITY_ERROR_BANK(error_status);
1410                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1411
1412                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1413                POSTING_READ(reg);
1414
1415                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1416                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1417                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1418                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1419                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1420                parity_event[5] = NULL;
1421
1422                kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1423                                   KOBJ_CHANGE, parity_event);
1424
1425                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1426                          slice, row, bank, subbank);
1427
1428                kfree(parity_event[4]);
1429                kfree(parity_event[3]);
1430                kfree(parity_event[2]);
1431                kfree(parity_event[1]);
1432        }
1433
1434        I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1435
1436out:
1437        WARN_ON(dev_priv->l3_parity.which_slice);
1438        spin_lock_irq(&dev_priv->irq_lock);
1439        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1440        spin_unlock_irq(&dev_priv->irq_lock);
1441
1442        mutex_unlock(&dev_priv->drm.struct_mutex);
1443}
1444
1445static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1446                                               u32 iir)
1447{
1448        if (!HAS_L3_DPF(dev_priv))
1449                return;
1450
1451        spin_lock(&dev_priv->irq_lock);
1452        gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1453        spin_unlock(&dev_priv->irq_lock);
1454
1455        iir &= GT_PARITY_ERROR(dev_priv);
1456        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1457                dev_priv->l3_parity.which_slice |= 1 << 1;
1458
1459        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1460                dev_priv->l3_parity.which_slice |= 1 << 0;
1461
1462        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1463}
1464
1465static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1466                               u32 gt_iir)
1467{
1468        if (gt_iir & GT_RENDER_USER_INTERRUPT)
1469                notify_ring(dev_priv->engine[RCS]);
1470        if (gt_iir & ILK_BSD_USER_INTERRUPT)
1471                notify_ring(dev_priv->engine[VCS]);
1472}
1473
1474static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1475                               u32 gt_iir)
1476{
1477        if (gt_iir & GT_RENDER_USER_INTERRUPT)
1478                notify_ring(dev_priv->engine[RCS]);
1479        if (gt_iir & GT_BSD_USER_INTERRUPT)
1480                notify_ring(dev_priv->engine[VCS]);
1481        if (gt_iir & GT_BLT_USER_INTERRUPT)
1482                notify_ring(dev_priv->engine[BCS]);
1483
1484        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1485                      GT_BSD_CS_ERROR_INTERRUPT |
1486                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1487                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1488
1489        if (gt_iir & GT_PARITY_ERROR(dev_priv))
1490                ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1491}
1492
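/*
 * A context-switch interrupt always requires a pass through the execlists
 * tasklet. A user interrupt on its own only wakes waiters, except under GuC
 * submission, where the tasklet also drives the submission bookkeeping on
 * request completion and so is kicked here as well.
 */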
1493static void
1494gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1495{
1496        bool tasklet = false;
1497
1498        if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
1499                tasklet = true;
1500
1501        if (iir & GT_RENDER_USER_INTERRUPT) {
1502                notify_ring(engine);
1503                tasklet |= USES_GUC_SUBMISSION(engine->i915);
1504        }
1505
1506        if (tasklet)
1507                tasklet_hi_schedule(&engine->execlists.tasklet);
1508}
1509
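/*
 * Read and clear (ack) the GT IIRs with raw accessors while the caller has
 * the master interrupt disabled; the saved gt_iir[] values are processed
 * later by gen8_gt_irq_handler(), once the master interrupt has been
 * re-enabled, keeping the fully-disabled window short.
 */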
1510static void gen8_gt_irq_ack(struct drm_i915_private *i915,
1511                            u32 master_ctl, u32 gt_iir[4])
1512{
1513        void __iomem * const regs = i915->regs;
1514
1515#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1516                      GEN8_GT_BCS_IRQ | \
1517                      GEN8_GT_VCS1_IRQ | \
1518                      GEN8_GT_VCS2_IRQ | \
1519                      GEN8_GT_VECS_IRQ | \
1520                      GEN8_GT_PM_IRQ | \
1521                      GEN8_GT_GUC_IRQ)
1522
1523        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1524                gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
1525                if (likely(gt_iir[0]))
1526                        raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1527        }
1528
1529        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1530                gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
1531                if (likely(gt_iir[1]))
1532                        raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
1533        }
1534
1535        if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1536                gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1537                if (likely(gt_iir[2] & (i915->pm_rps_events |
1538                                        i915->pm_guc_events)))
1539                        raw_reg_write(regs, GEN8_GT_IIR(2),
1540                                      gt_iir[2] & (i915->pm_rps_events |
1541                                                   i915->pm_guc_events));
1542        }
1543
1544        if (master_ctl & GEN8_GT_VECS_IRQ) {
1545                gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
1546                if (likely(gt_iir[3]))
1547                        raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
1548        }
1549}
1550
1551static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1552                                u32 master_ctl, u32 gt_iir[4])
1553{
1554        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1555                gen8_cs_irq_handler(i915->engine[RCS],
1556                                    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
1557                gen8_cs_irq_handler(i915->engine[BCS],
1558                                    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1559        }
1560
1561        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1562                gen8_cs_irq_handler(i915->engine[VCS],
1563                                    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1564                gen8_cs_irq_handler(i915->engine[VCS2],
1565                                    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
1566        }
1567
1568        if (master_ctl & GEN8_GT_VECS_IRQ) {
1569                gen8_cs_irq_handler(i915->engine[VECS],
1570                                    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1571        }
1572
1573        if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1574                gen6_rps_irq_handler(i915, gt_iir[2]);
1575                gen9_guc_irq_handler(i915, gt_iir[2]);
1576        }
1577}
1578
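/*
 * The *_port_hotplug_long_detect() helpers below are the platform-specific
 * callbacks passed to intel_get_hpd_pins(): given a pin and the latched
 * hotplug register value, they decide whether the pulse was "long"
 * (typically plug/unplug) as opposed to "short" (e.g. a DP sink signalling
 * an IRQ).
 */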
1579static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1580{
1581        switch (pin) {
1582        case HPD_PORT_C:
1583                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1584        case HPD_PORT_D:
1585                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1586        case HPD_PORT_E:
1587                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1588        case HPD_PORT_F:
1589                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1590        default:
1591                return false;
1592        }
1593}
1594
1595static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1596{
1597        switch (pin) {
1598        case HPD_PORT_A:
1599                return val & PORTA_HOTPLUG_LONG_DETECT;
1600        case HPD_PORT_B:
1601                return val & PORTB_HOTPLUG_LONG_DETECT;
1602        case HPD_PORT_C:
1603                return val & PORTC_HOTPLUG_LONG_DETECT;
1604        default:
1605                return false;
1606        }
1607}
1608
1609static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1610{
1611        switch (pin) {
1612        case HPD_PORT_A:
1613                return val & ICP_DDIA_HPD_LONG_DETECT;
1614        case HPD_PORT_B:
1615                return val & ICP_DDIB_HPD_LONG_DETECT;
1616        default:
1617                return false;
1618        }
1619}
1620
1621static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1622{
1623        switch (pin) {
1624        case HPD_PORT_C:
1625                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1626        case HPD_PORT_D:
1627                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1628        case HPD_PORT_E:
1629                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1630        case HPD_PORT_F:
1631                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1632        default:
1633                return false;
1634        }
1635}
1636
1637static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1638{
1639        switch (pin) {
1640        case HPD_PORT_E:
1641                return val & PORTE_HOTPLUG_LONG_DETECT;
1642        default:
1643                return false;
1644        }
1645}
1646
1647static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1648{
1649        switch (pin) {
1650        case HPD_PORT_A:
1651                return val & PORTA_HOTPLUG_LONG_DETECT;
1652        case HPD_PORT_B:
1653                return val & PORTB_HOTPLUG_LONG_DETECT;
1654        case HPD_PORT_C:
1655                return val & PORTC_HOTPLUG_LONG_DETECT;
1656        case HPD_PORT_D:
1657                return val & PORTD_HOTPLUG_LONG_DETECT;
1658        default:
1659                return false;
1660        }
1661}
1662
1663static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1664{
1665        switch (pin) {
1666        case HPD_PORT_A:
1667                return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1668        default:
1669                return false;
1670        }
1671}
1672
1673static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1674{
1675        switch (pin) {
1676        case HPD_PORT_B:
1677                return val & PORTB_HOTPLUG_LONG_DETECT;
1678        case HPD_PORT_C:
1679                return val & PORTC_HOTPLUG_LONG_DETECT;
1680        case HPD_PORT_D:
1681                return val & PORTD_HOTPLUG_LONG_DETECT;
1682        default:
1683                return false;
1684        }
1685}
1686
1687static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1688{
1689        switch (pin) {
1690        case HPD_PORT_B:
1691                return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1692        case HPD_PORT_C:
1693                return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1694        case HPD_PORT_D:
1695                return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1696        default:
1697                return false;
1698        }
1699}
1700
1701/*
1702 * Get a bit mask of pins that have triggered, and which ones may be long.
1703 * This can be called multiple times with the same masks to accumulate
1704 * hotplug detection results from several registers.
1705 *
1706 * Note that the caller is expected to zero out the masks initially.
1707 */
1708static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1709                               u32 *pin_mask, u32 *long_mask,
1710                               u32 hotplug_trigger, u32 dig_hotplug_reg,
1711                               const u32 hpd[HPD_NUM_PINS],
1712                               bool long_pulse_detect(enum hpd_pin pin, u32 val))
1713{
1714        enum hpd_pin pin;
1715
1716        for_each_hpd_pin(pin) {
1717                if ((hpd[pin] & hotplug_trigger) == 0)
1718                        continue;
1719
1720                *pin_mask |= BIT(pin);
1721
1722                if (long_pulse_detect(pin, dig_hotplug_reg))
1723                        *long_mask |= BIT(pin);
1724        }
1725
1726        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1727                         hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1729}
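
/*
 * Typical call pattern (sketch only; trigger/register names are placeholders,
 * see e.g. icp_irq_handler() and spt_irq_handler() for real call sites):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_a,
 *			   dig_reg_a, hpd_table, a_long_detect);
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_b,
 *			   dig_reg_b, hpd_table, b_long_detect);
 *	if (pin_mask)
 *		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */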
1730
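/*
 * GMBUS and DP AUX completion waiters both sleep on gmbus_wait_queue, which
 * is why the two wake-up handlers below are identical.
 */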
1731static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1732{
1733        wake_up_all(&dev_priv->gmbus_wait_queue);
1734}
1735
1736static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1737{
1738        wake_up_all(&dev_priv->gmbus_wait_queue);
1739}
1740
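/*
 * Pipe CRC results are reported through drm_crtc_add_crc_entry(), i.e. the
 * generic DRM debugfs CRC ABI, so the whole path is stubbed out when
 * CONFIG_DEBUG_FS is disabled.
 */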
1741#if defined(CONFIG_DEBUG_FS)
1742static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1743                                         enum pipe pipe,
1744                                         uint32_t crc0, uint32_t crc1,
1745                                         uint32_t crc2, uint32_t crc3,
1746                                         uint32_t crc4)
1747{
1748        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1749        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1750        uint32_t crcs[5];
1751
1752        spin_lock(&pipe_crc->lock);
1753        /*
1754         * For some not yet identified reason, the first CRC is
1755         * bonkers. So just skip it and start reporting CRCs from
1756         * the next vblank instead.
1757         *
1758         * On GEN8+ sometimes the second CRC is bonkers as well, so
1759         * don't trust that one either.
1760         */
1761        if (pipe_crc->skipped <= 0 ||
1762            (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1763                pipe_crc->skipped++;
1764                spin_unlock(&pipe_crc->lock);
1765                return;
1766        }
1767        spin_unlock(&pipe_crc->lock);
1768
1769        crcs[0] = crc0;
1770        crcs[1] = crc1;
1771        crcs[2] = crc2;
1772        crcs[3] = crc3;
1773        crcs[4] = crc4;
1774        drm_crtc_add_crc_entry(&crtc->base, true,
1775                                drm_crtc_accurate_vblank_count(&crtc->base),
1776                                crcs);
1777}
1778#else
1779static inline void
1780display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1781                             enum pipe pipe,
1782                             uint32_t crc0, uint32_t crc1,
1783                             uint32_t crc2, uint32_t crc3,
1784                             uint32_t crc4) {}
1785#endif
1786
1788static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1789                                     enum pipe pipe)
1790{
1791        display_pipe_crc_irq_handler(dev_priv, pipe,
1792                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1793                                     0, 0, 0, 0);
1794}
1795
1796static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1797                                     enum pipe pipe)
1798{
1799        display_pipe_crc_irq_handler(dev_priv, pipe,
1800                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1801                                     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1802                                     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1803                                     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1804                                     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1805}
1806
1807static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1808                                      enum pipe pipe)
1809{
1810        uint32_t res1, res2;
1811
1812        if (INTEL_GEN(dev_priv) >= 3)
1813                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1814        else
1815                res1 = 0;
1816
1817        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1818                res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1819        else
1820                res2 = 0;
1821
1822        display_pipe_crc_irq_handler(dev_priv, pipe,
1823                                     I915_READ(PIPE_CRC_RES_RED(pipe)),
1824                                     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1825                                     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1826                                     res1, res2);
1827}
1828
1829/* The RPS events need forcewake, so we add them to a work queue and mask their
1830 * IMR bits until the work is done. Other interrupts can be processed without
1831 * the work queue. */
1832static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1833{
1834        struct intel_rps *rps = &dev_priv->gt_pm.rps;
1835
1836        if (pm_iir & dev_priv->pm_rps_events) {
1837                spin_lock(&dev_priv->irq_lock);
1838                gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1839                if (rps->interrupts_enabled) {
1840                        rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1841                        schedule_work(&rps->work);
1842                }
1843                spin_unlock(&dev_priv->irq_lock);
1844        }
1845
1846        if (INTEL_GEN(dev_priv) >= 8)
1847                return;
1848
1849        if (HAS_VEBOX(dev_priv)) {
1850                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1851                        notify_ring(dev_priv->engine[VECS]);
1852
1853                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1854                        DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1855        }
1856}
1857
1858static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1859{
1860        if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
1861                intel_guc_to_host_event_handler(&dev_priv->guc);
1862}
1863
1864static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1865{
1866        enum pipe pipe;
1867
1868        for_each_pipe(dev_priv, pipe) {
1869                I915_WRITE(PIPESTAT(pipe),
1870                           PIPESTAT_INT_STATUS_MASK |
1871                           PIPE_FIFO_UNDERRUN_STATUS);
1872
1873                dev_priv->pipestat_irq_mask[pipe] = 0;
1874        }
1875}
1876
1877static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1878                                  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1879{
1880        int pipe;
1881
1882        spin_lock(&dev_priv->irq_lock);
1883
1884        if (!dev_priv->display_irqs_enabled) {
1885                spin_unlock(&dev_priv->irq_lock);
1886                return;
1887        }
1888
1889        for_each_pipe(dev_priv, pipe) {
1890                i915_reg_t reg;
1891                u32 status_mask, enable_mask, iir_bit = 0;
1892
1893                /*
1894                 * PIPESTAT bits get signalled even when the interrupt is
1895                 * disabled with the mask bits, and some of the status bits do
1896                 * not generate interrupts at all (like the underrun bit). Hence
1897                 * we need to be careful that we only handle what we want to
1898                 * handle.
1899                 */
1900
1901                /* FIFO underruns are filtered in the underrun handler. */
1902                status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1903
1904                switch (pipe) {
1905                case PIPE_A:
1906                        iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1907                        break;
1908                case PIPE_B:
1909                        iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1910                        break;
1911                case PIPE_C:
1912                        iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1913                        break;
1914                }
1915                if (iir & iir_bit)
1916                        status_mask |= dev_priv->pipestat_irq_mask[pipe];
1917
1918                if (!status_mask)
1919                        continue;
1920
1921                reg = PIPESTAT(pipe);
1922                pipe_stats[pipe] = I915_READ(reg) & status_mask;
1923                enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1924
1925                /*
1926                 * Clear the PIPE*STAT regs before the IIR
1927                 *
1928                 * Toggle the enable bits to make sure we get an
1929                 * edge in the ISR pipe event bit if we don't clear
1930                 * all the enabled status bits. Otherwise the edge
1931                 * triggered IIR on i965/g4x wouldn't notice that
1932                 * an interrupt is still pending.
1933                 */
1934                if (pipe_stats[pipe]) {
1935                        I915_WRITE(reg, pipe_stats[pipe]);
1936                        I915_WRITE(reg, enable_mask);
1937                }
1938        }
1939        spin_unlock(&dev_priv->irq_lock);
1940}
1941
1942static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1943                                      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1944{
1945        enum pipe pipe;
1946
1947        for_each_pipe(dev_priv, pipe) {
1948                if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1949                        drm_handle_vblank(&dev_priv->drm, pipe);
1950
1951                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1952                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1953
1954                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1955                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1956        }
1957}
1958
1959static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1960                                      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1961{
1962        bool blc_event = false;
1963        enum pipe pipe;
1964
1965        for_each_pipe(dev_priv, pipe) {
1966                if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1967                        drm_handle_vblank(&dev_priv->drm, pipe);
1968
1969                if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1970                        blc_event = true;
1971
1972                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1973                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1974
1975                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1976                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1977        }
1978
1979        if (blc_event || (iir & I915_ASLE_INTERRUPT))
1980                intel_opregion_asle_intr(dev_priv);
1981}
1982
1983static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1984                                      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1985{
1986        bool blc_event = false;
1987        enum pipe pipe;
1988
1989        for_each_pipe(dev_priv, pipe) {
1990                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1991                        drm_handle_vblank(&dev_priv->drm, pipe);
1992
1993                if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1994                        blc_event = true;
1995
1996                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1997                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1998
1999                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2000                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2001        }
2002
2003        if (blc_event || (iir & I915_ASLE_INTERRUPT))
2004                intel_opregion_asle_intr(dev_priv);
2005
2006        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2007                gmbus_irq_handler(dev_priv);
2008}
2009
2010static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2011                                            u32 pipe_stats[I915_MAX_PIPES])
2012{
2013        enum pipe pipe;
2014
2015        for_each_pipe(dev_priv, pipe) {
2016                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2017                        drm_handle_vblank(&dev_priv->drm, pipe);
2018
2019                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2020                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2021
2022                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2023                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2024        }
2025
2026        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2027                gmbus_irq_handler(dev_priv);
2028}
2029
2030static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
2031{
2032        u32 hotplug_status = 0, hotplug_status_mask;
2033        int i;
2034
2035        if (IS_G4X(dev_priv) ||
2036            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2037                hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
2038                        DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
2039        else
2040                hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
2041
2042        /*
2043         * We absolutely have to clear all the pending interrupt
2044         * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
2045         * interrupt bit won't have an edge, and the i965/g4x
2046         * edge triggered IIR will not notice that an interrupt
2047         * is still pending. We can't use PORT_HOTPLUG_EN to
2048         * guarantee the edge as the act of toggling the enable
2049         * bits can itself generate a new hotplug interrupt :(
2050         */
2051        for (i = 0; i < 10; i++) {
2052                u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
2053
2054                if (tmp == 0)
2055                        return hotplug_status;
2056
2057                hotplug_status |= tmp;
2058                I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2059        }
2060
2061        WARN_ONCE(1,
2062                  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
2063                  I915_READ(PORT_HOTPLUG_STAT));
2064
2065        return hotplug_status;
2066}
2067
2068static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2069                                 u32 hotplug_status)
2070{
2071        u32 pin_mask = 0, long_mask = 0;
2072
2073        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2074            IS_CHERRYVIEW(dev_priv)) {
2075                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2076
2077                if (hotplug_trigger) {
2078                        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2079                                           hotplug_trigger, hotplug_trigger,
2080                                           hpd_status_g4x,
2081                                           i9xx_port_hotplug_long_detect);
2082
2083                        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2084                }
2085
2086                if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2087                        dp_aux_irq_handler(dev_priv);
2088        } else {
2089                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2090
2091                if (hotplug_trigger) {
2092                        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2093                                           hotplug_trigger, hotplug_trigger,
2094                                           hpd_status_i915,
2095                                           i9xx_port_hotplug_long_detect);
2096                        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2097                }
2098        }
2099}
2100
2101static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2102{
2103        struct drm_device *dev = arg;
2104        struct drm_i915_private *dev_priv = to_i915(dev);
2105        irqreturn_t ret = IRQ_NONE;
2106
2107        if (!intel_irqs_enabled(dev_priv))
2108                return IRQ_NONE;
2109
2110        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2111        disable_rpm_wakeref_asserts(dev_priv);
2112
2113        do {
2114                u32 iir, gt_iir, pm_iir;
2115                u32 pipe_stats[I915_MAX_PIPES] = {};
2116                u32 hotplug_status = 0;
2117                u32 ier = 0;
2118
2119                gt_iir = I915_READ(GTIIR);
2120                pm_iir = I915_READ(GEN6_PMIIR);
2121                iir = I915_READ(VLV_IIR);
2122
2123                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2124                        break;
2125
2126                ret = IRQ_HANDLED;
2127
2128                /*
2129                 * Theory on interrupt generation, based on empirical evidence:
2130                 *
2131                 * x = ((VLV_IIR & VLV_IER) ||
2132                 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2133                 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2134                 *
2135                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2136                 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2137                 * guarantee the CPU interrupt will be raised again even if we
2138                 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2139                 * bits this time around.
2140                 */
2141                I915_WRITE(VLV_MASTER_IER, 0);
2142                ier = I915_READ(VLV_IER);
2143                I915_WRITE(VLV_IER, 0);
2144
2145                if (gt_iir)
2146                        I915_WRITE(GTIIR, gt_iir);
2147                if (pm_iir)
2148                        I915_WRITE(GEN6_PMIIR, pm_iir);
2149
2150                if (iir & I915_DISPLAY_PORT_INTERRUPT)
2151                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2152
2153                /* Call regardless, as some status bits might not be
2154                 * signalled in iir */
2155                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2156
2157                if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2158                           I915_LPE_PIPE_B_INTERRUPT))
2159                        intel_lpe_audio_irq_handler(dev_priv);
2160
2161                /*
2162                 * VLV_IIR is single buffered, and reflects the level
2163                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2164                 */
2165                if (iir)
2166                        I915_WRITE(VLV_IIR, iir);
2167
2168                I915_WRITE(VLV_IER, ier);
2169                I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2170
2171                if (gt_iir)
2172                        snb_gt_irq_handler(dev_priv, gt_iir);
2173                if (pm_iir)
2174                        gen6_rps_irq_handler(dev_priv, pm_iir);
2175
2176                if (hotplug_status)
2177                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2178
2179                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2180        } while (0);
2181
2182        enable_rpm_wakeref_asserts(dev_priv);
2183
2184        return ret;
2185}
2186
2187static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2188{
2189        struct drm_device *dev = arg;
2190        struct drm_i915_private *dev_priv = to_i915(dev);
2191        irqreturn_t ret = IRQ_NONE;
2192
2193        if (!intel_irqs_enabled(dev_priv))
2194                return IRQ_NONE;
2195
2196        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2197        disable_rpm_wakeref_asserts(dev_priv);
2198
2199        do {
2200                u32 master_ctl, iir;
2201                u32 pipe_stats[I915_MAX_PIPES] = {};
2202                u32 hotplug_status = 0;
2203                u32 gt_iir[4];
2204                u32 ier = 0;
2205
2206                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2207                iir = I915_READ(VLV_IIR);
2208
2209                if (master_ctl == 0 && iir == 0)
2210                        break;
2211
2212                ret = IRQ_HANDLED;
2213
2214                /*
2215                 * Theory on interrupt generation, based on empirical evidence:
2216                 *
2217                 * x = ((VLV_IIR & VLV_IER) ||
2218                 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2219                 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2220                 *
2221                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2222                 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2223                 * guarantee the CPU interrupt will be raised again even if we
2224                 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2225                 * bits this time around.
2226                 */
2227                I915_WRITE(GEN8_MASTER_IRQ, 0);
2228                ier = I915_READ(VLV_IER);
2229                I915_WRITE(VLV_IER, 0);
2230
2231                gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2232
2233                if (iir & I915_DISPLAY_PORT_INTERRUPT)
2234                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2235
2236                /* Call regardless, as some status bits might not be
2237                 * signalled in iir */
2238                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2239
2240                if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2241                           I915_LPE_PIPE_B_INTERRUPT |
2242                           I915_LPE_PIPE_C_INTERRUPT))
2243                        intel_lpe_audio_irq_handler(dev_priv);
2244
2245                /*
2246                 * VLV_IIR is single buffered, and reflects the level
2247                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2248                 */
2249                if (iir)
2250                        I915_WRITE(VLV_IIR, iir);
2251
2252                I915_WRITE(VLV_IER, ier);
2253                I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2254
2255                gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2256
2257                if (hotplug_status)
2258                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2259
2260                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2261        } while (0);
2262
2263        enable_rpm_wakeref_asserts(dev_priv);
2264
2265        return ret;
2266}
2267
2268static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2269                                u32 hotplug_trigger,
2270                                const u32 hpd[HPD_NUM_PINS])
2271{
2272        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2273
2274        /*
2275         * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2276         * unless we touch the hotplug register, even if hotplug_trigger is
2277         * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2278         * errors.
2279         */
2280        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2281        if (!hotplug_trigger) {
2282                u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2283                        PORTD_HOTPLUG_STATUS_MASK |
2284                        PORTC_HOTPLUG_STATUS_MASK |
2285                        PORTB_HOTPLUG_STATUS_MASK;
2286                dig_hotplug_reg &= ~mask;
2287        }
2288
2289        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2290        if (!hotplug_trigger)
2291                return;
2292
2293        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2294                           dig_hotplug_reg, hpd,
2295                           pch_port_hotplug_long_detect);
2296
2297        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2298}
2299
2300static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2301{
2302        int pipe;
2303        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2304
2305        ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2306
2307        if (pch_iir & SDE_AUDIO_POWER_MASK) {
2308                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2309                               SDE_AUDIO_POWER_SHIFT);
2310                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2311                                 port_name(port));
2312        }
2313
2314        if (pch_iir & SDE_AUX_MASK)
2315                dp_aux_irq_handler(dev_priv);
2316
2317        if (pch_iir & SDE_GMBUS)
2318                gmbus_irq_handler(dev_priv);
2319
2320        if (pch_iir & SDE_AUDIO_HDCP_MASK)
2321                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2322
2323        if (pch_iir & SDE_AUDIO_TRANS_MASK)
2324                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2325
2326        if (pch_iir & SDE_POISON)
2327                DRM_ERROR("PCH poison interrupt\n");
2328
2329        if (pch_iir & SDE_FDI_MASK)
2330                for_each_pipe(dev_priv, pipe)
2331                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2332                                         pipe_name(pipe),
2333                                         I915_READ(FDI_RX_IIR(pipe)));
2334
2335        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2336                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2337
2338        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2339                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2340
2341        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2342                intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2343
2344        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2345                intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2346}
2347
2348static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2349{
2350        u32 err_int = I915_READ(GEN7_ERR_INT);
2351        enum pipe pipe;
2352
2353        if (err_int & ERR_INT_POISON)
2354                DRM_ERROR("Poison interrupt\n");
2355
2356        for_each_pipe(dev_priv, pipe) {
2357                if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2358                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2359
2360                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2361                        if (IS_IVYBRIDGE(dev_priv))
2362                                ivb_pipe_crc_irq_handler(dev_priv, pipe);
2363                        else
2364                                hsw_pipe_crc_irq_handler(dev_priv, pipe);
2365                }
2366        }
2367
2368        I915_WRITE(GEN7_ERR_INT, err_int);
2369}
2370
2371static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2372{
2373        u32 serr_int = I915_READ(SERR_INT);
2374        enum pipe pipe;
2375
2376        if (serr_int & SERR_INT_POISON)
2377                DRM_ERROR("PCH poison interrupt\n");
2378
2379        for_each_pipe(dev_priv, pipe)
2380                if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2381                        intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2382
2383        I915_WRITE(SERR_INT, serr_int);
2384}
2385
2386static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2387{
2388        int pipe;
2389        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2390
2391        ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2392
2393        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2394                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2395                               SDE_AUDIO_POWER_SHIFT_CPT);
2396                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2397                                 port_name(port));
2398        }
2399
2400        if (pch_iir & SDE_AUX_MASK_CPT)
2401                dp_aux_irq_handler(dev_priv);
2402
2403        if (pch_iir & SDE_GMBUS_CPT)
2404                gmbus_irq_handler(dev_priv);
2405
2406        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2407                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2408
2409        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2410                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2411
2412        if (pch_iir & SDE_FDI_MASK_CPT)
2413                for_each_pipe(dev_priv, pipe)
2414                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2415                                         pipe_name(pipe),
2416                                         I915_READ(FDI_RX_IIR(pipe)));
2417
2418        if (pch_iir & SDE_ERROR_CPT)
2419                cpt_serr_int_handler(dev_priv);
2420}
2421
2422static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2423{
2424        u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
2425        u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
2426        u32 pin_mask = 0, long_mask = 0;
2427
2428        if (ddi_hotplug_trigger) {
2429                u32 dig_hotplug_reg;
2430
2431                dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
2432                I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
2433
2434                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2435                                   ddi_hotplug_trigger,
2436                                   dig_hotplug_reg, hpd_icp,
2437                                   icp_ddi_port_hotplug_long_detect);
2438        }
2439
2440        if (tc_hotplug_trigger) {
2441                u32 dig_hotplug_reg;
2442
2443                dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
2444                I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
2445
2446                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2447                                   tc_hotplug_trigger,
2448                                   dig_hotplug_reg, hpd_icp,
2449                                   icp_tc_port_hotplug_long_detect);
2450        }
2451
2452        if (pin_mask)
2453                intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2454
2455        if (pch_iir & SDE_GMBUS_ICP)
2456                gmbus_irq_handler(dev_priv);
2457}
2458
2459static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2460{
2461        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2462                ~SDE_PORTE_HOTPLUG_SPT;
2463        u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2464        u32 pin_mask = 0, long_mask = 0;
2465
2466        if (hotplug_trigger) {
2467                u32 dig_hotplug_reg;
2468
2469                dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2470                I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2471
2472                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2473                                   hotplug_trigger, dig_hotplug_reg, hpd_spt,
2474                                   spt_port_hotplug_long_detect);
2475        }
2476
2477        if (hotplug2_trigger) {
2478                u32 dig_hotplug_reg;
2479
2480                dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2481                I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2482
2483                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2484                                   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
2485                                   spt_port_hotplug2_long_detect);
2486        }
2487
2488        if (pin_mask)
2489                intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2490
2491        if (pch_iir & SDE_GMBUS_CPT)
2492                gmbus_irq_handler(dev_priv);
2493}
2494
2495static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2496                                u32 hotplug_trigger,
2497                                const u32 hpd[HPD_NUM_PINS])
2498{
2499        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2500
2501        dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2502        I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2503
2504        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2505                           dig_hotplug_reg, hpd,
2506                           ilk_port_hotplug_long_detect);
2507
2508        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2509}
2510
2511static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2512                                    u32 de_iir)
2513{
2514        enum pipe pipe;
2515        u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2516
2517        if (hotplug_trigger)
2518                ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2519
2520        if (de_iir & DE_AUX_CHANNEL_A)
2521                dp_aux_irq_handler(dev_priv);
2522
2523        if (de_iir & DE_GSE)
2524                intel_opregion_asle_intr(dev_priv);
2525
2526        if (de_iir & DE_POISON)
2527                DRM_ERROR("Poison interrupt\n");
2528
2529        for_each_pipe(dev_priv, pipe) {
2530                if (de_iir & DE_PIPE_VBLANK(pipe))
2531                        drm_handle_vblank(&dev_priv->drm, pipe);
2532
2533                if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2534                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2535
2536                if (de_iir & DE_PIPE_CRC_DONE(pipe))
2537                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2538        }
2539
2540        /* check event from PCH */
2541        if (de_iir & DE_PCH_EVENT) {
2542                u32 pch_iir = I915_READ(SDEIIR);
2543
2544                if (HAS_PCH_CPT(dev_priv))
2545                        cpt_irq_handler(dev_priv, pch_iir);
2546                else
2547                        ibx_irq_handler(dev_priv, pch_iir);
2548
2549                /* should clear PCH hotplug event before clearing CPU irq */
2550                I915_WRITE(SDEIIR, pch_iir);
2551        }
2552
2553        if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2554                ironlake_rps_change_irq_handler(dev_priv);
2555}
2556
2557static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2558                                    u32 de_iir)
2559{
2560        enum pipe pipe;
2561        u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2562
2563        if (hotplug_trigger)
2564                ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2565
2566        if (de_iir & DE_ERR_INT_IVB)
2567                ivb_err_int_handler(dev_priv);
2568
2569        if (de_iir & DE_EDP_PSR_INT_HSW) {
2570                u32 psr_iir = I915_READ(EDP_PSR_IIR);
2571
2572                intel_psr_irq_handler(dev_priv, psr_iir);
2573                I915_WRITE(EDP_PSR_IIR, psr_iir);
2574        }
2575
2576        if (de_iir & DE_AUX_CHANNEL_A_IVB)
2577                dp_aux_irq_handler(dev_priv);
2578
2579        if (de_iir & DE_GSE_IVB)
2580                intel_opregion_asle_intr(dev_priv);
2581
2582        for_each_pipe(dev_priv, pipe) {
2583                if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2584                        drm_handle_vblank(&dev_priv->drm, pipe);
2585        }
2586
2587        /* check event from PCH */
2588        if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2589                u32 pch_iir = I915_READ(SDEIIR);
2590
2591                cpt_irq_handler(dev_priv, pch_iir);
2592
2593                /* clear PCH hotplug event before clearing CPU irq */
2594                I915_WRITE(SDEIIR, pch_iir);
2595        }
2596}
2597
2598/*
2599 * To handle irqs with the minimum potential races with fresh interrupts, we:
2600 * 1 - Disable Master Interrupt Control.
2601 * 2 - Find the source(s) of the interrupt.
2602 * 3 - Clear the Interrupt Identity bits (IIR).
2603 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2604 * 5 - Re-enable Master Interrupt Control.
2605 */
2606static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2607{
2608        struct drm_device *dev = arg;
2609        struct drm_i915_private *dev_priv = to_i915(dev);
2610        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2611        irqreturn_t ret = IRQ_NONE;
2612
2613        if (!intel_irqs_enabled(dev_priv))
2614                return IRQ_NONE;
2615
2616        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2617        disable_rpm_wakeref_asserts(dev_priv);
2618
2619        /* disable master interrupt before clearing iir */
2620        de_ier = I915_READ(DEIER);
2621        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2622
2623        /* Disable south interrupts. We'll only write to SDEIIR once, so further
2624         * interrupts will be stored on its back queue, and then we'll be
2625         * able to process them after we restore SDEIER (as soon as we restore
2626         * it, we'll get an interrupt if SDEIIR still has something to process
2627         * due to its back queue). */
2628        if (!HAS_PCH_NOP(dev_priv)) {
2629                sde_ier = I915_READ(SDEIER);
2630                I915_WRITE(SDEIER, 0);
2631        }
2632
2633        /* Find, clear, then process each source of interrupt */
2634
2635        gt_iir = I915_READ(GTIIR);
2636        if (gt_iir) {
2637                I915_WRITE(GTIIR, gt_iir);
2638                ret = IRQ_HANDLED;
2639                if (INTEL_GEN(dev_priv) >= 6)
2640                        snb_gt_irq_handler(dev_priv, gt_iir);
2641                else
2642                        ilk_gt_irq_handler(dev_priv, gt_iir);
2643        }
2644
2645        de_iir = I915_READ(DEIIR);
2646        if (de_iir) {
2647                I915_WRITE(DEIIR, de_iir);
2648                ret = IRQ_HANDLED;
2649                if (INTEL_GEN(dev_priv) >= 7)
2650                        ivb_display_irq_handler(dev_priv, de_iir);
2651                else
2652                        ilk_display_irq_handler(dev_priv, de_iir);
2653        }
2654
2655        if (INTEL_GEN(dev_priv) >= 6) {
2656                u32 pm_iir = I915_READ(GEN6_PMIIR);
2657                if (pm_iir) {
2658                        I915_WRITE(GEN6_PMIIR, pm_iir);
2659                        ret = IRQ_HANDLED;
2660                        gen6_rps_irq_handler(dev_priv, pm_iir);
2661                }
2662        }
2663
2664        I915_WRITE(DEIER, de_ier);
2665        if (!HAS_PCH_NOP(dev_priv))
2666                I915_WRITE(SDEIER, sde_ier);
2667
2668        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2669        enable_rpm_wakeref_asserts(dev_priv);
2670
2671        return ret;
2672}
2673
2674static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2675                                u32 hotplug_trigger,
2676                                const u32 hpd[HPD_NUM_PINS])
2677{
2678        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2679
2680        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2681        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2682
2683        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2684                           dig_hotplug_reg, hpd,
2685                           bxt_port_hotplug_long_detect);
2686
2687        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2688}
2689
2690static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2691{
2692        u32 pin_mask = 0, long_mask = 0;
2693        u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2694        u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2695
2696        if (trigger_tc) {
2697                u32 dig_hotplug_reg;
2698
2699                dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2700                I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2701
2702                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2703                                   dig_hotplug_reg, hpd_gen11,
2704                                   gen11_port_hotplug_long_detect);
2705        }
2706
2707        if (trigger_tbt) {
2708                u32 dig_hotplug_reg;
2709
2710                dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2711                I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2712
2713                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2714                                   dig_hotplug_reg, hpd_gen11,
2715                                   gen11_port_hotplug_long_detect);
2716        }
2717
2718        if (pin_mask)
2719                intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2720        else
2721                DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2722}
2723
2724static irqreturn_t
2725gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2726{
2727        irqreturn_t ret = IRQ_NONE;
2728        u32 iir;
2729        enum pipe pipe;
2730
2731        if (master_ctl & GEN8_DE_MISC_IRQ) {
2732                iir = I915_READ(GEN8_DE_MISC_IIR);
2733                if (iir) {
2734                        bool found = false;
2735
2736                        I915_WRITE(GEN8_DE_MISC_IIR, iir);
2737                        ret = IRQ_HANDLED;
2738
2739                        if (iir & GEN8_DE_MISC_GSE) {
2740                                intel_opregion_asle_intr(dev_priv);
2741                                found = true;
2742                        }
2743
2744                        if (iir & GEN8_DE_EDP_PSR) {
2745                                u32 psr_iir = I915_READ(EDP_PSR_IIR);
2746
2747                                intel_psr_irq_handler(dev_priv, psr_iir);
2748                                I915_WRITE(EDP_PSR_IIR, psr_iir);
2749                                found = true;
2750                        }
2751
2752                        if (!found)
2753                                DRM_ERROR("Unexpected DE Misc interrupt\n");
2754                } else {
2755                        DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2756                }
2757        }
2758
2759        if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2760                iir = I915_READ(GEN11_DE_HPD_IIR);
2761                if (iir) {
2762                        I915_WRITE(GEN11_DE_HPD_IIR, iir);
2763                        ret = IRQ_HANDLED;
2764                        gen11_hpd_irq_handler(dev_priv, iir);
2765                } else {
2766                        DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2767                }
2768        }
2769
2770        if (master_ctl & GEN8_DE_PORT_IRQ) {
2771                iir = I915_READ(GEN8_DE_PORT_IIR);
2772                if (iir) {
2773                        u32 tmp_mask;
2774                        bool found = false;
2775
2776                        I915_WRITE(GEN8_DE_PORT_IIR, iir);
2777                        ret = IRQ_HANDLED;
2778
2779                        tmp_mask = GEN8_AUX_CHANNEL_A;
2780                        if (INTEL_GEN(dev_priv) >= 9)
2781                                tmp_mask |= GEN9_AUX_CHANNEL_B |
2782                                            GEN9_AUX_CHANNEL_C |
2783                                            GEN9_AUX_CHANNEL_D;
2784
2785                        if (INTEL_GEN(dev_priv) >= 11)
2786                                tmp_mask |= ICL_AUX_CHANNEL_E;
2787
2788                        if (IS_CNL_WITH_PORT_F(dev_priv) ||
2789                            INTEL_GEN(dev_priv) >= 11)
2790                                tmp_mask |= CNL_AUX_CHANNEL_F;
2791
2792                        if (iir & tmp_mask) {
2793                                dp_aux_irq_handler(dev_priv);
2794                                found = true;
2795                        }
2796
2797                        if (IS_GEN9_LP(dev_priv)) {
2798                                tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2799                                if (tmp_mask) {
2800                                        bxt_hpd_irq_handler(dev_priv, tmp_mask,
2801                                                            hpd_bxt);
2802                                        found = true;
2803                                }
2804                        } else if (IS_BROADWELL(dev_priv)) {
2805                                tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2806                                if (tmp_mask) {
2807                                        ilk_hpd_irq_handler(dev_priv,
2808                                                            tmp_mask, hpd_bdw);
2809                                        found = true;
2810                                }
2811                        }
2812
2813                        if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2814                                gmbus_irq_handler(dev_priv);
2815                                found = true;
2816                        }
2817
2818                        if (!found)
2819                                DRM_ERROR("Unexpected DE Port interrupt\n");
2820                } else {
2821                        DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2822                }
2823        }
2824
2825        for_each_pipe(dev_priv, pipe) {
2826                u32 fault_errors;
2827
2828                if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2829                        continue;
2830
2831                iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2832                if (!iir) {
2833                        DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2834                        continue;
2835                }
2836
2837                ret = IRQ_HANDLED;
2838                I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2839
2840                if (iir & GEN8_PIPE_VBLANK)
2841                        drm_handle_vblank(&dev_priv->drm, pipe);
2842
2843                if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2844                        hsw_pipe_crc_irq_handler(dev_priv, pipe);
2845
2846                if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2847                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2848
2849                fault_errors = iir;
2850                if (INTEL_GEN(dev_priv) >= 9)
2851                        fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2852                else
2853                        fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2854
2855                if (fault_errors)
2856                        DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2857                                  pipe_name(pipe),
2858                                  fault_errors);
2859        }
2860
2861        if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2862            master_ctl & GEN8_DE_PCH_IRQ) {
2863                /*
2864                 * FIXME(BDW): Assume for now that the new interrupt handling
2865                 * scheme also closed the SDE interrupt handling race we've seen
2866                 * on older pch-split platforms. But this needs testing.
2867                 */
2868                iir = I915_READ(SDEIIR);
2869                if (iir) {
2870                        I915_WRITE(SDEIIR, iir);
2871                        ret = IRQ_HANDLED;
2872
2873                        if (HAS_PCH_ICP(dev_priv))
2874                                icp_irq_handler(dev_priv, iir);
2875                        else if (HAS_PCH_SPT(dev_priv) ||
2876                                 HAS_PCH_KBP(dev_priv) ||
2877                                 HAS_PCH_CNP(dev_priv))
2878                                spt_irq_handler(dev_priv, iir);
2879                        else
2880                                cpt_irq_handler(dev_priv, iir);
2881                } else {
2882                        /*
2883                         * Like on previous PCH there seems to be something
2884                         * fishy going on with forwarding PCH interrupts.
2885                         */
2886                        DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2887                }
2888        }
2889
2890        return ret;
2891}
2892
2893static irqreturn_t gen8_irq_handler(int irq, void *arg)
2894{
2895        struct drm_i915_private *dev_priv = to_i915(arg);
2896        u32 master_ctl;
2897        u32 gt_iir[4];
2898
2899        if (!intel_irqs_enabled(dev_priv))
2900                return IRQ_NONE;
2901
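        /*
         * Note: the raw _FW accessors used in this handler skip the
         * usual forcewake/uncore bookkeeping, which the hot interrupt
         * path deliberately avoids.
         */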
2902        master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2903        master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2904        if (!master_ctl)
2905                return IRQ_NONE;
2906
2907        I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2908
2909        /* Find, clear, then process each source of interrupt */
2910        gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2911
2912        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2913        if (master_ctl & ~GEN8_GT_IRQS) {
2914                disable_rpm_wakeref_asserts(dev_priv);
2915                gen8_de_irq_handler(dev_priv, master_ctl);
2916                enable_rpm_wakeref_asserts(dev_priv);
2917        }
2918
2919        I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2920
2921        gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2922
2923        return IRQ_HANDLED;
2924}
2925
2926struct wedge_me {
2927        struct delayed_work work;
2928        struct drm_i915_private *i915;
2929        const char *name;
2930};
2931
2932static void wedge_me(struct work_struct *work)
2933{
2934        struct wedge_me *w = container_of(work, typeof(*w), work.work);
2935
2936        dev_err(w->i915->drm.dev,
2937                "%s timed out, cancelling all in-flight rendering.\n",
2938                w->name);
2939        i915_gem_set_wedged(w->i915);
2940}
2941
2942static void __init_wedge(struct wedge_me *w,
2943                         struct drm_i915_private *i915,
2944                         long timeout,
2945                         const char *name)
2946{
2947        w->i915 = i915;
2948        w->name = name;
2949
2950        INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
2951        schedule_delayed_work(&w->work, timeout);
2952}
2953
2954static void __fini_wedge(struct wedge_me *w)
2955{
2956        cancel_delayed_work_sync(&w->work);
2957        destroy_delayed_work_on_stack(&w->work);
2958        w->i915 = NULL;
2959}
2960
2961#define i915_wedge_on_timeout(W, DEV, TIMEOUT)                          \
2962        for (__init_wedge((W), (DEV), (TIMEOUT), __func__);             \
2963             (W)->i915;                                                 \
2964             __fini_wedge((W)))
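
/*
 * Usage sketch (illustrative only; i915_reset_device() below is the real
 * user): the macro expands to a one-pass for-loop, so the guarded section
 * reads like a normal block. If that block has not finished within
 * TIMEOUT, the delayed work fires wedge_me() and the GPU is declared
 * wedged.
 */
#if 0	/* example only, not built */
static void example_guarded_section(struct drm_i915_private *i915)
{
        struct wedge_me w;

        i915_wedge_on_timeout(&w, i915, 5 * HZ) {
                /* potentially-stuck work; must complete within 5 seconds */
        }
}
#endif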
2965
2966static u32
2967gen11_gt_engine_identity(struct drm_i915_private * const i915,
2968                         const unsigned int bank, const unsigned int bit)
2969{
2970        void __iomem * const regs = i915->regs;
2971        u32 timeout_ts;
2972        u32 ident;
2973
2974        lockdep_assert_held(&i915->irq_lock);
2975
2976        raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
2977
2978        /*
2979         * NB: Specs do not specify how long to spin wait,
2980         * so we do ~100us as an educated guess.
2981         */
2982        timeout_ts = (local_clock() >> 10) + 100;
2983        do {
2984                ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
2985        } while (!(ident & GEN11_INTR_DATA_VALID) &&
2986                 !time_after32(local_clock() >> 10, timeout_ts));
2987
2988        if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
2989                DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
2990                          bank, bit, ident);
2991                return 0;
2992        }
2993
2994        raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
2995                      GEN11_INTR_DATA_VALID);
2996
2997        return ident;
2998}
2999
3000static void
3001gen11_other_irq_handler(struct drm_i915_private * const i915,
3002                        const u8 instance, const u16 iir)
3003{
3004        if (instance == OTHER_GTPM_INSTANCE)
3005                return gen6_rps_irq_handler(i915, iir);
3006
3007        WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3008                  instance, iir);
3009}
3010
3011static void
3012gen11_engine_irq_handler(struct drm_i915_private * const i915,
3013                         const u8 class, const u8 instance, const u16 iir)
3014{
3015        struct intel_engine_cs *engine;
3016
3017        if (instance <= MAX_ENGINE_INSTANCE)
3018                engine = i915->engine_class[class][instance];
3019        else
3020                engine = NULL;
3021
3022        if (likely(engine))
3023                return gen8_cs_irq_handler(engine, iir);
3024
3025        WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3026                  class, instance);
3027}
3028
3029static void
3030gen11_gt_identity_handler(struct drm_i915_private * const i915,
3031                          const u32 identity)
3032{
3033        const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3034        const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3035        const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3036
3037        if (unlikely(!intr))
3038                return;
3039
3040        if (class <= COPY_ENGINE_CLASS)
3041                return gen11_engine_irq_handler(i915, class, instance, intr);
3042
3043        if (class == OTHER_CLASS)
3044                return gen11_other_irq_handler(i915, instance, intr);
3045
3046        WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3047                  class, instance, intr);
3048}
3049
3050static void
3051gen11_gt_bank_handler(struct drm_i915_private * const i915,
3052                      const unsigned int bank)
3053{
3054        void __iomem * const regs = i915->regs;
3055        unsigned long intr_dw;
3056        unsigned int bit;
3057
3058        lockdep_assert_held(&i915->irq_lock);
3059
3060        intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
3061
3062        if (unlikely(!intr_dw)) {
3063                DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
3064                return;
3065        }
3066
3067        for_each_set_bit(bit, &intr_dw, 32) {
3068                const u32 ident = gen11_gt_engine_identity(i915,
3069                                                           bank, bit);
3070
3071                gen11_gt_identity_handler(i915, ident);
3072        }
3073
3074        /* Clear only after the shared identity regs have been serviced */
3075        raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3076}
3077
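/*
 * Gen11 GT interrupt decode, top to bottom: GEN11_GFX_MSTR_IRQ flags the
 * pending bank, GEN11_GT_INTR_DW(bank) flags the pending bits within it,
 * and each bit is resolved to a (class, instance, intr) triplet via the
 * identity register before being routed to the engine or "other"
 * handlers above.
 */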
3078static void
3079gen11_gt_irq_handler(struct drm_i915_private * const i915,
3080                     const u32 master_ctl)
3081{
3082        unsigned int bank;
3083
3084        spin_lock(&i915->irq_lock);
3085
3086        for (bank = 0; bank < 2; bank++) {
3087                if (master_ctl & GEN11_GT_DW_IRQ(bank))
3088                        gen11_gt_bank_handler(i915, bank);
3089        }
3090
3091        spin_unlock(&i915->irq_lock);
3092}
3093
3094static u32
3095gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3096{
3097        void __iomem * const regs = dev_priv->regs;
3098        u32 iir;
3099
3100        if (!(master_ctl & GEN11_GU_MISC_IRQ))
3101                return 0;
3102
3103        iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3104        if (likely(iir))
3105                raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3106
3107        return iir;
3108}
3109
3110static void
3111gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3112{
3113        if (iir & GEN11_GU_MISC_GSE)
3114                intel_opregion_asle_intr(dev_priv);
3115}
3116
3117static irqreturn_t gen11_irq_handler(int irq, void *arg)
3118{
3119        struct drm_i915_private * const i915 = to_i915(arg);
3120        void __iomem * const regs = i915->regs;
3121        u32 master_ctl;
3122        u32 gu_misc_iir;
3123
3124        if (!intel_irqs_enabled(i915))
3125                return IRQ_NONE;
3126
3127        master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3128        master_ctl &= ~GEN11_MASTER_IRQ;
3129        if (!master_ctl)
3130                return IRQ_NONE;
3131
3132        /* Disable interrupts. */
3133        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3134
3135        /* Find, clear, then process each source of interrupt. */
3136        gen11_gt_irq_handler(i915, master_ctl);
3137
3138        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3139        if (master_ctl & GEN11_DISPLAY_IRQ) {
3140                const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3141
3142                disable_rpm_wakeref_asserts(i915);
3143                /*
3144                 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3145                 * for the display related bits.
3146                 */
3147                gen8_de_irq_handler(i915, disp_ctl);
3148                enable_rpm_wakeref_asserts(i915);
3149        }
3150
3151        gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3152
3153        /* Acknowledge and enable interrupts. */
3154        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
3155
3156        gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3157
3158        return IRQ_HANDLED;
3159}
3160
3161static void i915_reset_device(struct drm_i915_private *dev_priv,
3162                              u32 engine_mask,
3163                              const char *reason)
3164{
3165        struct i915_gpu_error *error = &dev_priv->gpu_error;
3166        struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
3167        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
3168        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
3169        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
3170        struct wedge_me w;
3171
3172        kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
3173
3174        DRM_DEBUG_DRIVER("resetting chip\n");
3175        kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
3176
3177        /* Use a watchdog to ensure that our reset completes */
3178        i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
3179                intel_prepare_reset(dev_priv);
3180
3181                error->reason = reason;
3182                error->stalled_mask = engine_mask;
3183
3184                /* Signal that locked waiters should reset the GPU */
3185                smp_mb__before_atomic();
3186                set_bit(I915_RESET_HANDOFF, &error->flags);
3187                wake_up_all(&error->wait_queue);
3188
3189                /* Wait for anyone holding the lock to wake up, without
3190                 * blocking indefinitely on struct_mutex.
3191                 */
3192                do {
3193                        if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
3194                                i915_reset(dev_priv, engine_mask, reason);
3195                                mutex_unlock(&dev_priv->drm.struct_mutex);
3196                        }
3197                } while (wait_on_bit_timeout(&error->flags,
3198                                             I915_RESET_HANDOFF,
3199                                             TASK_UNINTERRUPTIBLE,
3200                                             1));
3201
3202                error->stalled_mask = 0;
3203                error->reason = NULL;
3204
3205                intel_finish_reset(dev_priv);
3206        }
3207
3208        if (!test_bit(I915_WEDGED, &error->flags))
3209                kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
3210}
3211
3212static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
3213{
3214        u32 eir;
3215
3216        if (!IS_GEN2(dev_priv))
3217                I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
3218
3219        if (INTEL_GEN(dev_priv) < 4)
3220                I915_WRITE(IPEIR, I915_READ(IPEIR));
3221        else
3222                I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
3223
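        /*
         * EIR is write-to-clear: write back what was read, then re-read
         * to catch any error bits that refuse to clear (they stay set
         * until the underlying error is dealt with).
         */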
3224        I915_WRITE(EIR, I915_READ(EIR));
3225        eir = I915_READ(EIR);
3226        if (eir) {
3227                /*
3228                 * Some errors might have become stuck;
3229                 * mask them.
3230                 */
3231                DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
3232                I915_WRITE(EMR, I915_READ(EMR) | eir);
3233                I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
3234        }
3235}
3236
3237/**
3238 * i915_handle_error - handle a gpu error
3239 * @dev_priv: i915 device private
3240 * @engine_mask: mask representing engines that are hung
3241 * @flags: control flags
3242 * @fmt: Error message format string
3243 *
3244 * Do some basic checking of register state at error time and
3245 * dump it to the syslog.  Also call i915_capture_error_state() to make
3246 * sure we get a record and make it available in debugfs.  Fire a uevent
3247 * so userspace knows something bad happened (should trigger collection
3248 * of a ring dump etc.).
3249 */
3250void i915_handle_error(struct drm_i915_private *dev_priv,
3251                       u32 engine_mask,
3252                       unsigned long flags,
3253                       const char *fmt, ...)
3254{
3255        struct intel_engine_cs *engine;
3256        unsigned int tmp;
3257        char error_msg[80];
3258        char *msg = NULL;
3259
3260        if (fmt) {
3261                va_list args;
3262
3263                va_start(args, fmt);
3264                vscnprintf(error_msg, sizeof(error_msg), fmt, args);
3265                va_end(args);
3266
3267                msg = error_msg;
3268        }
3269
3270        /*
3271         * In most cases it's guaranteed that we get here with an RPM
3272         * reference held, for example because there is a pending GPU
3273         * request that won't finish until the reset is done. This
3274         * isn't the case at least when we get here by doing a
3275         * simulated reset via debugfs, so get an RPM reference.
3276         */
3277        intel_runtime_pm_get(dev_priv);
3278
3279        engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
3280
3281        if (flags & I915_ERROR_CAPTURE) {
3282                i915_capture_error_state(dev_priv, engine_mask, msg);
3283                i915_clear_error_registers(dev_priv);
3284        }
3285
3286        /*
3287         * Try engine reset when available. We fall back to full reset if
3288         * single reset fails.
3289         */
3290        if (intel_has_reset_engine(dev_priv)) {
3291                for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
3292                        BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
3293                        if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3294                                             &dev_priv->gpu_error.flags))
3295                                continue;
3296
3297                        if (i915_reset_engine(engine, msg) == 0)
3298                                engine_mask &= ~intel_engine_flag(engine);
3299
3300                        clear_bit(I915_RESET_ENGINE + engine->id,
3301                                  &dev_priv->gpu_error.flags);
3302                        wake_up_bit(&dev_priv->gpu_error.flags,
3303                                    I915_RESET_ENGINE + engine->id);
3304                }
3305        }
3306
3307        if (!engine_mask)
3308                goto out;
3309
3310        /* Full reset needs the mutex, stop any other user trying to do so. */
3311        if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
3312                wait_event(dev_priv->gpu_error.reset_queue,
3313                           !test_bit(I915_RESET_BACKOFF,
3314                                     &dev_priv->gpu_error.flags));
3315                goto out;
3316        }
3317
3318        /* Prevent any other reset-engine attempt. */
3319        for_each_engine(engine, dev_priv, tmp) {
3320                while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3321                                        &dev_priv->gpu_error.flags))
3322                        wait_on_bit(&dev_priv->gpu_error.flags,
3323                                    I915_RESET_ENGINE + engine->id,
3324                                    TASK_UNINTERRUPTIBLE);
3325        }
3326
3327        i915_reset_device(dev_priv, engine_mask, msg);
3328
3329        for_each_engine(engine, dev_priv, tmp) {
3330                clear_bit(I915_RESET_ENGINE + engine->id,
3331                          &dev_priv->gpu_error.flags);
3332        }
3333
3334        clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
3335        wake_up_all(&dev_priv->gpu_error.reset_queue);
3336
3337out:
3338        intel_runtime_pm_put(dev_priv);
3339}
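
/*
 * Usage sketch (illustrative only): a hang detector would typically
 * report the hung engines with capture enabled, along the lines of
 *
 *	i915_handle_error(dev_priv, hung_mask, I915_ERROR_CAPTURE,
 *			  "hang on %s", engine->name);
 *
 * leaving the per-engine reset and full-device fallback above to do the
 * rest.
 */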
3340
3341/* Called from drm generic code, passed 'crtc' which
3342 * we use as a pipe index
3343 */
3344static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
3345{
3346        struct drm_i915_private *dev_priv = to_i915(dev);
3347        unsigned long irqflags;
3348
3349        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3350        i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3351        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3352
3353        return 0;
3354}
3355
3356static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
3357{
3358        struct drm_i915_private *dev_priv = to_i915(dev);
3359        unsigned long irqflags;
3360
3361        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3362        i915_enable_pipestat(dev_priv, pipe,
3363                             PIPE_START_VBLANK_INTERRUPT_STATUS);
3364        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3365
3366        return 0;
3367}
3368
3369static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3370{
3371        struct drm_i915_private *dev_priv = to_i915(dev);
3372        unsigned long irqflags;
3373        uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
3374                DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3375
3376        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3377        ilk_enable_display_irq(dev_priv, bit);
3378        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3379
3380        /* Even though there is no DMC, frame counter can get stuck when
3381         * PSR is active as no frames are generated.
3382         */
3383        if (HAS_PSR(dev_priv))
3384                drm_vblank_restore(dev, pipe);
3385
3386        return 0;
3387}
3388
3389static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
3390{
3391        struct drm_i915_private *dev_priv = to_i915(dev);
3392        unsigned long irqflags;
3393
3394        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3395        bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3396        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3397
3398        /* Even if there is no DMC, frame counter can get stuck when
3399         * PSR is active as no frames are generated, so check only for PSR.
3400         */
3401        if (HAS_PSR(dev_priv))
3402                drm_vblank_restore(dev, pipe);
3403
3404        return 0;
3405}
3406
3407/* Called from drm generic code, passed 'crtc' which
3408 * we use as a pipe index
3409 */
3410static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
3411{
3412        struct drm_i915_private *dev_priv = to_i915(dev);
3413        unsigned long irqflags;
3414
3415        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3416        i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3417        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3418}
3419
3420static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
3421{
3422        struct drm_i915_private *dev_priv = to_i915(dev);
3423        unsigned long irqflags;
3424
3425        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3426        i915_disable_pipestat(dev_priv, pipe,
3427                              PIPE_START_VBLANK_INTERRUPT_STATUS);
3428        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3429}
3430
3431static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3432{
3433        struct drm_i915_private *dev_priv = to_i915(dev);
3434        unsigned long irqflags;
3435        uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
3436                DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3437
3438        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3439        ilk_disable_display_irq(dev_priv, bit);
3440        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3441}
3442
3443static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3444{
3445        struct drm_i915_private *dev_priv = to_i915(dev);
3446        unsigned long irqflags;
3447
3448        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3449        bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3450        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3451}
3452
3453static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3454{
3455        if (HAS_PCH_NOP(dev_priv))
3456                return;
3457
3458        GEN3_IRQ_RESET(SDE);
3459
3460        if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3461                I915_WRITE(SERR_INT, 0xffffffff);
3462}
3463
3464/*
3465 * SDEIER is also touched by the interrupt handler to work around missed PCH
3466 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3467 * instead we unconditionally enable all PCH interrupt sources here, but then
3468 * only unmask them as needed with SDEIMR.
3469 *
3470 * This function needs to be called before interrupts are enabled.
3471 */
3472static void ibx_irq_pre_postinstall(struct drm_device *dev)
3473{
3474        struct drm_i915_private *dev_priv = to_i915(dev);
3475
3476        if (HAS_PCH_NOP(dev_priv))
3477                return;
3478
3479        WARN_ON(I915_READ(SDEIER) != 0);
3480        I915_WRITE(SDEIER, 0xffffffff);
3481        POSTING_READ(SDEIER);
3482}
3483
3484static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3485{
3486        GEN3_IRQ_RESET(GT);
3487        if (INTEL_GEN(dev_priv) >= 6)
3488                GEN3_IRQ_RESET(GEN6_PM);
3489}
3490
3491static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3492{
3493        if (IS_CHERRYVIEW(dev_priv))
3494                I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3495        else
3496                I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3497
3498        i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3499        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3500
3501        i9xx_pipestat_irq_reset(dev_priv);
3502
3503        GEN3_IRQ_RESET(VLV_);
3504        dev_priv->irq_mask = ~0u;
3505}
3506
3507static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3508{
3509        u32 pipestat_mask;
3510        u32 enable_mask;
3511        enum pipe pipe;
3512
3513        pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3514
3515        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3516        for_each_pipe(dev_priv, pipe)
3517                i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3518
3519        enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3520                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3521                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3522                I915_LPE_PIPE_A_INTERRUPT |
3523                I915_LPE_PIPE_B_INTERRUPT;
3524
3525        if (IS_CHERRYVIEW(dev_priv))
3526                enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3527                        I915_LPE_PIPE_C_INTERRUPT;
3528
3529        WARN_ON(dev_priv->irq_mask != ~0u);
3530
3531        dev_priv->irq_mask = ~enable_mask;
3532
3533        GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3534}
3535
3536/* drm_dma.h hooks */
3538static void ironlake_irq_reset(struct drm_device *dev)
3539{
3540        struct drm_i915_private *dev_priv = to_i915(dev);
3541
3542        if (IS_GEN5(dev_priv))
3543                I915_WRITE(HWSTAM, 0xffffffff);
3544
3545        GEN3_IRQ_RESET(DE);
3546        if (IS_GEN7(dev_priv))
3547                I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3548
3549        if (IS_HASWELL(dev_priv)) {
3550                I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3551                I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3552        }
3553
3554        gen5_gt_irq_reset(dev_priv);
3555
3556        ibx_irq_reset(dev_priv);
3557}
3558
3559static void valleyview_irq_reset(struct drm_device *dev)
3560{
3561        struct drm_i915_private *dev_priv = to_i915(dev);
3562
3563        I915_WRITE(VLV_MASTER_IER, 0);
3564        POSTING_READ(VLV_MASTER_IER);
3565
3566        gen5_gt_irq_reset(dev_priv);
3567
3568        spin_lock_irq(&dev_priv->irq_lock);
3569        if (dev_priv->display_irqs_enabled)
3570                vlv_display_irq_reset(dev_priv);
3571        spin_unlock_irq(&dev_priv->irq_lock);
3572}
3573
3574static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3575{
3576        GEN8_IRQ_RESET_NDX(GT, 0);
3577        GEN8_IRQ_RESET_NDX(GT, 1);
3578        GEN8_IRQ_RESET_NDX(GT, 2);
3579        GEN8_IRQ_RESET_NDX(GT, 3);
3580}
3581
3582static void gen8_irq_reset(struct drm_device *dev)
3583{
3584        struct drm_i915_private *dev_priv = to_i915(dev);
3585        int pipe;
3586
3587        I915_WRITE(GEN8_MASTER_IRQ, 0);
3588        POSTING_READ(GEN8_MASTER_IRQ);
3589
3590        gen8_gt_irq_reset(dev_priv);
3591
3592        I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3593        I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3594
3595        for_each_pipe(dev_priv, pipe)
3596                if (intel_display_power_is_enabled(dev_priv,
3597                                                   POWER_DOMAIN_PIPE(pipe)))
3598                        GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3599
3600        GEN3_IRQ_RESET(GEN8_DE_PORT_);
3601        GEN3_IRQ_RESET(GEN8_DE_MISC_);
3602        GEN3_IRQ_RESET(GEN8_PCU_);
3603
3604        if (HAS_PCH_SPLIT(dev_priv))
3605                ibx_irq_reset(dev_priv);
3606}
3607
3608static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
3609{
3610        /* Disable RCS, BCS, VCS and VECS class interrupts. */
3611        I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
3612        I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,    0);
3613
3614        /* Restore irq masks on RCS, BCS, VCS and VECS engines. */
3615        I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,   ~0);
3616        I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,    ~0);
3617        I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,   ~0);
3618        I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,   ~0);
3619        I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
3620
3621        I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3622        I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3623}
3624
3625static void gen11_irq_reset(struct drm_device *dev)
3626{
3627        struct drm_i915_private *dev_priv = to_i915(dev);
3628        int pipe;
3629
3630        I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
3631        POSTING_READ(GEN11_GFX_MSTR_IRQ);
3632
3633        gen11_gt_irq_reset(dev_priv);
3634
3635        I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3636
3637        for_each_pipe(dev_priv, pipe)
3638                if (intel_display_power_is_enabled(dev_priv,
3639                                                   POWER_DOMAIN_PIPE(pipe)))
3640                        GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3641
3642        GEN3_IRQ_RESET(GEN8_DE_PORT_);
3643        GEN3_IRQ_RESET(GEN8_DE_MISC_);
3644        GEN3_IRQ_RESET(GEN11_DE_HPD_);
3645        GEN3_IRQ_RESET(GEN11_GU_MISC_);
3646        GEN3_IRQ_RESET(GEN8_PCU_);
3647
3648        if (HAS_PCH_ICP(dev_priv))
3649                GEN3_IRQ_RESET(SDE);
3650}
3651
3652void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3653                                     u8 pipe_mask)
3654{
3655        uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3656        enum pipe pipe;
3657
3658        spin_lock_irq(&dev_priv->irq_lock);
3659
3660        if (!intel_irqs_enabled(dev_priv)) {
3661                spin_unlock_irq(&dev_priv->irq_lock);
3662                return;
3663        }
3664
3665        for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3666                GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3667                                  dev_priv->de_irq_mask[pipe],
3668                                  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3669
3670        spin_unlock_irq(&dev_priv->irq_lock);
3671}
3672
3673void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3674                                     u8 pipe_mask)
3675{
3676        enum pipe pipe;
3677
3678        spin_lock_irq(&dev_priv->irq_lock);
3679
3680        if (!intel_irqs_enabled(dev_priv)) {
3681                spin_unlock_irq(&dev_priv->irq_lock);
3682                return;
3683        }
3684
3685        for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3686                GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3687
3688        spin_unlock_irq(&dev_priv->irq_lock);
3689
3690        /* make sure we're done processing display irqs */
3691        synchronize_irq(dev_priv->drm.irq);
3692}
3693
3694static void cherryview_irq_reset(struct drm_device *dev)
3695{
3696        struct drm_i915_private *dev_priv = to_i915(dev);
3697
3698        I915_WRITE(GEN8_MASTER_IRQ, 0);
3699        POSTING_READ(GEN8_MASTER_IRQ);
3700
3701        gen8_gt_irq_reset(dev_priv);
3702
3703        GEN3_IRQ_RESET(GEN8_PCU_);
3704
3705        spin_lock_irq(&dev_priv->irq_lock);
3706        if (dev_priv->display_irqs_enabled)
3707                vlv_display_irq_reset(dev_priv);
3708        spin_unlock_irq(&dev_priv->irq_lock);
3709}
3710
3711static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3712                                  const u32 hpd[HPD_NUM_PINS])
3713{
3714        struct intel_encoder *encoder;
3715        u32 enabled_irqs = 0;
3716
3717        for_each_intel_encoder(&dev_priv->drm, encoder)
3718                if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3719                        enabled_irqs |= hpd[encoder->hpd_pin];
3720
3721        return enabled_irqs;
3722}
3723
3724static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3725{
3726        u32 hotplug;
3727
3728        /*
3729         * Enable digital hotplug on the PCH, and configure the DP short pulse
3730         * duration to 2ms (which is the minimum in the Display Port spec).
3731         * The pulse duration bits are reserved on LPT+.
3732         */
3733        hotplug = I915_READ(PCH_PORT_HOTPLUG);
3734        hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3735                     PORTC_PULSE_DURATION_MASK |
3736                     PORTD_PULSE_DURATION_MASK);
3737        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3738        hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3739        hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3740        /*
3741         * When CPU and PCH are on the same package, port A
3742         * HPD must be enabled in both north and south.
3743         */
3744        if (HAS_PCH_LPT_LP(dev_priv))
3745                hotplug |= PORTA_HOTPLUG_ENABLE;
3746        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3747}
3748
3749static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3750{
3751        u32 hotplug_irqs, enabled_irqs;
3752
3753        if (HAS_PCH_IBX(dev_priv)) {
3754                hotplug_irqs = SDE_HOTPLUG_MASK;
3755                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3756        } else {
3757                hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3758                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3759        }
3760
3761        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3762
3763        ibx_hpd_detection_setup(dev_priv);
3764}
3765
3766static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
3767{
3768        u32 hotplug;
3769
3770        hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3771        hotplug |= ICP_DDIA_HPD_ENABLE |
3772                   ICP_DDIB_HPD_ENABLE;
3773        I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3774
3775        hotplug = I915_READ(SHOTPLUG_CTL_TC);
3776        hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
3777                   ICP_TC_HPD_ENABLE(PORT_TC2) |
3778                   ICP_TC_HPD_ENABLE(PORT_TC3) |
3779                   ICP_TC_HPD_ENABLE(PORT_TC4);
3780        I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3781}
3782
3783static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3784{
3785        u32 hotplug_irqs, enabled_irqs;
3786
3787        hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
3788        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
3789
3790        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3791
3792        icp_hpd_detection_setup(dev_priv);
3793}
3794
3795static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3796{
3797        u32 hotplug;
3798
3799        hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3800        hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3801                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3802                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3803                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3804        I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3805
3806        hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3807        hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3808                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3809                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3810                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3811        I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3812}
3813
3814static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3815{
3816        u32 hotplug_irqs, enabled_irqs;
3817        u32 val;
3818
3819        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3820        hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3821
3822        val = I915_READ(GEN11_DE_HPD_IMR);
3823        val &= ~hotplug_irqs;
3824        I915_WRITE(GEN11_DE_HPD_IMR, val);
3825        POSTING_READ(GEN11_DE_HPD_IMR);
3826
3827        gen11_hpd_detection_setup(dev_priv);
3828
3829        if (HAS_PCH_ICP(dev_priv))
3830                icp_hpd_irq_setup(dev_priv);
3831}
3832
3833static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3834{
3835        u32 val, hotplug;
3836
3837        /* Display WA #1179 WaHardHangonHotPlug: cnp */
3838        if (HAS_PCH_CNP(dev_priv)) {
3839                val = I915_READ(SOUTH_CHICKEN1);
3840                val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3841                val |= CHASSIS_CLK_REQ_DURATION(0xf);
3842                I915_WRITE(SOUTH_CHICKEN1, val);
3843        }
3844
3845        /* Enable digital hotplug on the PCH */
3846        hotplug = I915_READ(PCH_PORT_HOTPLUG);
3847        hotplug |= PORTA_HOTPLUG_ENABLE |
3848                   PORTB_HOTPLUG_ENABLE |
3849                   PORTC_HOTPLUG_ENABLE |
3850                   PORTD_HOTPLUG_ENABLE;
3851        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3852
3853        hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3854        hotplug |= PORTE_HOTPLUG_ENABLE;
3855        I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3856}
3857
3858static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3859{
3860        u32 hotplug_irqs, enabled_irqs;
3861
3862        hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3863        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3864
3865        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3866
3867        spt_hpd_detection_setup(dev_priv);
3868}
3869
3870static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3871{
3872        u32 hotplug;
3873
3874        /*
3875         * Enable digital hotplug on the CPU, and configure the DP short pulse
3876         * duration to 2ms (which is the minimum in the Display Port spec).
3877         * The pulse duration bits are reserved on HSW+.
3878         */
3879        hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3880        hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3881        hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3882                   DIGITAL_PORTA_PULSE_DURATION_2ms;
3883        I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3884}
3885
3886static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3887{
3888        u32 hotplug_irqs, enabled_irqs;
3889
3890        if (INTEL_GEN(dev_priv) >= 8) {
3891                hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3892                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3893
3894                bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3895        } else if (INTEL_GEN(dev_priv) >= 7) {
3896                hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3897                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3898
3899                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3900        } else {
3901                hotplug_irqs = DE_DP_A_HOTPLUG;
3902                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3903
3904                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3905        }
3906
3907        ilk_hpd_detection_setup(dev_priv);
3908
3909        ibx_hpd_irq_setup(dev_priv);
3910}
3911
3912static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3913                                      u32 enabled_irqs)
3914{
3915        u32 hotplug;
3916
3917        hotplug = I915_READ(PCH_PORT_HOTPLUG);
3918        hotplug |= PORTA_HOTPLUG_ENABLE |
3919                   PORTB_HOTPLUG_ENABLE |
3920                   PORTC_HOTPLUG_ENABLE;
3921
3922        DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3923                      hotplug, enabled_irqs);
3924        hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3925
3926        /*
3927         * On BXT the invert bit has to be set based on the AOB design
3928         * of the HPD detection logic; update it from the VBT fields.
3929         */
3930        if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3931            intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3932                hotplug |= BXT_DDIA_HPD_INVERT;
3933        if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3934            intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3935                hotplug |= BXT_DDIB_HPD_INVERT;
3936        if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3937            intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3938                hotplug |= BXT_DDIC_HPD_INVERT;
3939
3940        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3941}
3942
3943static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3944{
3945        __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3946}
3947
3948static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3949{
3950        u32 hotplug_irqs, enabled_irqs;
3951
3952        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3953        hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3954
3955        bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3956
3957        __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3958}
3959
3960static void ibx_irq_postinstall(struct drm_device *dev)
3961{
3962        struct drm_i915_private *dev_priv = to_i915(dev);
3963        u32 mask;
3964
3965        if (HAS_PCH_NOP(dev_priv))
3966                return;
3967
3968        if (HAS_PCH_IBX(dev_priv))
3969                mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3970        else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3971                mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3972        else
3973                mask = SDE_GMBUS_CPT;
3974
3975        gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3976        I915_WRITE(SDEIMR, ~mask);
3977
3978        if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3979            HAS_PCH_LPT(dev_priv))
3980                ibx_hpd_detection_setup(dev_priv);
3981        else
3982                spt_hpd_detection_setup(dev_priv);
3983}
3984
3985static void gen5_gt_irq_postinstall(struct drm_device *dev)
3986{
3987        struct drm_i915_private *dev_priv = to_i915(dev);
3988        u32 pm_irqs, gt_irqs;
3989
3990        pm_irqs = gt_irqs = 0;
3991
3992        dev_priv->gt_irq_mask = ~0;
3993        if (HAS_L3_DPF(dev_priv)) {
3994                /* L3 parity interrupt is always unmasked. */
3995                dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3996                gt_irqs |= GT_PARITY_ERROR(dev_priv);
3997        }
3998
3999        gt_irqs |= GT_RENDER_USER_INTERRUPT;
4000        if (IS_GEN5(dev_priv)) {
4001                gt_irqs |= ILK_BSD_USER_INTERRUPT;
4002        } else {
4003                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
4004        }
4005
4006        GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
4007
4008        if (INTEL_GEN(dev_priv) >= 6) {
4009                /*
4010                 * RPS interrupts will get enabled/disabled on demand when RPS
4011                 * itself is enabled/disabled.
4012                 */
4013                if (HAS_VEBOX(dev_priv)) {
4014                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;
4015                        dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
4016                }
4017
4018                dev_priv->pm_imr = 0xffffffff;
4019                GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
4020        }
4021}
4022
4023static int ironlake_irq_postinstall(struct drm_device *dev)
4024{
4025        struct drm_i915_private *dev_priv = to_i915(dev);
4026        u32 display_mask, extra_mask;
4027
4028        if (INTEL_GEN(dev_priv) >= 7) {
4029                display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
4030                                DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
4031                extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
4032                              DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
4033                              DE_DP_A_HOTPLUG_IVB);
4034        } else {
4035                display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
4036                                DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
4037                                DE_PIPEA_CRC_DONE | DE_POISON);
4038                extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
4039                              DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
4040                              DE_DP_A_HOTPLUG);
4041        }
4042
4043        if (IS_HASWELL(dev_priv)) {
4044                gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4045                intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4046                display_mask |= DE_EDP_PSR_INT_HSW;
4047        }
4048
4049        dev_priv->irq_mask = ~display_mask;
4050
4051        ibx_irq_pre_postinstall(dev);
4052
4053        GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
4054
4055        gen5_gt_irq_postinstall(dev);
4056
4057        ilk_hpd_detection_setup(dev_priv);
4058
4059        ibx_irq_postinstall(dev);
4060
4061        if (IS_IRONLAKE_M(dev_priv)) {
4062                /* Enable PCU event interrupts
4063                 *
4064                 * Spinlocking is not required here for correctness since interrupt
4065                 * setup is guaranteed to run in a single-threaded context. But we
4066                 * need it to keep the assert_spin_locked check happy. */
4067                spin_lock_irq(&dev_priv->irq_lock);
4068                ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4069                spin_unlock_irq(&dev_priv->irq_lock);
4070        }
4071
4072        return 0;
4073}
4074
4075void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4076{
4077        lockdep_assert_held(&dev_priv->irq_lock);
4078
4079        if (dev_priv->display_irqs_enabled)
4080                return;
4081
4082        dev_priv->display_irqs_enabled = true;
4083
4084        if (intel_irqs_enabled(dev_priv)) {
4085                vlv_display_irq_reset(dev_priv);
4086                vlv_display_irq_postinstall(dev_priv);
4087        }
4088}
4089
4090void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4091{
4092        lockdep_assert_held(&dev_priv->irq_lock);
4093
4094        if (!dev_priv->display_irqs_enabled)
4095                return;
4096
4097        dev_priv->display_irqs_enabled = false;
4098
4099        if (intel_irqs_enabled(dev_priv))
4100                vlv_display_irq_reset(dev_priv);
4101}
4102
4104static int valleyview_irq_postinstall(struct drm_device *dev)
4105{
4106        struct drm_i915_private *dev_priv = to_i915(dev);
4107
4108        gen5_gt_irq_postinstall(dev);
4109
4110        spin_lock_irq(&dev_priv->irq_lock);
4111        if (dev_priv->display_irqs_enabled)
4112                vlv_display_irq_postinstall(dev_priv);
4113        spin_unlock_irq(&dev_priv->irq_lock);
4114
4115        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
4116        POSTING_READ(VLV_MASTER_IER);
4117
4118        return 0;
4119}
4120
4121static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4122{
4123        /* These are interrupts we'll toggle with the ring mask register */
4124        uint32_t gt_interrupts[] = {
4125                GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4126                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4127                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
4128                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
4129                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4130                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4131                        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
4132                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
4133                0,
4134                GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
4135                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
4136                };
4137
4138        if (HAS_L3_DPF(dev_priv))
4139                gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
4140
4141        dev_priv->pm_ier = 0x0;
4142        dev_priv->pm_imr = ~dev_priv->pm_ier;
4143        GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4144        GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
4145        /*
4146         * RPS interrupts will get enabled/disabled on demand when RPS itself
4147         * is enabled/disabled. The same will be the case for GuC interrupts.
4148         */
4149        GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
4150        GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4151}
4152
4153static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4154{
4155        uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4156        uint32_t de_pipe_enables;
4157        u32 de_port_masked = GEN8_AUX_CHANNEL_A;
4158        u32 de_port_enables;
4159        u32 de_misc_masked = GEN8_DE_EDP_PSR;
4160        enum pipe pipe;
4161
4162        if (INTEL_GEN(dev_priv) <= 10)
4163                de_misc_masked |= GEN8_DE_MISC_GSE;
4164
4165        if (INTEL_GEN(dev_priv) >= 9) {
4166                de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
4167                de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
4168                                  GEN9_AUX_CHANNEL_D;
4169                if (IS_GEN9_LP(dev_priv))
4170                        de_port_masked |= BXT_DE_PORT_GMBUS;
4171        } else {
4172                de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
4173        }
4174
4175        if (INTEL_GEN(dev_priv) >= 11)
4176                de_port_masked |= ICL_AUX_CHANNEL_E;
4177
4178        if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4179                de_port_masked |= CNL_AUX_CHANNEL_F;
4180
4181        de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4182                                           GEN8_PIPE_FIFO_UNDERRUN;
4183
4184        de_port_enables = de_port_masked;
4185        if (IS_GEN9_LP(dev_priv))
4186                de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4187        else if (IS_BROADWELL(dev_priv))
4188                de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
4189
4190        gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4191        intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4192
4193        for_each_pipe(dev_priv, pipe) {
4194                dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4195
4196                if (intel_display_power_is_enabled(dev_priv,
4197                                POWER_DOMAIN_PIPE(pipe)))
4198                        GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
4199                                          dev_priv->de_irq_mask[pipe],
4200                                          de_pipe_enables);
4201        }
4202
4203        GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4204        GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
4205
4206        if (INTEL_GEN(dev_priv) >= 11) {
4207                u32 de_hpd_masked = 0;
4208                u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4209                                     GEN11_DE_TBT_HOTPLUG_MASK;
4210
4211                GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
4212                gen11_hpd_detection_setup(dev_priv);
4213        } else if (IS_GEN9_LP(dev_priv)) {
4214                bxt_hpd_detection_setup(dev_priv);
4215        } else if (IS_BROADWELL(dev_priv)) {
4216                ilk_hpd_detection_setup(dev_priv);
4217        }
4218}
4219
4220static int gen8_irq_postinstall(struct drm_device *dev)
4221{
4222        struct drm_i915_private *dev_priv = to_i915(dev);
4223
4224        if (HAS_PCH_SPLIT(dev_priv))
4225                ibx_irq_pre_postinstall(dev);
4226
4227        gen8_gt_irq_postinstall(dev_priv);
4228        gen8_de_irq_postinstall(dev_priv);
4229
4230        if (HAS_PCH_SPLIT(dev_priv))
4231                ibx_irq_postinstall(dev);
4232
4233        I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4234        POSTING_READ(GEN8_MASTER_IRQ);
4235
4236        return 0;
4237}
4238
4239static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4240{
4241        const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4242
4243        BUILD_BUG_ON(irqs & 0xffff0000);
4244
4245        /* Enable RCS, BCS, VCS and VECS class interrupts. */
4246        I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
4247        I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,    irqs << 16 | irqs);
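
        /*
         * Each 32-bit enable register packs two engines, one per 16-bit
         * half (hence irqs << 16 | irqs); the single-engine RCS and BCS
         * mask registers below therefore only unmask the upper half.
         */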
4248
4249        /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4250        I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,   ~(irqs << 16));
4251        I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,    ~(irqs << 16));
4252        I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,   ~(irqs | irqs << 16));
4253        I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,   ~(irqs | irqs << 16));
4254        I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
4255
4256        /*
4257         * RPS interrupts will get enabled/disabled on demand when RPS itself
4258         * is enabled/disabled.
4259         */
4260        dev_priv->pm_ier = 0x0;
4261        dev_priv->pm_imr = ~dev_priv->pm_ier;
4262        I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4263        I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
4264}
4265
4266static void icp_irq_postinstall(struct drm_device *dev)
4267{
4268        struct drm_i915_private *dev_priv = to_i915(dev);
4269        u32 mask = SDE_GMBUS_ICP;
4270
4271        WARN_ON(I915_READ(SDEIER) != 0);
4272        I915_WRITE(SDEIER, 0xffffffff);
4273        POSTING_READ(SDEIER);
4274
4275        gen3_assert_iir_is_zero(dev_priv, SDEIIR);
4276        I915_WRITE(SDEIMR, ~mask);
4277
4278        icp_hpd_detection_setup(dev_priv);
4279}
4280
4281static int gen11_irq_postinstall(struct drm_device *dev)
4282{
4283        struct drm_i915_private *dev_priv = to_i915(dev);
4284        u32 gu_misc_masked = GEN11_GU_MISC_GSE;
4285
4286        if (HAS_PCH_ICP(dev_priv))
4287                icp_irq_postinstall(dev);
4288
4289        gen11_gt_irq_postinstall(dev_priv);
4290        gen8_de_irq_postinstall(dev_priv);
4291
4292        GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4293
4294        I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4295
4296        I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
4297        POSTING_READ(GEN11_GFX_MSTR_IRQ);
4298
4299        return 0;
4300}
4301
4302static int cherryview_irq_postinstall(struct drm_device *dev)
4303{
4304        struct drm_i915_private *dev_priv = to_i915(dev);
4305
4306        gen8_gt_irq_postinstall(dev_priv);
4307
4308        spin_lock_irq(&dev_priv->irq_lock);
4309        if (dev_priv->display_irqs_enabled)
4310                vlv_display_irq_postinstall(dev_priv);
4311        spin_unlock_irq(&dev_priv->irq_lock);
4312
4313        I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4314        POSTING_READ(GEN8_MASTER_IRQ);
4315
4316        return 0;
4317}
4318
4319static void i8xx_irq_reset(struct drm_device *dev)
4320{
4321        struct drm_i915_private *dev_priv = to_i915(dev);
4322
4323        i9xx_pipestat_irq_reset(dev_priv);
4324
4325        I915_WRITE16(HWSTAM, 0xffff);
4326
4327        GEN2_IRQ_RESET();
4328}
4329
4330static int i8xx_irq_postinstall(struct drm_device *dev)
4331{
4332        struct drm_i915_private *dev_priv = to_i915(dev);
4333        u16 enable_mask;
4334
4335        I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4336                            I915_ERROR_MEMORY_REFRESH));
4337
4338        /* Unmask the interrupts that we always want on. */
4339        dev_priv->irq_mask =
4340                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4341                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4342                  I915_MASTER_ERROR_INTERRUPT);
4343
4344        enable_mask =
4345                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4346                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4347                I915_MASTER_ERROR_INTERRUPT |
4348                I915_USER_INTERRUPT;
4349
4350        GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4351
4352        /* Interrupt setup is already guaranteed to be single-threaded; this is
4353         * just to make the assert_spin_locked check happy. */
4354        spin_lock_irq(&dev_priv->irq_lock);
4355        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4356        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4357        spin_unlock_irq(&dev_priv->irq_lock);
4358
4359        return 0;
4360}

static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
                               u16 *eir, u16 *eir_stuck)
{
        u16 emr;

        *eir = I915_READ16(EIR);

        if (*eir)
                I915_WRITE16(EIR, *eir);

        *eir_stuck = I915_READ16(EIR);
        if (*eir_stuck == 0)
                return;

        /*
         * Toggle all EMR bits to make sure we get an edge
         * in the ISR master error bit if we don't clear
         * all the EIR bits. Otherwise the edge triggered
         * IIR on i965/g4x wouldn't notice that an interrupt
         * is still pending. Also some EIR bits can't be
         * cleared except by handling the underlying error
         * (or by a GPU reset) so we mask any bit that
         * remains set.
         */
        emr = I915_READ16(EMR);
        I915_WRITE16(EMR, 0xffff);
        I915_WRITE16(EMR, emr | *eir_stuck);
}
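
/*
 * A worked example of the ack sequence above: suppose a page-table error
 * is still asserted after the write-to-clear. The second EIR read then
 * reports it in *eir_stuck, EMR is pulsed to all-ones and restored with
 * that bit ORed in, so the stuck source is masked and the master-error
 * bit can produce a fresh edge for any new error.
 */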

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
                                   u16 eir, u16 eir_stuck)
{
        DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

        if (eir_stuck)
                DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
                               u32 *eir, u32 *eir_stuck)
{
        u32 emr;

        *eir = I915_READ(EIR);

        I915_WRITE(EIR, *eir);

        *eir_stuck = I915_READ(EIR);
        if (*eir_stuck == 0)
                return;

        /*
         * Toggle all EMR bits to make sure we get an edge
         * in the ISR master error bit if we don't clear
         * all the EIR bits. Otherwise the edge triggered
         * IIR on i965/g4x wouldn't notice that an interrupt
         * is still pending. Also some EIR bits can't be
         * cleared except by handling the underlying error
         * (or by a GPU reset) so we mask any bit that
         * remains set.
         */
        emr = I915_READ(EMR);
        I915_WRITE(EMR, 0xffffffff);
        I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
                                   u32 eir, u32 eir_stuck)
{
        DRM_DEBUG("Master Error: EIR 0x%08x\n", eir);

        if (eir_stuck)
                DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u16 eir = 0, eir_stuck = 0;
                u16 iir;

                iir = I915_READ16(IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                I915_WRITE16(IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[RCS]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

                i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}
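
/*
 * The do { ... } while (0) block in these legacy handlers is not a retry
 * loop; it exists so the "no interrupt pending" case can break out early
 * while every exit path still funnels through the single
 * enable_rpm_wakeref_asserts() call.
 */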

static void i915_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (I915_HAS_HOTPLUG(dev_priv)) {
                i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        i9xx_pipestat_irq_reset(dev_priv);

        I915_WRITE(HWSTAM, 0xffffffff);

        GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;

        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
                          I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (I915_HAS_HOTPLUG(dev_priv)) {
                /* Enable in IER... */
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
                /* and unmask in IMR */
                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
        }

        GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded; this
         * is just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);

        return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = I915_READ(IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (I915_HAS_HOTPLUG(dev_priv) &&
                    iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                I915_WRITE(IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[RCS]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        i9xx_pipestat_irq_reset(dev_priv);

        I915_WRITE(HWSTAM, 0xffffffff);

        GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;
        u32 error_mask;

        /*
         * Enable some error detection; note that the instruction error mask
         * bit is reserved, so we leave it masked.
         */
        if (IS_G4X(dev_priv)) {
                error_mask = ~(GM45_ERROR_PAGE_TABLE |
                               GM45_ERROR_MEM_PRIV |
                               GM45_ERROR_CP_PRIV |
                               I915_ERROR_MEMORY_REFRESH);
        } else {
                error_mask = ~(I915_ERROR_PAGE_TABLE |
                               I915_ERROR_MEMORY_REFRESH);
        }
        I915_WRITE(EMR, error_mask);

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PORT_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (IS_G4X(dev_priv))
                enable_mask |= I915_BSD_USER_INTERRUPT;

        GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded; this
         * is just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);

        return 0;
}
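
/*
 * PIPE_GMBUS_INTERRUPT_STATUS is enabled on pipe A only: on these gen4
 * parts GMBUS events are reported through pipe A's pipestat register
 * rather than through a dedicated IIR bit.
 */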

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
        u32 hotplug_en;

        lockdep_assert_held(&dev_priv->irq_lock);

        /* Note HDMI and DP share hotplug bits */
        /* enable bits are the same for all generations */
        hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
        /*
         * Programming the CRT detection parameters tends to generate a
         * spurious hotplug event about three seconds later. So just do
         * it once.
         */
        if (IS_G4X(dev_priv))
                hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

        /* Ignore TV since it's buggy */
        i915_hotplug_interrupt_update_locked(dev_priv,
                                             HOTPLUG_INT_EN_MASK |
                                             CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
                                             CRT_HOTPLUG_ACTIVATION_PERIOD_64,
                                             hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = I915_READ(IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                I915_WRITE(IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[RCS]);

                if (iir & I915_BSD_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[VCS]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}
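
/*
 * The handler above mirrors i915_irq_handler(): the differences are that
 * display port interrupts are acked unconditionally (no I915_HAS_HOTPLUG()
 * check is needed on these parts) and that g4x additionally reports
 * I915_BSD_USER_INTERRUPT for the VCS (BSD) ring.
 */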

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself, though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int i;

        intel_hpd_init_work(dev_priv);

        INIT_WORK(&rps->work, gen6_pm_rps_work);

        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
        for (i = 0; i < MAX_L3_SLICES; ++i)
                dev_priv->l3_parity.remap_info[i] = NULL;

        if (HAS_GUC_SCHED(dev_priv))
                dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

        /* Let's track the enabled rps events */
        if (IS_VALLEYVIEW(dev_priv))
                /* WaGsvRC0ResidencyMethod:vlv */
                dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
                dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

        rps->pm_intrmsk_mbz = 0;

        /*
         * SNB, IVB and HSW can hard hang (and VLV and CHV may) on a looping
         * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
         *
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
        if (INTEL_GEN(dev_priv) <= 7)
                rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

        if (INTEL_GEN(dev_priv) >= 8)
                rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

        if (IS_GEN2(dev_priv)) {
                /* Gen2 doesn't have a hardware frame counter */
                dev->max_vblank_count = 0;
        } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = g4x_get_vblank_counter;
        } else {
                dev->driver->get_vblank_counter = i915_get_vblank_counter;
                dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        }

        /*
         * Opt out of the vblank disable timer on everything except gen2.
         * Gen2 doesn't have a hardware frame counter and so depends on
         * vblank interrupts to produce sane vblank sequence numbers.
         */
        if (!IS_GEN2(dev_priv))
                dev->vblank_disable_immediate = true;

        /* Most platforms treat the display irq block as an always-on
         * power domain. vlv/chv can disable it at runtime and need
         * special care to avoid writing any of the display block registers
         * outside of the power domain. We defer setting up the display irqs
         * in this case to the runtime pm.
         */
        dev_priv->display_irqs_enabled = true;
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->display_irqs_enabled = false;

        dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

        dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

        if (IS_CHERRYVIEW(dev_priv)) {
                dev->driver->irq_handler = cherryview_irq_handler;
                dev->driver->irq_preinstall = cherryview_irq_reset;
                dev->driver->irq_postinstall = cherryview_irq_postinstall;
                dev->driver->irq_uninstall = cherryview_irq_reset;
                dev->driver->enable_vblank = i965_enable_vblank;
                dev->driver->disable_vblank = i965_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_reset;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
                dev->driver->irq_uninstall = valleyview_irq_reset;
                dev->driver->enable_vblank = i965_enable_vblank;
                dev->driver->disable_vblank = i965_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (INTEL_GEN(dev_priv) >= 11) {
                dev->driver->irq_handler = gen11_irq_handler;
                dev->driver->irq_preinstall = gen11_irq_reset;
                dev->driver->irq_postinstall = gen11_irq_postinstall;
                dev->driver->irq_uninstall = gen11_irq_reset;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
                dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                dev->driver->irq_handler = gen8_irq_handler;
                dev->driver->irq_preinstall = gen8_irq_reset;
                dev->driver->irq_postinstall = gen8_irq_postinstall;
                dev->driver->irq_uninstall = gen8_irq_reset;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
                if (IS_GEN9_LP(dev_priv))
                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
                else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
                         HAS_PCH_CNP(dev_priv))
                        dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
                else
                        dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_reset;
                dev->driver->irq_postinstall = ironlake_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_reset;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else {
                if (IS_GEN2(dev_priv)) {
                        dev->driver->irq_preinstall = i8xx_irq_reset;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_reset;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
                } else if (IS_GEN3(dev_priv)) {
                        dev->driver->irq_preinstall = i915_irq_reset;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_reset;
                        dev->driver->irq_handler = i915_irq_handler;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
                } else {
                        dev->driver->irq_preinstall = i965_irq_reset;
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_reset;
                        dev->driver->irq_handler = i965_irq_handler;
                        dev->driver->enable_vblank = i965_enable_vblank;
                        dev->driver->disable_vblank = i965_disable_vblank;
                }
                if (I915_HAS_HOTPLUG(dev_priv))
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        }
}
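
/*
 * The vtable selection above runs from most specific to most generic:
 * CHV/VLV are matched first because their display blocks need special
 * runtime-pm handling despite otherwise fitting the generic paths, and the
 * gen11 check must precede the generic >= 8 branch.
 */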

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < MAX_L3_SLICES; ++i)
                kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
        /*
         * We enable some interrupt sources in our postinstall hooks, so mark
         * interrupts as enabled _before_ actually enabling them to avoid
         * special cases in our ordering checks.
         */
        dev_priv->runtime_pm.irqs_enabled = true;

        return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
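
/*
 * Informal sketch of how the two-stage split is consumed at driver load.
 * This is illustrative only: the error label is hypothetical and the real
 * call sites live elsewhere in the driver:
 *
 *	intel_irq_init(i915);
 *	ret = intel_irq_install(i915);
 *	if (ret)
 *		goto err_unwind;
 *	...
 *	intel_irq_uninstall(i915);
 */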

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
        drm_irq_uninstall(&dev_priv->drm);
        intel_hpd_cancel_work(dev_priv);
        dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
        dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
        dev_priv->runtime_pm.irqs_enabled = false;
        synchronize_irq(dev_priv->drm.irq);
}
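
/*
 * The two runtime-pm helpers here and below are meant to be used as a
 * matched pair around a suspend/resume or runtime-pm cycle, roughly:
 *
 *	intel_runtime_pm_disable_interrupts(i915);
 *	... device powered down and back up ...
 *	intel_runtime_pm_enable_interrupts(i915);
 */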

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
        dev_priv->runtime_pm.irqs_enabled = true;
        dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
        dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
