linux/drivers/gpu/drm/i915/i915_irq.c
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)

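/*
 * Illustrative expansion (editor's sketch, not part of the driver): for the
 * GT block, GEN5_IRQ_RESET(GT) pastes "GT" into the register names and is
 * therefore equivalent to:
 *
 *      I915_WRITE(GTIMR, 0xffffffff);  mask everything
 *      POSTING_READ(GTIMR);
 *      I915_WRITE(GTIER, 0);           disable everything
 *      I915_WRITE(GTIIR, 0xffffffff);  ack a latched event
 *      POSTING_READ(GTIIR);
 *      I915_WRITE(GTIIR, 0xffffffff);  ack a possible second queued event
 *      POSTING_READ(GTIIR);
 *
 * IIR is cleared twice because the hardware can latch one further event
 * behind the one currently shown, hence the "be paranoid" note above.
 */
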
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg)
{
        u32 val = I915_READ(reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(reg), val);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
        gen5_assert_iir_is_zero(dev_priv, type##IIR); \
        I915_WRITE(type##IER, (ier_val)); \
        I915_WRITE(type##IMR, (imr_val)); \
        POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     uint32_t mask,
                                     uint32_t bits)
{
        uint32_t val;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);

        val = I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep the read-modify-write cycles from
 * interfering with each other, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where the
 * lock is already held, it acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   uint32_t mask,
                                   uint32_t bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                            uint32_t interrupt_mask,
                            uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

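/*
 * Note on the arithmetic above: in the hardware IMR registers a set bit
 * *masks* (disables) an interrupt, so enabling a source means clearing its
 * bit. new_val keeps the bits outside @interrupt_mask untouched and, within
 * the mask, sets exactly the bits that are not in @enabled_irq_mask.
 *
 * Typical usage is through thin wrappers along these lines (editor's
 * sketch; any such wrappers live outside this file and the calls here are
 * only for exposition):
 *
 *      enable:  ilk_update_display_irq(dev_priv, bits, bits);
 *      disable: ilk_update_display_irq(dev_priv, bits, 0);
 */
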
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
        POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

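/*
 * On gen8+ the PM/RPS interrupt bits no longer have dedicated GEN6_PM*
 * registers; they share the GT interrupt register bank for engine group 2.
 * These three helpers pick the right register for the running platform so
 * the rest of this file can stay generation-agnostic.
 */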
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
                POSTING_READ(gen6_pm_imr(dev_priv));
        }
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
                                  uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        i915_reg_t reg = gen6_pm_iir(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
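        /*
         * Write the sticky event bits twice for the same reason the reset
         * macros above do: IIR can queue up a second event behind the one
         * being acked, and a single write could leave it pending.
         */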
        I915_WRITE(reg, dev_priv->pm_rps_events);
        I915_WRITE(reg, dev_priv->pm_rps_events);
        POSTING_READ(reg);
        dev_priv->rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        if (READ_ONCE(dev_priv->rps.interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON_ONCE(dev_priv->rps.pm_iir);
        WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
                                dev_priv->pm_rps_events);
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
        return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

        __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
                                ~dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        /* Now that we will not be generating any more work, flush any
         * outstanding tasks. As we are called on the RPS idle path,
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
        cancel_work_sync(&dev_priv->rps.work);
        gen6_reset_rps_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                uint32_t interrupt_mask,
                                uint32_t enabled_irq_mask)
{
        uint32_t new_val;
        uint32_t old_val;

        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        old_val = I915_READ(GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                I915_WRITE(GEN8_DE_PORT_IMR, new_val);
                POSTING_READ(GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                         enum pipe pipe,
                         uint32_t interrupt_mask,
                         uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  uint32_t interrupt_mask,
                                  uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);

        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                       u32 enable_mask, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == enable_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;

        /* Enable the interrupt, clear any pending status */
        pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                        u32 enable_mask, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

        pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

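/*
 * PIPESTAT packs the interrupt enable bits in the high 16 bits and the
 * corresponding sticky status bits in the low 16 bits of one register,
 * which is why "status_mask << 16" yields the matching enable mask in the
 * common case. The VLV/CHV PSR, FIFO underrun and sprite flip-done bits
 * break that symmetry, hence the special-casing below.
 */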
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
        u32 enable_mask = status_mask << 16;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

        return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                     u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                      u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_GEN(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
        const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vbl_start = DIV_ROUND_UP(vbl_start, 2);

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode = &crtc->base.hwmode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN2(dev_priv))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank. So retry the read,
         * so that we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (HAS_DDI(dev_priv) && !position) {
                int i, temp;

                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
                                DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
                                    ktime_t *stime, ktime_t *etime,
                                    const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. To keep the
                 * reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_IN_VBLANK;

        return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }

        if (!crtc->hwmode.crtc_clock) {
                DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev_priv, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}

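/*
 * Called from the hard irq path on a user interrupt. smp_store_mb()
 * publishes irq_posted with a full barrier before the wakeup, so a waiter
 * that misses the wakeup still observes the flag when it re-checks;
 * intel_engine_wakeup() reports whether anybody was actually waiting.
 */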
static void notify_ring(struct intel_engine_cs *engine)
{
        smp_store_mb(engine->breadcrumbs.irq_posted, true);
        if (intel_engine_wakeup(engine))
                trace_i915_gem_request_notify(engine);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

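/*
 * vlv_c0_above() decides whether the combined render+media C0 residency
 * over the sampled interval exceeds @threshold percent, using multiplies
 * only so that no 64-bit division is needed in the interrupt path.
 * Rearranged, it evaluates
 *
 *      c0_ticks * mul * VLV_CZ_CLOCK_TO_MILLI_SEC >=
 *              cz_ticks_elapsed * threshold * czclk_freq
 *
 * where mul starts at 100 (percent) and is shifted left by 8 to compensate
 * for the wider counter range when VLV_COUNT_RANGE_HIGH is selected.
 */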
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
                         const struct intel_rps_ei *old,
                         const struct intel_rps_ei *now,
                         int threshold)
{
        u64 time, c0;
        unsigned int mul = 100;

        if (old->cz_clock == 0)
                return false;

        if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                mul <<= 8;

        time = now->cz_clock - old->cz_clock;
        time *= threshold * dev_priv->czclk_freq;

        /* Workload can be split between render + media, e.g. SwapBuffers
         * being blitted in X after being rendered in mesa. To account for
         * this we need to combine both engines into our activity counter.
         */
        c0 = now->render_c0 - old->render_c0;
        c0 += now->media_c0 - old->media_c0;
        c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

        return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
        dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        struct intel_rps_ei now;
        u32 events = 0;

        if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);
        if (now.cz_clock == 0)
                return 0;

        if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
                if (!vlv_c0_above(dev_priv,
                                  &dev_priv->rps.down_ei, &now,
                                  dev_priv->rps.down_threshold))
                        events |= GEN6_PM_RP_DOWN_THRESHOLD;
                dev_priv->rps.down_ei = now;
        }

        if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
                if (vlv_c0_above(dev_priv,
                                 &dev_priv->rps.up_ei, &now,
                                 dev_priv->rps.up_threshold))
                        events |= GEN6_PM_RP_UP_THRESHOLD;
                dev_priv->rps.up_ei = now;
        }

        return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;

        for_each_engine(engine, dev_priv)
                if (intel_engine_has_waiter(engine))
                        return true;

        return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, rps.work);
        bool client_boost;
        int new_delay, adj, min, max;
        u32 pm_iir;

        spin_lock_irq(&dev_priv->irq_lock);
        /* Speed up work cancellation while disabling the RPS interrupts. */
        if (!dev_priv->rps.interrupts_enabled) {
                spin_unlock_irq(&dev_priv->irq_lock);
                return;
        }

        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        client_boost = dev_priv->rps.client_boost;
        dev_priv->rps.client_boost = false;
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

        adj = dev_priv->rps.last_adj;
        new_delay = dev_priv->rps.cur_freq;
        min = dev_priv->rps.min_freq_softlimit;
        max = dev_priv->rps.max_freq_softlimit;
        if (client_boost || any_waiters(dev_priv))
                max = dev_priv->rps.max_freq;
        if (client_boost && new_delay < dev_priv->rps.boost_freq) {
                new_delay = dev_priv->rps.boost_freq;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq - adj) {
                        new_delay = dev_priv->rps.efficient_freq;
                        adj = 0;
                }
        } else if (client_boost || any_waiters(dev_priv)) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
                else
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
        } else { /* unknown event */
                adj = 0;
        }

        dev_priv->rps.last_adj = adj;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);

        intel_set_rps(dev_priv, new_delay);

        mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                i915_reg_t reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1(slice);

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(&dev_priv->irq_lock);

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
                                               u32 iir)
{
        if (!HAS_L3_DPF(dev_priv))
                return;

        spin_lock(&dev_priv->irq_lock);
        gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
        spin_unlock(&dev_priv->irq_lock);

        iir &= GT_PARITY_ERROR(dev_priv);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                dev_priv->l3_parity.which_slice |= 1 << 0;

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

        if (gt_iir & GT_PARITY_ERROR(dev_priv))
                ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
                notify_ring(engine);
        if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
                tasklet_schedule(&engine->irq_tasklet);
}

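/*
 * Gen8 GT interrupt handling is split in two: gen8_gt_irq_ack() runs in the
 * hard irq path and only reads and clears the IIR banks that the master
 * control register flagged (using the raw _FW accessors to keep the hot
 * path cheap), while gen8_gt_irq_handler() below dispatches the latched
 * bits afterwards.
 */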
1293static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
1294                                   u32 master_ctl,
1295                                   u32 gt_iir[4])
1296{
1297        irqreturn_t ret = IRQ_NONE;
1298
1299        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1300                gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
1301                if (gt_iir[0]) {
1302                        I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
1303                        ret = IRQ_HANDLED;
1304                } else
1305                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
1306        }
1307
1308        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1309                gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
1310                if (gt_iir[1]) {
1311                        I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
1312                        ret = IRQ_HANDLED;
1313                } else
1314                        DRM_ERROR("The master control interrupt lied (GT1)!\n");
1315        }
1316
1317        if (master_ctl & GEN8_GT_VECS_IRQ) {
1318                gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
1319                if (gt_iir[3]) {
1320                        I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
1321                        ret = IRQ_HANDLED;
1322                } else
1323                        DRM_ERROR("The master control interrupt lied (GT3)!\n");
1324        }
1325
1326        if (master_ctl & GEN8_GT_PM_IRQ) {
1327                gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
1328                if (gt_iir[2] & dev_priv->pm_rps_events) {
1329                        I915_WRITE_FW(GEN8_GT_IIR(2),
1330                                      gt_iir[2] & dev_priv->pm_rps_events);
1331                        ret = IRQ_HANDLED;
1332                } else
1333                        DRM_ERROR("The master control interrupt lied (PM)!\n");
1334        }
1335
1336        return ret;
1337}
1338
1339static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1340                                u32 gt_iir[4])
1341{
1342        if (gt_iir[0]) {
1343                gen8_cs_irq_handler(&dev_priv->engine[RCS],
1344                                    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
1345                gen8_cs_irq_handler(&dev_priv->engine[BCS],
1346                                    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1347        }
1348
1349        if (gt_iir[1]) {
1350                gen8_cs_irq_handler(&dev_priv->engine[VCS],
1351                                    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1352                gen8_cs_irq_handler(&dev_priv->engine[VCS2],
1353                                    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1354        }
1355
1356        if (gt_iir[3])
1357                gen8_cs_irq_handler(&dev_priv->engine[VECS],
1358                                    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1359
1360        if (gt_iir[2] & dev_priv->pm_rps_events)
1361                gen6_rps_irq_handler(dev_priv, gt_iir[2]);
1362}
1363
1364static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1365{
1366        switch (port) {
1367        case PORT_A:
1368                return val & PORTA_HOTPLUG_LONG_DETECT;
1369        case PORT_B:
1370                return val & PORTB_HOTPLUG_LONG_DETECT;
1371        case PORT_C:
1372                return val & PORTC_HOTPLUG_LONG_DETECT;
1373        default:
1374                return false;
1375        }
1376}
1377
1378static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1379{
1380        switch (port) {
1381        case PORT_E:
1382                return val & PORTE_HOTPLUG_LONG_DETECT;
1383        default:
1384                return false;
1385        }
1386}
1387
1388static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1389{
1390        switch (port) {
1391        case PORT_A:
1392                return val & PORTA_HOTPLUG_LONG_DETECT;
1393        case PORT_B:
1394                return val & PORTB_HOTPLUG_LONG_DETECT;
1395        case PORT_C:
1396                return val & PORTC_HOTPLUG_LONG_DETECT;
1397        case PORT_D:
1398                return val & PORTD_HOTPLUG_LONG_DETECT;
1399        default:
1400                return false;
1401        }
1402}
1403
1404static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1405{
1406        switch (port) {
1407        case PORT_A:
1408                return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1409        default:
1410                return false;
1411        }
1412}
1413
1414static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1415{
1416        switch (port) {
1417        case PORT_B:
1418                return val & PORTB_HOTPLUG_LONG_DETECT;
1419        case PORT_C:
1420                return val & PORTC_HOTPLUG_LONG_DETECT;
1421        case PORT_D:
1422                return val & PORTD_HOTPLUG_LONG_DETECT;
1423        default:
1424                return false;
1425        }
1426}
1427
1428static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1429{
1430        switch (port) {
1431        case PORT_B:
1432                return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1433        case PORT_C:
1434                return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1435        case PORT_D:
1436                return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1437        default:
1438                return false;
1439        }
1440}
1441
1442/*
1443 * Get a bit mask of pins that have triggered, and which ones may be long.
1444 * This can be called multiple times with the same masks to accumulate
1445 * hotplug detection results from several registers.
1446 *
1447 * Note that the caller is expected to zero out the masks initially.
1448 */
1449static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1450                             u32 hotplug_trigger, u32 dig_hotplug_reg,
1451                             const u32 hpd[HPD_NUM_PINS],
1452                             bool long_pulse_detect(enum port port, u32 val))
1453{
1454        enum port port;
1455        int i;
1456
1457        for_each_hpd_pin(i) {
1458                if ((hpd[i] & hotplug_trigger) == 0)
1459                        continue;
1460
1461                *pin_mask |= BIT(i);
1462
1463                if (!intel_hpd_pin_to_port(i, &port))
1464                        continue;
1465
1466                if (long_pulse_detect(port, dig_hotplug_reg))
1467                        *long_mask |= BIT(i);
1468        }
1469
1470        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1471                         hotplug_trigger, dig_hotplug_reg, *pin_mask);
1473}
1474
1475static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1476{
1477        wake_up_all(&dev_priv->gmbus_wait_queue);
1478}
1479
1480static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1481{
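            /*
             * DP AUX completion and GMBUS IRQs share one wait queue; both
             * paths only need a wakeup, the waiters re-check the status
             * registers themselves.
             */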
1482        wake_up_all(&dev_priv->gmbus_wait_queue);
1483}
1484
1485#if defined(CONFIG_DEBUG_FS)
1486static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1487                                         enum pipe pipe,
1488                                         uint32_t crc0, uint32_t crc1,
1489                                         uint32_t crc2, uint32_t crc3,
1490                                         uint32_t crc4)
1491{
1492        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1493        struct intel_pipe_crc_entry *entry;
1494        int head, tail;
1495
1496        spin_lock(&pipe_crc->lock);
1497
1498        if (!pipe_crc->entries) {
1499                spin_unlock(&pipe_crc->lock);
1500                DRM_DEBUG_KMS("spurious interrupt\n");
1501                return;
1502        }
1503
1504        head = pipe_crc->head;
1505        tail = pipe_crc->tail;
1506
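            /*
             * CIRC_SPACE() keeps one slot free so that head == tail always
             * means "empty"; if no slot is left, drop this sample rather
             * than overwrite unread entries.
             */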
1507        if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1508                spin_unlock(&pipe_crc->lock);
1509                DRM_ERROR("CRC buffer overflowing\n");
1510                return;
1511        }
1512
1513        entry = &pipe_crc->entries[head];
1514
1515        entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
1516                                                                 pipe);
1517        entry->crc[0] = crc0;
1518        entry->crc[1] = crc1;
1519        entry->crc[2] = crc2;
1520        entry->crc[3] = crc3;
1521        entry->crc[4] = crc4;
1522
1523        head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1524        pipe_crc->head = head;
1525
1526        spin_unlock(&pipe_crc->lock);
1527
1528        wake_up_interruptible(&pipe_crc->wq);
1529}
1530#else
1531static inline void
1532display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1533                             enum pipe pipe,
1534                             uint32_t crc0, uint32_t crc1,
1535                             uint32_t crc2, uint32_t crc3,
1536                             uint32_t crc4) {}
1537#endif
1538
1540static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1541                                     enum pipe pipe)
1542{
1543        display_pipe_crc_irq_handler(dev_priv, pipe,
1544                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1545                                     0, 0, 0, 0);
1546}
1547
1548static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1549                                     enum pipe pipe)
1550{
1551        display_pipe_crc_irq_handler(dev_priv, pipe,
1552                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1553                                     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1554                                     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1555                                     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1556                                     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1557}
1558
1559static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1560                                      enum pipe pipe)
1561{
1562        uint32_t res1, res2;
1563
1564        if (INTEL_GEN(dev_priv) >= 3)
1565                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1566        else
1567                res1 = 0;
1568
1569        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1570                res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1571        else
1572                res2 = 0;
1573
1574        display_pipe_crc_irq_handler(dev_priv, pipe,
1575                                     I915_READ(PIPE_CRC_RES_RED(pipe)),
1576                                     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1577                                     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1578                                     res1, res2);
1579}
1580
1581/* The RPS events need forcewake, so we add them to a work queue and mask their
1582 * IMR bits until the work is done. Other interrupts can be processed without
1583 * the work queue. */
1584static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1585{
1586        if (pm_iir & dev_priv->pm_rps_events) {
1587                spin_lock(&dev_priv->irq_lock);
1588                gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1589                if (dev_priv->rps.interrupts_enabled) {
1590                        dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1591                        schedule_work(&dev_priv->rps.work);
1592                }
1593                spin_unlock(&dev_priv->irq_lock);
1594        }
1595
1596        if (INTEL_INFO(dev_priv)->gen >= 8)
1597                return;
1598
1599        if (HAS_VEBOX(dev_priv)) {
1600                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1601                        notify_ring(&dev_priv->engine[VECS]);
1602
1603                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1604                        DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1605        }
1606}
1607
1608static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
1609                                     enum pipe pipe)
1610{
1611        bool ret;
1612
1613        ret = drm_handle_vblank(&dev_priv->drm, pipe);
1614        if (ret)
1615                intel_finish_page_flip_mmio(dev_priv, pipe);
1616
1617        return ret;
1618}
1619
1620static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1621                                        u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1622{
1623        int pipe;
1624
1625        spin_lock(&dev_priv->irq_lock);
1626
1627        if (!dev_priv->display_irqs_enabled) {
1628                spin_unlock(&dev_priv->irq_lock);
1629                return;
1630        }
1631
1632        for_each_pipe(dev_priv, pipe) {
1633                i915_reg_t reg;
1634                u32 mask, iir_bit = 0;
1635
1636                /*
1637                 * PIPESTAT bits get signalled even when the interrupt is
1638                 * disabled with the mask bits, and some of the status bits do
1639                 * not generate interrupts at all (like the underrun bit). Hence
1640                 * we need to be careful that we only handle what we want to
1641                 * handle.
1642                 */
1643
1644                /* fifo underruns are filtered in the underrun handler. */
1645                mask = PIPE_FIFO_UNDERRUN_STATUS;
1646
1647                switch (pipe) {
1648                case PIPE_A:
1649                        iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1650                        break;
1651                case PIPE_B:
1652                        iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1653                        break;
1654                case PIPE_C:
1655                        iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1656                        break;
1657                }
1658                if (iir & iir_bit)
1659                        mask |= dev_priv->pipestat_irq_mask[pipe];
1660
1661                if (!mask)
1662                        continue;
1663
1664                reg = PIPESTAT(pipe);
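                    /*
                     * PIPESTAT mixes enable bits (high half) and sticky
                     * status bits (low half) in one register; catching the
                     * enables in the mask lets the write-back below ack the
                     * status bits without disturbing the enables.
                     */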
1665                mask |= PIPESTAT_INT_ENABLE_MASK;
1666                pipe_stats[pipe] = I915_READ(reg) & mask;
1667
1668                /*
1669                 * Clear the PIPE*STAT regs before the IIR
1670                 */
1671                if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1672                                        PIPESTAT_INT_STATUS_MASK))
1673                        I915_WRITE(reg, pipe_stats[pipe]);
1674        }
1675        spin_unlock(&dev_priv->irq_lock);
1676}
1677
1678static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1679                                            u32 pipe_stats[I915_MAX_PIPES])
1680{
1681        enum pipe pipe;
1682
1683        for_each_pipe(dev_priv, pipe) {
1684                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1685                    intel_pipe_handle_vblank(dev_priv, pipe))
1686                        intel_check_page_flip(dev_priv, pipe);
1687
1688                if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1689                        intel_finish_page_flip_cs(dev_priv, pipe);
1690
1691                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1692                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1693
1694                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1695                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1696        }
1697
1698        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1699                gmbus_irq_handler(dev_priv);
1700}
1701
1702static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1703{
1704        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1705
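            /*
             * The hotplug status bits are sticky (write-1-to-clear), so
             * writing back exactly what we read acks precisely the events
             * we are about to handle.
             */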
1706        if (hotplug_status)
1707                I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1708
1709        return hotplug_status;
1710}
1711
1712static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1713                                 u32 hotplug_status)
1714{
1715        u32 pin_mask = 0, long_mask = 0;
1716
1717        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1718            IS_CHERRYVIEW(dev_priv)) {
1719                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1720
1721                if (hotplug_trigger) {
1722                        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1723                                           hotplug_trigger, hpd_status_g4x,
1724                                           i9xx_port_hotplug_long_detect);
1725
1726                        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1727                }
1728
1729                if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1730                        dp_aux_irq_handler(dev_priv);
1731        } else {
1732                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1733
1734                if (hotplug_trigger) {
1735                        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1736                                           hotplug_trigger, hpd_status_i915,
1737                                           i9xx_port_hotplug_long_detect);
1738                        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1739                }
1740        }
1741}
1742
1743static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1744{
1745        struct drm_device *dev = arg;
1746        struct drm_i915_private *dev_priv = to_i915(dev);
1747        irqreturn_t ret = IRQ_NONE;
1748
1749        if (!intel_irqs_enabled(dev_priv))
1750                return IRQ_NONE;
1751
1752        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1753        disable_rpm_wakeref_asserts(dev_priv);
1754
1755        do {
1756                u32 iir, gt_iir, pm_iir;
1757                u32 pipe_stats[I915_MAX_PIPES] = {};
1758                u32 hotplug_status = 0;
1759                u32 ier = 0;
1760
1761                gt_iir = I915_READ(GTIIR);
1762                pm_iir = I915_READ(GEN6_PMIIR);
1763                iir = I915_READ(VLV_IIR);
1764
1765                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1766                        break;
1767
1768                ret = IRQ_HANDLED;
1769
1770                /*
1771                 * Theory on interrupt generation, based on empirical evidence:
1772                 *
1773                 * x = ((VLV_IIR & VLV_IER) ||
1774                 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1775                 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1776                 *
1777                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1778                 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1779                 * guarantee the CPU interrupt will be raised again even if we
1780                 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1781                 * bits this time around.
1782                 */
1783                I915_WRITE(VLV_MASTER_IER, 0);
1784                ier = I915_READ(VLV_IER);
1785                I915_WRITE(VLV_IER, 0);
1786
1787                if (gt_iir)
1788                        I915_WRITE(GTIIR, gt_iir);
1789                if (pm_iir)
1790                        I915_WRITE(GEN6_PMIIR, pm_iir);
1791
1792                if (iir & I915_DISPLAY_PORT_INTERRUPT)
1793                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1794
1795                /* Call regardless, as some status bits might not be
1796                 * signalled in iir */
1797                valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1798
1799                /*
1800                 * VLV_IIR is single buffered, and reflects the level
1801                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1802                 */
1803                if (iir)
1804                        I915_WRITE(VLV_IIR, iir);
1805
1806                I915_WRITE(VLV_IER, ier);
1807                I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1808                POSTING_READ(VLV_MASTER_IER);
1809
1810                if (gt_iir)
1811                        snb_gt_irq_handler(dev_priv, gt_iir);
1812                if (pm_iir)
1813                        gen6_rps_irq_handler(dev_priv, pm_iir);
1814
1815                if (hotplug_status)
1816                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1817
1818                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1819        } while (0);
1820
1821        enable_rpm_wakeref_asserts(dev_priv);
1822
1823        return ret;
1824}
1825
1826static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1827{
1828        struct drm_device *dev = arg;
1829        struct drm_i915_private *dev_priv = to_i915(dev);
1830        irqreturn_t ret = IRQ_NONE;
1831
1832        if (!intel_irqs_enabled(dev_priv))
1833                return IRQ_NONE;
1834
1835        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1836        disable_rpm_wakeref_asserts(dev_priv);
1837
1838        do {
1839                u32 master_ctl, iir;
1840                u32 gt_iir[4] = {};
1841                u32 pipe_stats[I915_MAX_PIPES] = {};
1842                u32 hotplug_status = 0;
1843                u32 ier = 0;
1844
1845                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1846                iir = I915_READ(VLV_IIR);
1847
1848                if (master_ctl == 0 && iir == 0)
1849                        break;
1850
1851                ret = IRQ_HANDLED;
1852
1853                /*
1854                 * Theory on interrupt generation, based on empirical evidence:
1855                 *
1856                 * x = ((VLV_IIR & VLV_IER) ||
1857                 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1858                 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1859                 *
1860                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1861                 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1862                 * guarantee the CPU interrupt will be raised again even if we
1863                 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1864                 * bits this time around.
1865                 */
1866                I915_WRITE(GEN8_MASTER_IRQ, 0);
1867                ier = I915_READ(VLV_IER);
1868                I915_WRITE(VLV_IER, 0);
1869
1870                gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1871
1872                if (iir & I915_DISPLAY_PORT_INTERRUPT)
1873                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1874
1875                /* Call regardless, as some status bits might not be
1876                 * signalled in iir */
1877                valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1878
1879                /*
1880                 * VLV_IIR is single buffered, and reflects the level
1881                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1882                 */
1883                if (iir)
1884                        I915_WRITE(VLV_IIR, iir);
1885
1886                I915_WRITE(VLV_IER, ier);
1887                I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1888                POSTING_READ(GEN8_MASTER_IRQ);
1889
1890                gen8_gt_irq_handler(dev_priv, gt_iir);
1891
1892                if (hotplug_status)
1893                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1894
1895                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1896        } while (0);
1897
1898        enable_rpm_wakeref_asserts(dev_priv);
1899
1900        return ret;
1901}
1902
1903static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1904                                u32 hotplug_trigger,
1905                                const u32 hpd[HPD_NUM_PINS])
1906{
1907        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1908
1909        /*
1910         * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1911         * unless we touch the hotplug register, even if hotplug_trigger is
1912         * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1913         * errors.
1914         */
1915        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1916        if (!hotplug_trigger) {
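                    /*
                     * Knock out the sticky status bits so the write below
                     * only has the ack side effect and doesn't clear events
                     * that nobody has looked at yet.
                     */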
1917                u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1918                        PORTD_HOTPLUG_STATUS_MASK |
1919                        PORTC_HOTPLUG_STATUS_MASK |
1920                        PORTB_HOTPLUG_STATUS_MASK;
1921                dig_hotplug_reg &= ~mask;
1922        }
1923
1924        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1925        if (!hotplug_trigger)
1926                return;
1927
1928        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1929                           dig_hotplug_reg, hpd,
1930                           pch_port_hotplug_long_detect);
1931
1932        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1933}
1934
1935static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1936{
1937        int pipe;
1938        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1939
1940        ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1941
1942        if (pch_iir & SDE_AUDIO_POWER_MASK) {
1943                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1944                               SDE_AUDIO_POWER_SHIFT);
1945                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1946                                 port_name(port));
1947        }
1948
1949        if (pch_iir & SDE_AUX_MASK)
1950                dp_aux_irq_handler(dev_priv);
1951
1952        if (pch_iir & SDE_GMBUS)
1953                gmbus_irq_handler(dev_priv);
1954
1955        if (pch_iir & SDE_AUDIO_HDCP_MASK)
1956                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1957
1958        if (pch_iir & SDE_AUDIO_TRANS_MASK)
1959                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1960
1961        if (pch_iir & SDE_POISON)
1962                DRM_ERROR("PCH poison interrupt\n");
1963
1964        if (pch_iir & SDE_FDI_MASK)
1965                for_each_pipe(dev_priv, pipe)
1966                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1967                                         pipe_name(pipe),
1968                                         I915_READ(FDI_RX_IIR(pipe)));
1969
1970        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1971                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1972
1973        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1974                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1975
1976        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1977                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1978
1979        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1980                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1981}
1982
1983static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1984{
1985        u32 err_int = I915_READ(GEN7_ERR_INT);
1986        enum pipe pipe;
1987
1988        if (err_int & ERR_INT_POISON)
1989                DRM_ERROR("Poison interrupt\n");
1990
1991        for_each_pipe(dev_priv, pipe) {
1992                if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1993                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1994
1995                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1996                        if (IS_IVYBRIDGE(dev_priv))
1997                                ivb_pipe_crc_irq_handler(dev_priv, pipe);
1998                        else
1999                                hsw_pipe_crc_irq_handler(dev_priv, pipe);
2000                }
2001        }
2002
2003        I915_WRITE(GEN7_ERR_INT, err_int);
2004}
2005
2006static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2007{
2008        u32 serr_int = I915_READ(SERR_INT);
2009
2010        if (serr_int & SERR_INT_POISON)
2011                DRM_ERROR("PCH poison interrupt\n");
2012
2013        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2014                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2015
2016        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2017                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2018
2019        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2020                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2021
2022        I915_WRITE(SERR_INT, serr_int);
2023}
2024
2025static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2026{
2027        int pipe;
2028        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2029
2030        ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2031
2032        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2033                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2034                               SDE_AUDIO_POWER_SHIFT_CPT);
2035                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2036                                 port_name(port));
2037        }
2038
2039        if (pch_iir & SDE_AUX_MASK_CPT)
2040                dp_aux_irq_handler(dev_priv);
2041
2042        if (pch_iir & SDE_GMBUS_CPT)
2043                gmbus_irq_handler(dev_priv);
2044
2045        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2046                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2047
2048        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2049                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2050
2051        if (pch_iir & SDE_FDI_MASK_CPT)
2052                for_each_pipe(dev_priv, pipe)
2053                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2054                                         pipe_name(pipe),
2055                                         I915_READ(FDI_RX_IIR(pipe)));
2056
2057        if (pch_iir & SDE_ERROR_CPT)
2058                cpt_serr_int_handler(dev_priv);
2059}
2060
2061static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2062{
2063        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2064                ~SDE_PORTE_HOTPLUG_SPT;
2065        u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2066        u32 pin_mask = 0, long_mask = 0;
2067
2068        if (hotplug_trigger) {
2069                u32 dig_hotplug_reg;
2070
2071                dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2072                I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2073
2074                intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2075                                   dig_hotplug_reg, hpd_spt,
2076                                   spt_port_hotplug_long_detect);
2077        }
2078
2079        if (hotplug2_trigger) {
2080                u32 dig_hotplug_reg;
2081
2082                dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2083                I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2084
2085                intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2086                                   dig_hotplug_reg, hpd_spt,
2087                                   spt_port_hotplug2_long_detect);
2088        }
2089
2090        if (pin_mask)
2091                intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2092
2093        if (pch_iir & SDE_GMBUS_CPT)
2094                gmbus_irq_handler(dev_priv);
2095}
2096
2097static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2098                                u32 hotplug_trigger,
2099                                const u32 hpd[HPD_NUM_PINS])
2100{
2101        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2102
2103        dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2104        I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2105
2106        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2107                           dig_hotplug_reg, hpd,
2108                           ilk_port_hotplug_long_detect);
2109
2110        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2111}
2112
2113static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2114                                    u32 de_iir)
2115{
2116        enum pipe pipe;
2117        u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2118
2119        if (hotplug_trigger)
2120                ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2121
2122        if (de_iir & DE_AUX_CHANNEL_A)
2123                dp_aux_irq_handler(dev_priv);
2124
2125        if (de_iir & DE_GSE)
2126                intel_opregion_asle_intr(dev_priv);
2127
2128        if (de_iir & DE_POISON)
2129                DRM_ERROR("Poison interrupt\n");
2130
2131        for_each_pipe(dev_priv, pipe) {
2132                if (de_iir & DE_PIPE_VBLANK(pipe) &&
2133                    intel_pipe_handle_vblank(dev_priv, pipe))
2134                        intel_check_page_flip(dev_priv, pipe);
2135
2136                if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2137                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2138
2139                if (de_iir & DE_PIPE_CRC_DONE(pipe))
2140                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2141
2142                /* plane/pipes map 1:1 on ilk+ */
2143                if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2144                        intel_finish_page_flip_cs(dev_priv, pipe);
2145        }
2146
2147        /* check event from PCH */
2148        if (de_iir & DE_PCH_EVENT) {
2149                u32 pch_iir = I915_READ(SDEIIR);
2150
2151                if (HAS_PCH_CPT(dev_priv))
2152                        cpt_irq_handler(dev_priv, pch_iir);
2153                else
2154                        ibx_irq_handler(dev_priv, pch_iir);
2155
2156                /* should clear PCH hotplug event before clear CPU irq */
2157                I915_WRITE(SDEIIR, pch_iir);
2158        }
2159
2160        if (IS_GEN5(dev_priv) && (de_iir & DE_PCU_EVENT))
2161                ironlake_rps_change_irq_handler(dev_priv);
2162}
2163
2164static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2165                                    u32 de_iir)
2166{
2167        enum pipe pipe;
2168        u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2169
2170        if (hotplug_trigger)
2171                ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2172
2173        if (de_iir & DE_ERR_INT_IVB)
2174                ivb_err_int_handler(dev_priv);
2175
2176        if (de_iir & DE_AUX_CHANNEL_A_IVB)
2177                dp_aux_irq_handler(dev_priv);
2178
2179        if (de_iir & DE_GSE_IVB)
2180                intel_opregion_asle_intr(dev_priv);
2181
2182        for_each_pipe(dev_priv, pipe) {
2183                if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2184                    intel_pipe_handle_vblank(dev_priv, pipe))
2185                        intel_check_page_flip(dev_priv, pipe);
2186
2187                /* plane/pipes map 1:1 on ilk+ */
2188                if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2189                        intel_finish_page_flip_cs(dev_priv, pipe);
2190        }
2191
2192        /* check event from PCH */
2193        if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2194                u32 pch_iir = I915_READ(SDEIIR);
2195
2196                cpt_irq_handler(dev_priv, pch_iir);
2197
2198                /* clear PCH hotplug event before clear CPU irq */
2199                I915_WRITE(SDEIIR, pch_iir);
2200        }
2201}
2202
2203/*
2204 * To handle IRQs while minimizing potential races with fresh interrupts, we:
2205 * 1 - Disable Master Interrupt Control.
2206 * 2 - Find the source(s) of the interrupt.
2207 * 3 - Clear the Interrupt Identity bits (IIR).
2208 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2209 * 5 - Re-enable Master Interrupt Control.
2210 */
2211static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2212{
2213        struct drm_device *dev = arg;
2214        struct drm_i915_private *dev_priv = to_i915(dev);
2215        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2216        irqreturn_t ret = IRQ_NONE;
2217
2218        if (!intel_irqs_enabled(dev_priv))
2219                return IRQ_NONE;
2220
2221        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2222        disable_rpm_wakeref_asserts(dev_priv);
2223
2224        /* disable master interrupt before clearing iir */
2225        de_ier = I915_READ(DEIER);
2226        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2227        POSTING_READ(DEIER);
2228
2229        /* Disable south interrupts. We'll only write to SDEIIR once, so further
2230         * interrupts will be stored on its back queue, and then we'll be
2231         * able to process them after we restore SDEIER (as soon as we restore
2232         * it, we'll get an interrupt if SDEIIR still has something to process
2233         * due to its back queue). */
2234        if (!HAS_PCH_NOP(dev_priv)) {
2235                sde_ier = I915_READ(SDEIER);
2236                I915_WRITE(SDEIER, 0);
2237                POSTING_READ(SDEIER);
2238        }
2239
2240        /* Find, clear, then process each source of interrupt */
2241
2242        gt_iir = I915_READ(GTIIR);
2243        if (gt_iir) {
2244                I915_WRITE(GTIIR, gt_iir);
2245                ret = IRQ_HANDLED;
2246                if (INTEL_GEN(dev_priv) >= 6)
2247                        snb_gt_irq_handler(dev_priv, gt_iir);
2248                else
2249                        ilk_gt_irq_handler(dev_priv, gt_iir);
2250        }
2251
2252        de_iir = I915_READ(DEIIR);
2253        if (de_iir) {
2254                I915_WRITE(DEIIR, de_iir);
2255                ret = IRQ_HANDLED;
2256                if (INTEL_GEN(dev_priv) >= 7)
2257                        ivb_display_irq_handler(dev_priv, de_iir);
2258                else
2259                        ilk_display_irq_handler(dev_priv, de_iir);
2260        }
2261
2262        if (INTEL_GEN(dev_priv) >= 6) {
2263                u32 pm_iir = I915_READ(GEN6_PMIIR);
2264                if (pm_iir) {
2265                        I915_WRITE(GEN6_PMIIR, pm_iir);
2266                        ret = IRQ_HANDLED;
2267                        gen6_rps_irq_handler(dev_priv, pm_iir);
2268                }
2269        }
2270
2271        I915_WRITE(DEIER, de_ier);
2272        POSTING_READ(DEIER);
2273        if (!HAS_PCH_NOP(dev_priv)) {
2274                I915_WRITE(SDEIER, sde_ier);
2275                POSTING_READ(SDEIER);
2276        }
2277
2278        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2279        enable_rpm_wakeref_asserts(dev_priv);
2280
2281        return ret;
2282}
2283
2284static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2285                                u32 hotplug_trigger,
2286                                const u32 hpd[HPD_NUM_PINS])
2287{
2288        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2289
2290        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2291        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2292
2293        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2294                           dig_hotplug_reg, hpd,
2295                           bxt_port_hotplug_long_detect);
2296
2297        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2298}
2299
2300static irqreturn_t
2301gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2302{
2303        irqreturn_t ret = IRQ_NONE;
2304        u32 iir;
2305        enum pipe pipe;
2306
2307        if (master_ctl & GEN8_DE_MISC_IRQ) {
2308                iir = I915_READ(GEN8_DE_MISC_IIR);
2309                if (iir) {
2310                        I915_WRITE(GEN8_DE_MISC_IIR, iir);
2311                        ret = IRQ_HANDLED;
2312                        if (iir & GEN8_DE_MISC_GSE)
2313                                intel_opregion_asle_intr(dev_priv);
2314                        else
2315                                DRM_ERROR("Unexpected DE Misc interrupt\n");
2316                } else {
2317                        DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2318                }
2319        }
2320
2321        if (master_ctl & GEN8_DE_PORT_IRQ) {
2322                iir = I915_READ(GEN8_DE_PORT_IIR);
2323                if (iir) {
2324                        u32 tmp_mask;
2325                        bool found = false;
2326
2327                        I915_WRITE(GEN8_DE_PORT_IIR, iir);
2328                        ret = IRQ_HANDLED;
2329
2330                        tmp_mask = GEN8_AUX_CHANNEL_A;
2331                        if (INTEL_INFO(dev_priv)->gen >= 9)
2332                                tmp_mask |= GEN9_AUX_CHANNEL_B |
2333                                            GEN9_AUX_CHANNEL_C |
2334                                            GEN9_AUX_CHANNEL_D;
2335
2336                        if (iir & tmp_mask) {
2337                                dp_aux_irq_handler(dev_priv);
2338                                found = true;
2339                        }
2340
2341                        if (IS_BROXTON(dev_priv)) {
2342                                tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2343                                if (tmp_mask) {
2344                                        bxt_hpd_irq_handler(dev_priv, tmp_mask,
2345                                                            hpd_bxt);
2346                                        found = true;
2347                                }
2348                        } else if (IS_BROADWELL(dev_priv)) {
2349                                tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2350                                if (tmp_mask) {
2351                                        ilk_hpd_irq_handler(dev_priv,
2352                                                            tmp_mask, hpd_bdw);
2353                                        found = true;
2354                                }
2355                        }
2356
2357                        if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2358                                gmbus_irq_handler(dev_priv);
2359                                found = true;
2360                        }
2361
2362                        if (!found)
2363                                DRM_ERROR("Unexpected DE Port interrupt\n");
2364                } else {
2365                        DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2366                }
2367        }
2368
2369        for_each_pipe(dev_priv, pipe) {
2370                u32 flip_done, fault_errors;
2371
2372                if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2373                        continue;
2374
2375                iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2376                if (!iir) {
2377                        DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2378                        continue;
2379                }
2380
2381                ret = IRQ_HANDLED;
2382                I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2383
2384                if (iir & GEN8_PIPE_VBLANK &&
2385                    intel_pipe_handle_vblank(dev_priv, pipe))
2386                        intel_check_page_flip(dev_priv, pipe);
2387
2388                flip_done = iir;
2389                if (INTEL_INFO(dev_priv)->gen >= 9)
2390                        flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2391                else
2392                        flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2393
2394                if (flip_done)
2395                        intel_finish_page_flip_cs(dev_priv, pipe);
2396
2397                if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2398                        hsw_pipe_crc_irq_handler(dev_priv, pipe);
2399
2400                if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2401                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2402
2403                fault_errors = iir;
2404                if (INTEL_INFO(dev_priv)->gen >= 9)
2405                        fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2406                else
2407                        fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2408
2409                if (fault_errors)
2410                        DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2411                                  pipe_name(pipe),
2412                                  fault_errors);
2413        }
2414
2415        if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2416            master_ctl & GEN8_DE_PCH_IRQ) {
2417                /*
2418                 * FIXME(BDW): Assume for now that the new interrupt handling
2419                 * scheme also closed the SDE interrupt handling race we've seen
2420                 * on older pch-split platforms. But this needs testing.
2421                 */
2422                iir = I915_READ(SDEIIR);
2423                if (iir) {
2424                        I915_WRITE(SDEIIR, iir);
2425                        ret = IRQ_HANDLED;
2426
2427                        if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2428                                spt_irq_handler(dev_priv, iir);
2429                        else
2430                                cpt_irq_handler(dev_priv, iir);
2431                } else {
2432                        /*
2433                         * Like on previous PCH there seems to be something
2434                         * fishy going on with forwarding PCH interrupts.
2435                         */
2436                        DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2437                }
2438        }
2439
2440        return ret;
2441}
2442
2443static irqreturn_t gen8_irq_handler(int irq, void *arg)
2444{
2445        struct drm_device *dev = arg;
2446        struct drm_i915_private *dev_priv = to_i915(dev);
2447        u32 master_ctl;
2448        u32 gt_iir[4] = {};
2449        irqreturn_t ret;
2450
2451        if (!intel_irqs_enabled(dev_priv))
2452                return IRQ_NONE;
2453
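            /*
             * The _FW accessors skip the forcewake dance; the master IRQ
             * register should be accessible without forcewake, which keeps
             * this hot path cheap.
             */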
2454        master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2455        master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2456        if (!master_ctl)
2457                return IRQ_NONE;
2458
2459        I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2460
2461        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2462        disable_rpm_wakeref_asserts(dev_priv);
2463
2464        /* Find, clear, then process each source of interrupt */
2465        ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2466        gen8_gt_irq_handler(dev_priv, gt_iir);
2467        ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2468
2469        I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2470        POSTING_READ_FW(GEN8_MASTER_IRQ);
2471
2472        enable_rpm_wakeref_asserts(dev_priv);
2473
2474        return ret;
2475}
2476
2477static void i915_error_wake_up(struct drm_i915_private *dev_priv)
2478{
2479        /*
2480         * Notify all waiters for GPU completion events that reset state has
2481         * been changed, and that they need to restart their wait after
2482         * checking for potential errors (and bail out to drop locks if there is
2483         * a gpu reset pending so that i915_error_work_func can acquire them).
2484         */
2485
2486        /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2487        wake_up_all(&dev_priv->gpu_error.wait_queue);
2488
2489        /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2490        wake_up_all(&dev_priv->pending_flip_queue);
2491}
2492
2493/**
2494 * i915_reset_and_wakeup - do process context error handling work
2495 * @dev_priv: i915 device private
2496 *
2497 * Fire an error uevent so userspace can see that a hang or error
2498 * was detected.
2499 */
2500static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2501{
2502        struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2503        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2504        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2505        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
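            /*
             * Note: "reset done" is signalled by flipping I915_ERROR_UEVENT
             * back to 0, not by a dedicated uevent string.
             */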
2506
2507        kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2508
2509        DRM_DEBUG_DRIVER("resetting chip\n");
2510        kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2511
2512        /*
2513         * In most cases it's guaranteed that we get here with an RPM
2514         * reference held, for example because there is a pending GPU
2515         * request that won't finish until the reset is done. This
2516         * isn't the case at least when we get here by doing a
2517         * simulated reset via debugfs, so get an RPM reference.
2518         */
2519        intel_runtime_pm_get(dev_priv);
2520        intel_prepare_reset(dev_priv);
2521
2522        do {
2523                /*
2524                 * All state reset _must_ be completed before we update the
2525                 * reset counter, for otherwise waiters might miss the reset
2526                 * pending state and not properly drop locks, resulting in
2527                 * deadlocks with the reset work.
2528                 */
2529                if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
2530                        i915_reset(dev_priv);
2531                        mutex_unlock(&dev_priv->drm.struct_mutex);
2532                }
2533
2534                /* We need to wait for anyone holding the lock to wakeup */
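                    /*
                     * wait_on_bit_timeout() returns 0 once
                     * I915_RESET_IN_PROGRESS is clear; on timeout (HZ, ~1s)
                     * it returns non-zero and we retry the trylock above.
                     */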
2535        } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
2536                                     I915_RESET_IN_PROGRESS,
2537                                     TASK_UNINTERRUPTIBLE,
2538                                     HZ));
2539
2540        intel_finish_reset(dev_priv);
2541        intel_runtime_pm_put(dev_priv);
2542
2543        if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
2544                kobject_uevent_env(kobj,
2545                                   KOBJ_CHANGE, reset_done_event);
2546
2547        /*
2548         * Note: The wake_up also serves as a memory barrier so that
2549         * waiters see the updated value of the dev_priv->gpu_error.
2550         */
2551        wake_up_all(&dev_priv->gpu_error.reset_queue);
2552}
2553
2554static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
2555{
2556        uint32_t instdone[I915_NUM_INSTDONE_REG];
2557        u32 eir = I915_READ(EIR);
2558        int pipe, i;
2559
2560        if (!eir)
2561                return;
2562
2563        pr_err("render error detected, EIR: 0x%08x\n", eir);
2564
2565        i915_get_extra_instdone(dev_priv, instdone);
2566
2567        if (IS_G4X(dev_priv)) {
2568                if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2569                        u32 ipeir = I915_READ(IPEIR_I965);
2570
2571                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2572                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2573                        for (i = 0; i < ARRAY_SIZE(instdone); i++)
2574                                pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2575                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2576                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2577                        I915_WRITE(IPEIR_I965, ipeir);
2578                        POSTING_READ(IPEIR_I965);
2579                }
2580                if (eir & GM45_ERROR_PAGE_TABLE) {
2581                        u32 pgtbl_err = I915_READ(PGTBL_ER);
2582                        pr_err("page table error\n");
2583                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2584                        I915_WRITE(PGTBL_ER, pgtbl_err);
2585                        POSTING_READ(PGTBL_ER);
2586                }
2587        }
2588
2589        if (!IS_GEN2(dev_priv)) {
2590                if (eir & I915_ERROR_PAGE_TABLE) {
2591                        u32 pgtbl_err = I915_READ(PGTBL_ER);
2592                        pr_err("page table error\n");
2593                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2594                        I915_WRITE(PGTBL_ER, pgtbl_err);
2595                        POSTING_READ(PGTBL_ER);
2596                }
2597        }
2598
2599        if (eir & I915_ERROR_MEMORY_REFRESH) {
2600                pr_err("memory refresh error:\n");
2601                for_each_pipe(dev_priv, pipe)
2602                        pr_err("pipe %c stat: 0x%08x\n",
2603                               pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2604                /* pipestat has already been acked */
2605        }
2606        if (eir & I915_ERROR_INSTRUCTION) {
2607                pr_err("instruction error\n");
2608                pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2609                for (i = 0; i < ARRAY_SIZE(instdone); i++)
2610                        pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2611                if (INTEL_GEN(dev_priv) < 4) {
2612                        u32 ipeir = I915_READ(IPEIR);
2613
2614                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2615                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2616                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2617                        I915_WRITE(IPEIR, ipeir);
2618                        POSTING_READ(IPEIR);
2619                } else {
2620                        u32 ipeir = I915_READ(IPEIR_I965);
2621
2622                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2623                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2624                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2625                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2626                        I915_WRITE(IPEIR_I965, ipeir);
2627                        POSTING_READ(IPEIR_I965);
2628                }
2629        }
2630
2631        I915_WRITE(EIR, eir);
2632        POSTING_READ(EIR);
2633        eir = I915_READ(EIR);
2634        if (eir) {
2635                /*
2636                 * some errors might have become stuck,
2637                 * mask them.
2638                 */
2639                DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2640                I915_WRITE(EMR, I915_READ(EMR) | eir);
2641                I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2642        }
2643}
2644
2645/**
2646 * i915_handle_error - handle a gpu error
2647 * @dev_priv: i915 device private
2648 * @engine_mask: mask representing engines that are hung
2649 * @fmt: Error message format string
2650 *
2651 * Do some basic checking of register state at error time and dump it to the
2652 * syslog.  Also call i915_capture_error_state() to make sure we get a record
2653 * and make it available in debugfs.  Fire a uevent so userspace knows
2654 * something bad happened (should trigger collection of a ring dump etc.).
2655 */
2656void i915_handle_error(struct drm_i915_private *dev_priv,
2657                       u32 engine_mask,
2658                       const char *fmt, ...)
2659{
2660        va_list args;
2661        char error_msg[80];
2662
2663        va_start(args, fmt);
2664        vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2665        va_end(args);
2666
2667        i915_capture_error_state(dev_priv, engine_mask, error_msg);
2668        i915_report_and_clear_eir(dev_priv);
2669
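            /* No engines hung: only the error capture above was wanted. */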
2670        if (!engine_mask)
2671                return;
2672
2673        if (test_and_set_bit(I915_RESET_IN_PROGRESS,
2674                             &dev_priv->gpu_error.flags))
2675                return;
2676
2677        /*
2678         * Wakeup waiting processes so that the reset function
2679         * i915_reset_and_wakeup doesn't deadlock trying to grab
2680         * various locks. By bumping the reset counter first, the woken
2681         * processes will see a reset in progress and back off,
2682         * releasing their locks and then wait for the reset completion.
2683         * We must do this for _all_ gpu waiters that might hold locks
2684         * that the reset work needs to acquire.
2685         *
2686         * Note: The wake_up also provides a memory barrier to ensure that the
2687         * waiters see the updated value of the reset flags.
2688         */
2689        i915_error_wake_up(dev_priv);
2690
2691        i915_reset_and_wakeup(dev_priv);
2692}
2693
2694/* Called from drm generic code, passed 'crtc' which
2695 * we use as a pipe index
2696 */
2697static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2698{
2699        struct drm_i915_private *dev_priv = to_i915(dev);
2700        unsigned long irqflags;
2701
2702        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2703        if (INTEL_INFO(dev)->gen >= 4)
2704                i915_enable_pipestat(dev_priv, pipe,
2705                                     PIPE_START_VBLANK_INTERRUPT_STATUS);
2706        else
2707                i915_enable_pipestat(dev_priv, pipe,
2708                                     PIPE_VBLANK_INTERRUPT_STATUS);
2709        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2710
2711        return 0;
2712}
2713
2714static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2715{
2716        struct drm_i915_private *dev_priv = to_i915(dev);
2717        unsigned long irqflags;
2718        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2719                                                     DE_PIPE_VBLANK(pipe);
2720
2721        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2722        ilk_enable_display_irq(dev_priv, bit);
2723        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2724
2725        return 0;
2726}
2727
2728static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2729{
2730        struct drm_i915_private *dev_priv = to_i915(dev);
2731        unsigned long irqflags;
2732
2733        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2734        i915_enable_pipestat(dev_priv, pipe,
2735                             PIPE_START_VBLANK_INTERRUPT_STATUS);
2736        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2737
2738        return 0;
2739}
2740
2741static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2742{
2743        struct drm_i915_private *dev_priv = to_i915(dev);
2744        unsigned long irqflags;
2745
2746        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2747        bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2748        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2749
2750        return 0;
2751}
2752
2753/* Called from drm generic code, passed 'crtc' which
2754 * we use as a pipe index
2755 */
2756static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2757{
2758        struct drm_i915_private *dev_priv = to_i915(dev);
2759        unsigned long irqflags;
2760
2761        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2762        i915_disable_pipestat(dev_priv, pipe,
2763                              PIPE_VBLANK_INTERRUPT_STATUS |
2764                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2765        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2766}
2767
2768static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2769{
2770        struct drm_i915_private *dev_priv = to_i915(dev);
2771        unsigned long irqflags;
2772        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2773                                                     DE_PIPE_VBLANK(pipe);
2774
2775        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2776        ilk_disable_display_irq(dev_priv, bit);
2777        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2778}
2779
2780static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2781{
2782        struct drm_i915_private *dev_priv = to_i915(dev);
2783        unsigned long irqflags;
2784
2785        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2786        i915_disable_pipestat(dev_priv, pipe,
2787                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2788        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2789}
2790
2791static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2792{
2793        struct drm_i915_private *dev_priv = to_i915(dev);
2794        unsigned long irqflags;
2795
2796        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2797        bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2798        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2799}
2800
2801static bool
2802ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
2803{
2804        if (INTEL_GEN(engine->i915) >= 8) {
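                    /*
                     * 0x1c in bits 28:23 is the MI_SEMAPHORE_WAIT opcode on
                     * gen8+; the shift also verifies that the MI client
                     * field (bits 31:29) is zero.
                     */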
2805                return (ipehr >> 23) == 0x1c;
2806        } else {
2807                ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2808                return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2809                                 MI_SEMAPHORE_REGISTER);
2810        }
2811}
2812
2813static struct intel_engine_cs *
2814semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2815                                 u64 offset)
2816{
2817        struct drm_i915_private *dev_priv = engine->i915;
2818        struct intel_engine_cs *signaller;
2819
2820        if (INTEL_GEN(dev_priv) >= 8) {
2821                for_each_engine(signaller, dev_priv) {
2822                        if (engine == signaller)
2823                                continue;
2824
2825                        if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
2826                                return signaller;
2827                }
2828        } else {
2829                u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2830
2831                for_each_engine(signaller, dev_priv) {
2832                        if (engine == signaller)
2833                                continue;
2834
2835                        if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
2836                                return signaller;
2837                }
2838        }
2839
2840        DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
2841                         engine->name, ipehr, offset);
2842
2843        return ERR_PTR(-ENODEV);
2844}
2845
2846static struct intel_engine_cs *
2847semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2848{
2849        struct drm_i915_private *dev_priv = engine->i915;
2850        void __iomem *vaddr;
2851        u32 cmd, ipehr, head;
2852        u64 offset = 0;
2853        int i, backwards;
2854
2855        /*
2856         * This function does not support execlist mode - any attempt to
2857         * proceed further into this function will result in a kernel panic
2858         * when dereferencing ring->buffer, which is not set up in execlist
2859         * mode.
2860         *
2861         * The correct way of doing it would be to derive the currently
2862         * executing ring buffer from the current context, which is derived
2863         * from the currently running request. Unfortunately, to get the
2864         * current request we would have to grab the struct_mutex before doing
2865         * anything else, which would be ill-advised since some other thread
2866         * might have grabbed it already and managed to hang itself, causing
2867         * the hang checker to deadlock.
2868         *
2869         * Therefore, this function does not support execlist mode in its
2870         * current form. Just return NULL and move on.
2871         */
2872        if (engine->buffer == NULL)
2873                return NULL;
2874
2875        ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2876        if (!ipehr_is_semaphore_wait(engine, ipehr))
2877                return NULL;
2878
2879        /*
2880         * HEAD is likely pointing to the dword after the actual command,
2881         * so scan backwards until we find the MBOX. But limit it to just 3
2882         * or 4 dwords depending on the semaphore wait command size.
2883         * Note that we don't care about ACTHD here since that might
2884         * point at a batch, and semaphores are always emitted into the
2885         * ringbuffer itself.
2886         */
2887        head = I915_READ_HEAD(engine) & HEAD_ADDR;
2888        backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2889        vaddr = (void __iomem *)engine->buffer->vaddr;
2890
2891        for (i = backwards; i; --i) {
2892                /*
2893                 * Be paranoid and presume the hw has gone off into the wild -
2894                 * our ring is smaller than what the hardware (and hence
2895                 * HEAD_ADDR) allows. Also handles wrap-around.
2896                 */
2897                head &= engine->buffer->size - 1;
2898
2899                /* This here seems to blow up */
2900                cmd = ioread32(vaddr + head);
2901                if (cmd == ipehr)
2902                        break;
2903
2904                head -= 4;
2905        }
2906
2907        if (!i)
2908                return NULL;
2909
2910        *seqno = ioread32(vaddr + head + 4) + 1;
2911        if (INTEL_GEN(dev_priv) >= 8) {
2912                offset = ioread32(vaddr + head + 12);
2913                offset <<= 32;
2914                offset |= ioread32(vaddr + head + 8);
2915        }
2916        return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2917}
2918
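/*
 * Returns 1 if the semaphore the engine waits on has already been
 * signalled, 0 if it is still legitimately waiting, and -1 if the wait
 * cannot be decoded or a (possibly recursive) deadlock between engines
 * is suspected.
 */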
2919static int semaphore_passed(struct intel_engine_cs *engine)
2920{
2921        struct drm_i915_private *dev_priv = engine->i915;
2922        struct intel_engine_cs *signaller;
2923        u32 seqno;
2924
2925        engine->hangcheck.deadlock++;
2926
2927        signaller = semaphore_waits_for(engine, &seqno);
2928        if (signaller == NULL)
2929                return -1;
2930
2931        if (IS_ERR(signaller))
2932                return 0;
2933
2934        /* Prevent pathological recursion due to driver bugs */
2935        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2936                return -1;
2937
2938        if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
2939                return 1;
2940
2941        /* cursory check for an unkickable deadlock */
2942        if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2943            semaphore_passed(signaller) < 0)
2944                return -1;
2945
2946        return 0;
2947}
2948
2949static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2950{
2951        struct intel_engine_cs *engine;
2952
2953        for_each_engine(engine, dev_priv)
2954                engine->hangcheck.deadlock = 0;
2955}
2956
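/*
 * Returns true when the engine's sub-units show no forward progress. Only
 * the render engine has INSTDONE sub-unit state; the other engines always
 * report true here and rely on the ACTHD checks alone.
 */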
2957static bool subunits_stuck(struct intel_engine_cs *engine)
2958{
2959        u32 instdone[I915_NUM_INSTDONE_REG];
2960        bool stuck;
2961        int i;
2962
2963        if (engine->id != RCS)
2964                return true;
2965
2966        i915_get_extra_instdone(engine->i915, instdone);
2967
2968        /* There might be unstable subunit states even when
2969         * actual head is not moving. Filter out the unstable ones by
2970         * accumulating the undone -> done transitions and only
2971         * consider those as progress.
2972         */
2973        stuck = true;
2974        for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
2975                const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
2976
2977                if (tmp != engine->hangcheck.instdone[i])
2978                        stuck = false;
2979
2980                engine->hangcheck.instdone[i] |= tmp;
2981        }
2982
2983        return stuck;
2984}
2985
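/*
 * Classify head progress: a moving ACTHD resets the accumulated sub-unit
 * state and counts as active; otherwise the engine is only declared hung
 * once its sub-units have stopped making progress as well.
 */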
2986static enum intel_engine_hangcheck_action
2987head_stuck(struct intel_engine_cs *engine, u64 acthd)
2988{
2989        if (acthd != engine->hangcheck.acthd) {
2990
2991                /* Clear subunit states on head movement */
2992                memset(engine->hangcheck.instdone, 0,
2993                       sizeof(engine->hangcheck.instdone));
2994
2995                return HANGCHECK_ACTIVE;
2996        }
2997
2998        if (!subunits_stuck(engine))
2999                return HANGCHECK_ACTIVE;
3000
3001        return HANGCHECK_HUNG;
3002}
3003
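/*
 * Decide what to do with an engine whose head is no longer advancing: a
 * ring parked on WAIT_FOR_EVENT or on an already-signalled semaphore can
 * be kicked by poking its CTL register, otherwise report it as hung.
 */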
3004static enum intel_engine_hangcheck_action
3005engine_stuck(struct intel_engine_cs *engine, u64 acthd)
3006{
3007        struct drm_i915_private *dev_priv = engine->i915;
3008        enum intel_engine_hangcheck_action ha;
3009        u32 tmp;
3010
3011        ha = head_stuck(engine, acthd);
3012        if (ha != HANGCHECK_HUNG)
3013                return ha;
3014
3015        if (IS_GEN2(dev_priv))
3016                return HANGCHECK_HUNG;
3017
3018        /* Is the chip hanging on a WAIT_FOR_EVENT?
3019         * If so we can simply poke the RB_WAIT bit
3020         * and break the hang. This should work on
3021         * all but the second generation chipsets.
3022         */
3023        tmp = I915_READ_CTL(engine);
3024        if (tmp & RING_WAIT) {
3025                i915_handle_error(dev_priv, 0,
3026                                  "Kicking stuck wait on %s",
3027                                  engine->name);
3028                I915_WRITE_CTL(engine, tmp);
3029                return HANGCHECK_KICK;
3030        }
3031
3032        if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3033                switch (semaphore_passed(engine)) {
3034                default:
3035                        return HANGCHECK_HUNG;
3036                case 1:
3037                        i915_handle_error(dev_priv, 0,
3038                                          "Kicking stuck semaphore on %s",
3039                                          engine->name);
3040                        I915_WRITE_CTL(engine, tmp);
3041                        return HANGCHECK_KICK;
3042                case 0:
3043                        return HANGCHECK_WAIT;
3044                }
3045        }
3046
3047        return HANGCHECK_HUNG;
3048}
3049
3050/*
3051 * This is called when the chip hasn't reported back with completed
3052 * batchbuffers in a long time. We keep track of per-ring seqno progress;
3053 * if there is no progress, the hangcheck score for that ring is increased.
3054 * Further, acthd is inspected to see if the ring is stuck. If it is, we
3055 * kick the ring. If we see no progress on three subsequent calls we
3056 * assume the chip is wedged and try to fix it by resetting the chip.
3057 */
3058static void i915_hangcheck_elapsed(struct work_struct *work)
3059{
3060        struct drm_i915_private *dev_priv =
3061                container_of(work, typeof(*dev_priv),
3062                             gpu_error.hangcheck_work.work);
3063        struct intel_engine_cs *engine;
3064        unsigned int hung = 0, stuck = 0;
3065        int busy_count = 0;
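/* Hangcheck score deltas applied per detected state; ACTIVE_DECAY is how
 * quickly observed progress pays an accumulated score back down.
 */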
3066#define BUSY 1
3067#define KICK 5
3068#define HUNG 20
3069#define ACTIVE_DECAY 15
3070
3071        if (!i915.enable_hangcheck)
3072                return;
3073
3074        if (!READ_ONCE(dev_priv->gt.awake))
3075                return;
3076
3077        /* As enabling the GPU requires fairly extensive mmio access,
3078         * periodically arm the mmio checker to see if we are triggering
3079         * any invalid access.
3080         */
3081        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3082
3083        for_each_engine(engine, dev_priv) {
3084                bool busy = intel_engine_has_waiter(engine);
3085                u64 acthd;
3086                u32 seqno;
3087                u32 submit;
3088
3089                semaphore_clear_deadlocks(dev_priv);
3090
3091                /* We don't strictly need an irq-barrier here, as we are not
3092                 * serving an interrupt request, but be paranoid in case the
3093                 * barrier has side-effects (such as preventing a broken
3094                 * cacheline snoop) and so be sure that we can see the seqno
3095                 * advance. If the seqno should stick, due to a stale
3096                 * cacheline, we would erroneously declare the GPU hung.
3097                 */
3098                if (engine->irq_seqno_barrier)
3099                        engine->irq_seqno_barrier(engine);
3100
3101                acthd = intel_engine_get_active_head(engine);
3102                seqno = intel_engine_get_seqno(engine);
3103                submit = READ_ONCE(engine->last_submitted_seqno);
3104
3105                if (engine->hangcheck.seqno == seqno) {
3106                        if (i915_seqno_passed(seqno, submit)) {
3107                                engine->hangcheck.action = HANGCHECK_IDLE;
3108                        } else {
3109                                /* We always increment the hangcheck score
3110                                 * if the engine is busy and still processing
3111                                 * the same request, so that no single request
3112                                 * can run indefinitely (such as a chain of
3113                                 * batches). The only time we do not increment
3114                                 * the hangcheck score on this ring is if this
3115                                 * engine is in a legitimate wait for another
3116                                 * engine. In that case the waiting engine is a
3117                                 * victim and we want to be sure we catch the
3118                                 * right culprit. Then every time we do kick
3119                                 * the ring, add a small increment to the
3120                                 * score so that we can catch a batch that is
3121                                 * being repeatedly kicked and so responsible
3122                                 * for stalling the machine.
3123                                 */
3124                                engine->hangcheck.action =
3125                                        engine_stuck(engine, acthd);
3126
3127                                switch (engine->hangcheck.action) {
3128                                case HANGCHECK_IDLE:
3129                                case HANGCHECK_WAIT:
3130                                        break;
3131                                case HANGCHECK_ACTIVE:
3132                                        engine->hangcheck.score += BUSY;
3133                                        break;
3134                                case HANGCHECK_KICK:
3135                                        engine->hangcheck.score += KICK;
3136                                        break;
3137                                case HANGCHECK_HUNG:
3138                                        engine->hangcheck.score += HUNG;
3139                                        break;
3140                                }
3141                        }
3142
3143                        if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3144                                hung |= intel_engine_flag(engine);
3145                                if (engine->hangcheck.action != HANGCHECK_HUNG)
3146                                        stuck |= intel_engine_flag(engine);
3147                        }
3148                } else {
3149                        engine->hangcheck.action = HANGCHECK_ACTIVE;
3150
3151                        /* Gradually reduce the count so that we catch DoS
3152                         * attempts across multiple batches.
3153                         */
3154                        if (engine->hangcheck.score > 0)
3155                                engine->hangcheck.score -= ACTIVE_DECAY;
3156                        if (engine->hangcheck.score < 0)
3157                                engine->hangcheck.score = 0;
3158
3159                        /* Clear head and subunit states on seqno movement */
3160                        acthd = 0;
3161
3162                        memset(engine->hangcheck.instdone, 0,
3163                               sizeof(engine->hangcheck.instdone));
3164                }
3165
3166                engine->hangcheck.seqno = seqno;
3167                engine->hangcheck.acthd = acthd;
3168                busy_count += busy;
3169        }
3170
3171        if (hung) {
3172                char msg[80];
3173                unsigned int tmp;
3174                int len;
3175
3176                /* If some rings hung but others were still busy, only
3177                 * blame the hanging rings in the synopsis.
3178                 */
3179                if (stuck != hung)
3180                        hung &= ~stuck;
3181                len = scnprintf(msg, sizeof(msg),
3182                                "%s on ", stuck == hung ? "No progress" : "Hang");
3183                for_each_engine_masked(engine, dev_priv, hung, tmp)
3184                        len += scnprintf(msg + len, sizeof(msg) - len,
3185                                         "%s, ", engine->name);
3186                msg[len-2] = '\0';
3187
3188                return i915_handle_error(dev_priv, hung, msg);
3189        }
3190
3191        /* Reset timer in case GPU hangs without another request being added */
3192        if (busy_count)
3193                i915_queue_hangcheck(dev_priv);
3194}
3195
3196static void ibx_irq_reset(struct drm_device *dev)
3197{
3198        struct drm_i915_private *dev_priv = to_i915(dev);
3199
3200        if (HAS_PCH_NOP(dev))
3201                return;
3202
3203        GEN5_IRQ_RESET(SDE);
3204
3205        if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3206                I915_WRITE(SERR_INT, 0xffffffff);
3207}
3208
3209/*
3210 * SDEIER is also touched by the interrupt handler to work around missed PCH
3211 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3212 * instead we unconditionally enable all PCH interrupt sources here, but then
3213 * only unmask them as needed with SDEIMR.
3214 *
3215 * This function needs to be called before interrupts are enabled.
3216 */
3217static void ibx_irq_pre_postinstall(struct drm_device *dev)
3218{
3219        struct drm_i915_private *dev_priv = to_i915(dev);
3220
3221        if (HAS_PCH_NOP(dev))
3222                return;
3223
3224        WARN_ON(I915_READ(SDEIER) != 0);
3225        I915_WRITE(SDEIER, 0xffffffff);
3226        POSTING_READ(SDEIER);
3227}
3228
3229static void gen5_gt_irq_reset(struct drm_device *dev)
3230{
3231        struct drm_i915_private *dev_priv = to_i915(dev);
3232
3233        GEN5_IRQ_RESET(GT);
3234        if (INTEL_INFO(dev)->gen >= 6)
3235                GEN5_IRQ_RESET(GEN6_PM);
3236}
3237
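/*
 * Reset all VLV/CHV display-side interrupt state: ack any stale DPINVGTT
 * and hotplug status, clear and mask the per-pipe PIPESTAT bits, and tear
 * down the VLV IMR/IER/IIR triplet. Called with dev_priv->irq_lock held.
 */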
3238static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3239{
3240        enum pipe pipe;
3241
3242        if (IS_CHERRYVIEW(dev_priv))
3243                I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3244        else
3245                I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3246
3247        i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3248        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3249
3250        for_each_pipe(dev_priv, pipe) {
3251                I915_WRITE(PIPESTAT(pipe),
3252                           PIPE_FIFO_UNDERRUN_STATUS |
3253                           PIPESTAT_INT_STATUS_MASK);
3254                dev_priv->pipestat_irq_mask[pipe] = 0;
3255        }
3256
3257        GEN5_IRQ_RESET(VLV_);
3258        dev_priv->irq_mask = ~0;
3259}
3260
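/*
 * Re-enable the VLV/CHV display interrupts: the GMBUS pipestat on pipe A,
 * flip-done and CRC-done pipestats on every pipe, and the pipe/port event
 * bits in the VLV IMR/IER pair. Called with dev_priv->irq_lock held.
 */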
3261static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3262{
3263        u32 pipestat_mask;
3264        u32 enable_mask;
3265        enum pipe pipe;
3266
3267        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3268                        PIPE_CRC_DONE_INTERRUPT_STATUS;
3269
3270        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3271        for_each_pipe(dev_priv, pipe)
3272                i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3273
3274        enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3275                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3276                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3277        if (IS_CHERRYVIEW(dev_priv))
3278                enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3279
3280        WARN_ON(dev_priv->irq_mask != ~0);
3281
3282        dev_priv->irq_mask = ~enable_mask;
3283
3284        GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3285}
3286
3287/* drm_dma.h hooks */
3289static void ironlake_irq_reset(struct drm_device *dev)
3290{
3291        struct drm_i915_private *dev_priv = to_i915(dev);
3292
3293        I915_WRITE(HWSTAM, 0xffffffff);
3294
3295        GEN5_IRQ_RESET(DE);
3296        if (IS_GEN7(dev))
3297                I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3298
3299        gen5_gt_irq_reset(dev);
3300
3301        ibx_irq_reset(dev);
3302}
3303
3304static void valleyview_irq_preinstall(struct drm_device *dev)
3305{
3306        struct drm_i915_private *dev_priv = to_i915(dev);
3307
3308        I915_WRITE(VLV_MASTER_IER, 0);
3309        POSTING_READ(VLV_MASTER_IER);
3310
3311        gen5_gt_irq_reset(dev);
3312
3313        spin_lock_irq(&dev_priv->irq_lock);
3314        if (dev_priv->display_irqs_enabled)
3315                vlv_display_irq_reset(dev_priv);
3316        spin_unlock_irq(&dev_priv->irq_lock);
3317}
3318
3319static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3320{
3321        GEN8_IRQ_RESET_NDX(GT, 0);
3322        GEN8_IRQ_RESET_NDX(GT, 1);
3323        GEN8_IRQ_RESET_NDX(GT, 2);
3324        GEN8_IRQ_RESET_NDX(GT, 3);
3325}
3326
3327static void gen8_irq_reset(struct drm_device *dev)
3328{
3329        struct drm_i915_private *dev_priv = to_i915(dev);
3330        int pipe;
3331
3332        I915_WRITE(GEN8_MASTER_IRQ, 0);
3333        POSTING_READ(GEN8_MASTER_IRQ);
3334
3335        gen8_gt_irq_reset(dev_priv);
3336
3337        for_each_pipe(dev_priv, pipe)
3338                if (intel_display_power_is_enabled(dev_priv,
3339                                                   POWER_DOMAIN_PIPE(pipe)))
3340                        GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3341
3342        GEN5_IRQ_RESET(GEN8_DE_PORT_);
3343        GEN5_IRQ_RESET(GEN8_DE_MISC_);
3344        GEN5_IRQ_RESET(GEN8_PCU_);
3345
3346        if (HAS_PCH_SPLIT(dev))
3347                ibx_irq_reset(dev);
3348}
3349
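/**
 * gen8_irq_power_well_post_enable - re-init pipe interrupts after enabling a power well
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupt registers need re-programming
 *
 * The DE pipe interrupt registers are lost when their power well goes
 * down, so re-program IMR/IER for each pipe in @pipe_mask from the cached
 * de_irq_mask, with vblank and FIFO underrun reporting enabled on top.
 */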
3350void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3351                                     unsigned int pipe_mask)
3352{
3353        uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3354        enum pipe pipe;
3355
3356        spin_lock_irq(&dev_priv->irq_lock);
3357        for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3358                GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3359                                  dev_priv->de_irq_mask[pipe],
3360                                  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3361        spin_unlock_irq(&dev_priv->irq_lock);
3362}
3363
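/**
 * gen8_irq_power_well_pre_disable - quiesce pipe interrupts before disabling a power well
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupts should be shut off
 *
 * Reset the DE pipe interrupt registers for each pipe in @pipe_mask and
 * then wait for any in-flight display interrupt handlers to complete.
 */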
3364void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3365                                     unsigned int pipe_mask)
3366{
3367        enum pipe pipe;
3368
3369        spin_lock_irq(&dev_priv->irq_lock);
3370        for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3371                GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3372        spin_unlock_irq(&dev_priv->irq_lock);
3373
3374        /* make sure we're done processing display irqs */
3375        synchronize_irq(dev_priv->drm.irq);
3376}
3377
3378static void cherryview_irq_preinstall(struct drm_device *dev)
3379{
3380        struct drm_i915_private *dev_priv = to_i915(dev);
3381
3382        I915_WRITE(GEN8_MASTER_IRQ, 0);
3383        POSTING_READ(GEN8_MASTER_IRQ);
3384
3385        gen8_gt_irq_reset(dev_priv);
3386
3387        GEN5_IRQ_RESET(GEN8_PCU_);
3388
3389        spin_lock_irq(&dev_priv->irq_lock);
3390        if (dev_priv->display_irqs_enabled)
3391                vlv_display_irq_reset(dev_priv);
3392        spin_unlock_irq(&dev_priv->irq_lock);
3393}
3394
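/*
 * Collect the HPD IRQ bits for every encoder whose hotplug pin is
 * currently enabled, using the platform's pin-to-bit table @hpd.
 */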
3395static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3396                                  const u32 hpd[HPD_NUM_PINS])
3397{
3398        struct intel_encoder *encoder;
3399        u32 enabled_irqs = 0;
3400
3401        for_each_intel_encoder(&dev_priv->drm, encoder)
3402                if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3403                        enabled_irqs |= hpd[encoder->hpd_pin];
3404
3405        return enabled_irqs;
3406}
3407
3408static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3409{
3410        u32 hotplug_irqs, hotplug, enabled_irqs;
3411
3412        if (HAS_PCH_IBX(dev_priv)) {
3413                hotplug_irqs = SDE_HOTPLUG_MASK;
3414                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3415        } else {
3416                hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3417                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3418        }
3419
3420        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3421
3422        /*
3423         * Enable digital hotplug on the PCH, and configure the DP short pulse
3424         * duration to 2ms (which is the minimum in the Display Port spec).
3425         * The pulse duration bits are reserved on LPT+.
3426         */
3427        hotplug = I915_READ(PCH_PORT_HOTPLUG);
3428        hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3429        hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3430        hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3431        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3432        /*
3433         * When CPU and PCH are on the same package, port A
3434         * HPD must be enabled in both north and south.
3435         */
3436        if (HAS_PCH_LPT_LP(dev_priv))
3437                hotplug |= PORTA_HOTPLUG_ENABLE;
3438        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3439}
3440
3441static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3442{
3443        u32 hotplug_irqs, hotplug, enabled_irqs;
3444
3445        hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3446        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3447
3448        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3449
3450        /* Enable digital hotplug on the PCH */
3451        hotplug = I915_READ(PCH_PORT_HOTPLUG);
3452        hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3453                PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3454        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3455
3456        hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3457        hotplug |= PORTE_HOTPLUG_ENABLE;
3458        I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3459}
3460
3461static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3462{
3463        u32 hotplug_irqs, hotplug, enabled_irqs;
3464
3465        if (INTEL_GEN(dev_priv) >= 8) {
3466                hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3467                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3468
3469                bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3470        } else if (INTEL_GEN(dev_priv) >= 7) {
3471                hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3472                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3473
3474                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3475        } else {
3476                hotplug_irqs = DE_DP_A_HOTPLUG;
3477                enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3478
3479                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3480        }
3481
3482        /*
3483         * Enable digital hotplug on the CPU, and configure the DP short pulse
3484         * duration to 2ms (which is the minimum in the Display Port spec).
3485         * The pulse duration bits are reserved on HSW+.
3486         */
3487        hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3488        hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3489        hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3490        I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3491
3492        ibx_hpd_irq_setup(dev_priv);
3493}
3494
3495static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3496{
3497        u32 hotplug_irqs, hotplug, enabled_irqs;
3498
3499        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3500        hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3501
3502        bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3503
3504        hotplug = I915_READ(PCH_PORT_HOTPLUG);
3505        hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3506                PORTA_HOTPLUG_ENABLE;
3507
3508        DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3509                      hotplug, enabled_irqs);
3510        hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3511
3512        /*
3513         * For BXT, the invert bit has to be set based on the AOB design
3514         * for the HPD detection logic; update it based on the VBT fields.
3515         */
3516
3517        if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3518            intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3519                hotplug |= BXT_DDIA_HPD_INVERT;
3520        if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3521            intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3522                hotplug |= BXT_DDIB_HPD_INVERT;
3523        if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3524            intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3525                hotplug |= BXT_DDIC_HPD_INVERT;
3526
3527        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3528}
3529
3530static void ibx_irq_postinstall(struct drm_device *dev)
3531{
3532        struct drm_i915_private *dev_priv = to_i915(dev);
3533        u32 mask;
3534
3535        if (HAS_PCH_NOP(dev))
3536                return;
3537
3538        if (HAS_PCH_IBX(dev))
3539                mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3540        else
3541                mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3542
3543        gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3544        I915_WRITE(SDEIMR, ~mask);
3545}
3546
3547static void gen5_gt_irq_postinstall(struct drm_device *dev)
3548{
3549        struct drm_i915_private *dev_priv = to_i915(dev);
3550        u32 pm_irqs, gt_irqs;
3551
3552        pm_irqs = gt_irqs = 0;
3553
3554        dev_priv->gt_irq_mask = ~0;
3555        if (HAS_L3_DPF(dev)) {
3556                /* L3 parity interrupt is always unmasked. */
3557                dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3558                gt_irqs |= GT_PARITY_ERROR(dev);
3559        }
3560
3561        gt_irqs |= GT_RENDER_USER_INTERRUPT;
3562        if (IS_GEN5(dev))
3563                gt_irqs |= ILK_BSD_USER_INTERRUPT;
3564        else
3565                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3567
3568        GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3569
3570        if (INTEL_INFO(dev)->gen >= 6) {
3571                /*
3572                 * RPS interrupts will get enabled/disabled on demand when RPS
3573                 * itself is enabled/disabled.
3574                 */
3575                if (HAS_VEBOX(dev))
3576                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3577
3578                dev_priv->pm_irq_mask = 0xffffffff;
3579                GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3580        }
3581}
3582
3583static int ironlake_irq_postinstall(struct drm_device *dev)
3584{
3585        struct drm_i915_private *dev_priv = to_i915(dev);
3586        u32 display_mask, extra_mask;
3587
3588        if (INTEL_INFO(dev)->gen >= 7) {
3589                display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3590                                DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3591                                DE_PLANEB_FLIP_DONE_IVB |
3592                                DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3593                extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3594                              DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3595                              DE_DP_A_HOTPLUG_IVB);
3596        } else {
3597                display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3598                                DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3599                                DE_AUX_CHANNEL_A |
3600                                DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3601                                DE_POISON);
3602                extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3603                              DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3604                              DE_DP_A_HOTPLUG);
3605        }
3606
3607        dev_priv->irq_mask = ~display_mask;
3608
3609        I915_WRITE(HWSTAM, 0xeffe);
3610
3611        ibx_irq_pre_postinstall(dev);
3612
3613        GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3614
3615        gen5_gt_irq_postinstall(dev);
3616
3617        ibx_irq_postinstall(dev);
3618
3619        if (IS_IRONLAKE_M(dev)) {
3620                /* Enable PCU event interrupts
3621                 *
3622                 * spinlocking not required here for correctness since interrupt
3623                 * setup is guaranteed to run in single-threaded context. But we
3624                 * need it to make the assert_spin_locked happy. */
3625                spin_lock_irq(&dev_priv->irq_lock);
3626                ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3627                spin_unlock_irq(&dev_priv->irq_lock);
3628        }
3629
3630        return 0;
3631}
3632
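/*
 * Mark the VLV/CHV display interrupts as wanted and, if interrupts are
 * already live, reset and re-program the display IRQ registers right
 * away. Caller must hold dev_priv->irq_lock.
 */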
3633void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3634{
3635        assert_spin_locked(&dev_priv->irq_lock);
3636
3637        if (dev_priv->display_irqs_enabled)
3638                return;
3639
3640        dev_priv->display_irqs_enabled = true;
3641
3642        if (intel_irqs_enabled(dev_priv)) {
3643                vlv_display_irq_reset(dev_priv);
3644                vlv_display_irq_postinstall(dev_priv);
3645        }
3646}
3647
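/*
 * Counterpart to valleyview_enable_display_irqs(): mark the display
 * interrupts as unwanted and, if interrupts are live, reset the display
 * IRQ registers. Caller must hold dev_priv->irq_lock.
 */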
3648void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3649{
3650        assert_spin_locked(&dev_priv->irq_lock);
3651
3652        if (!dev_priv->display_irqs_enabled)
3653                return;
3654
3655        dev_priv->display_irqs_enabled = false;
3656
3657        if (intel_irqs_enabled(dev_priv))
3658                vlv_display_irq_reset(dev_priv);
3659}
3660
3662static int valleyview_irq_postinstall(struct drm_device *dev)
3663{
3664        struct drm_i915_private *dev_priv = to_i915(dev);
3665
3666        gen5_gt_irq_postinstall(dev);
3667
3668        spin_lock_irq(&dev_priv->irq_lock);
3669        if (dev_priv->display_irqs_enabled)
3670                vlv_display_irq_postinstall(dev_priv);
3671        spin_unlock_irq(&dev_priv->irq_lock);
3672
3673        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3674        POSTING_READ(VLV_MASTER_IER);
3675
3676        return 0;
3677}
3678
3679static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3680{
3681        /* These are interrupts we'll toggle with the ring mask register */
3682        uint32_t gt_interrupts[] = {
3683                GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3684                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3685                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3686                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3687                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3688                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3689                        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3690                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3691                0,
3692                GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3693                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3694                };
3695
3696        if (HAS_L3_DPF(dev_priv))
3697                gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3698
3699        dev_priv->pm_irq_mask = 0xffffffff;
3700        GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3701        GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3702        /*
3703         * RPS interrupts will get enabled/disabled on demand when RPS itself
3704         * is enabled/disabled.
3705         */
3706        GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3707        GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3708}
3709
3710static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3711{
3712        uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3713        uint32_t de_pipe_enables;
3714        u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3715        u32 de_port_enables;
3716        u32 de_misc_masked = GEN8_DE_MISC_GSE;
3717        enum pipe pipe;
3718
3719        if (INTEL_INFO(dev_priv)->gen >= 9) {
3720                de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3721                                  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3722                de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3723                                  GEN9_AUX_CHANNEL_D;
3724                if (IS_BROXTON(dev_priv))
3725                        de_port_masked |= BXT_DE_PORT_GMBUS;
3726        } else {
3727                de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3728                                  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3729        }
3730
3731        de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3732                                           GEN8_PIPE_FIFO_UNDERRUN;
3733
3734        de_port_enables = de_port_masked;
3735        if (IS_BROXTON(dev_priv))
3736                de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3737        else if (IS_BROADWELL(dev_priv))
3738                de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3739
3740        dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3741        dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3742        dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3743
3744        for_each_pipe(dev_priv, pipe)
3745                if (intel_display_power_is_enabled(dev_priv,
3746                                POWER_DOMAIN_PIPE(pipe)))
3747                        GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3748                                          dev_priv->de_irq_mask[pipe],
3749                                          de_pipe_enables);
3750
3751        GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3752        GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3753}
3754
3755static int gen8_irq_postinstall(struct drm_device *dev)
3756{
3757        struct drm_i915_private *dev_priv = to_i915(dev);
3758
3759        if (HAS_PCH_SPLIT(dev))
3760                ibx_irq_pre_postinstall(dev);
3761
3762        gen8_gt_irq_postinstall(dev_priv);
3763        gen8_de_irq_postinstall(dev_priv);
3764
3765        if (HAS_PCH_SPLIT(dev))
3766                ibx_irq_postinstall(dev);
3767
3768        I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3769        POSTING_READ(GEN8_MASTER_IRQ);
3770
3771        return 0;
3772}
3773
3774static int cherryview_irq_postinstall(struct drm_device *dev)
3775{
3776        struct drm_i915_private *dev_priv = to_i915(dev);
3777
3778        gen8_gt_irq_postinstall(dev_priv);
3779
3780        spin_lock_irq(&dev_priv->irq_lock);
3781        if (dev_priv->display_irqs_enabled)
3782                vlv_display_irq_postinstall(dev_priv);
3783        spin_unlock_irq(&dev_priv->irq_lock);
3784
3785        I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3786        POSTING_READ(GEN8_MASTER_IRQ);
3787
3788        return 0;
3789}
3790
3791static void gen8_irq_uninstall(struct drm_device *dev)
3792{
3793        struct drm_i915_private *dev_priv = to_i915(dev);
3794
3795        if (!dev_priv)
3796                return;
3797
3798        gen8_irq_reset(dev);
3799}
3800
3801static void valleyview_irq_uninstall(struct drm_device *dev)
3802{
3803        struct drm_i915_private *dev_priv = to_i915(dev);
3804
3805        if (!dev_priv)
3806                return;
3807
3808        I915_WRITE(VLV_MASTER_IER, 0);
3809        POSTING_READ(VLV_MASTER_IER);
3810
3811        gen5_gt_irq_reset(dev);
3812
3813        I915_WRITE(HWSTAM, 0xffffffff);
3814
3815        spin_lock_irq(&dev_priv->irq_lock);
3816        if (dev_priv->display_irqs_enabled)
3817                vlv_display_irq_reset(dev_priv);
3818        spin_unlock_irq(&dev_priv->irq_lock);
3819}
3820
3821static void cherryview_irq_uninstall(struct drm_device *dev)
3822{
3823        struct drm_i915_private *dev_priv = to_i915(dev);
3824
3825        if (!dev_priv)
3826                return;
3827
3828        I915_WRITE(GEN8_MASTER_IRQ, 0);
3829        POSTING_READ(GEN8_MASTER_IRQ);
3830
3831        gen8_gt_irq_reset(dev_priv);
3832
3833        GEN5_IRQ_RESET(GEN8_PCU_);
3834
3835        spin_lock_irq(&dev_priv->irq_lock);
3836        if (dev_priv->display_irqs_enabled)
3837                vlv_display_irq_reset(dev_priv);
3838        spin_unlock_irq(&dev_priv->irq_lock);
3839}
3840
3841static void ironlake_irq_uninstall(struct drm_device *dev)
3842{
3843        struct drm_i915_private *dev_priv = to_i915(dev);
3844
3845        if (!dev_priv)
3846                return;
3847
3848        ironlake_irq_reset(dev);
3849}
3850
3851static void i8xx_irq_preinstall(struct drm_device *dev)
3852{
3853        struct drm_i915_private *dev_priv = to_i915(dev);
3854        int pipe;
3855
3856        for_each_pipe(dev_priv, pipe)
3857                I915_WRITE(PIPESTAT(pipe), 0);
3858        I915_WRITE16(IMR, 0xffff);
3859        I915_WRITE16(IER, 0x0);
3860        POSTING_READ16(IER);
3861}
3862
3863static int i8xx_irq_postinstall(struct drm_device *dev)
3864{
3865        struct drm_i915_private *dev_priv = to_i915(dev);
3866
3867        I915_WRITE16(EMR,
3868                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3869
3870        /* Unmask the interrupts that we always want on. */
3871        dev_priv->irq_mask =
3872                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3873                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3874                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3875                  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3876        I915_WRITE16(IMR, dev_priv->irq_mask);
3877
3878        I915_WRITE16(IER,
3879                     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3880                     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3881                     I915_USER_INTERRUPT);
3882        POSTING_READ16(IER);
3883
3884        /* Interrupt setup is already guaranteed to be single-threaded; this is
3885         * just to make the assert_spin_locked check happy. */
3886        spin_lock_irq(&dev_priv->irq_lock);
3887        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3888        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3889        spin_unlock_irq(&dev_priv->irq_lock);
3890
3891        return 0;
3892}
3893
3894/*
3895 * Returns true when a page flip has completed.
3896 */
3897static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
3898                               int plane, int pipe, u32 iir)
3899{
3900        u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3901
3902        if (!intel_pipe_handle_vblank(dev_priv, pipe))
3903                return false;
3904
3905        if ((iir & flip_pending) == 0)
3906                goto check_page_flip;
3907
3908        /* We detect FlipDone by looking for the change in PendingFlip from '1'
3909         * to '0' on the following vblank, i.e. IIR has the PendingFlip
3910         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3911         * the flip is completed (no longer pending). Since this doesn't raise
3912         * an interrupt per se, we watch for the change at vblank.
3913         */
3914        if (I915_READ16(ISR) & flip_pending)
3915                goto check_page_flip;
3916
3917        intel_finish_page_flip_cs(dev_priv, pipe);
3918        return true;
3919
3920check_page_flip:
3921        intel_check_page_flip(dev_priv, pipe);
3922        return false;
3923}
3924
3925static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3926{
3927        struct drm_device *dev = arg;
3928        struct drm_i915_private *dev_priv = to_i915(dev);
3929        u16 iir, new_iir;
3930        u32 pipe_stats[2];
3931        int pipe;
3932        u16 flip_mask =
3933                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3934                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3935        irqreturn_t ret;
3936
3937        if (!intel_irqs_enabled(dev_priv))
3938                return IRQ_NONE;
3939
3940        /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3941        disable_rpm_wakeref_asserts(dev_priv);
3942
3943        ret = IRQ_NONE;
3944        iir = I915_READ16(IIR);
3945        if (iir == 0)
3946                goto out;
3947
3948        while (iir & ~flip_mask) {
3949                /* Can't rely on pipestat interrupt bit in iir as it might
3950                 * have been cleared after the pipestat interrupt was received.
3951                 * It doesn't set the bit in iir again, but it still produces
3952                 * interrupts (for non-MSI).
3953                 */
3954                spin_lock(&dev_priv->irq_lock);
3955                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3956                        DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3957
3958                for_each_pipe(dev_priv, pipe) {
3959                        i915_reg_t reg = PIPESTAT(pipe);
3960                        pipe_stats[pipe] = I915_READ(reg);
3961
3962                        /*
3963                         * Clear the PIPE*STAT regs before the IIR
3964                         */
3965                        if (pipe_stats[pipe] & 0x8000ffff)
3966                                I915_WRITE(reg, pipe_stats[pipe]);
3967                }
3968                spin_unlock(&dev_priv->irq_lock);
3969
3970                I915_WRITE16(IIR, iir & ~flip_mask);
3971                new_iir = I915_READ16(IIR); /* Flush posted writes */
3972
3973                if (iir & I915_USER_INTERRUPT)
3974                        notify_ring(&dev_priv->engine[RCS]);
3975
3976                for_each_pipe(dev_priv, pipe) {
3977                        int plane = pipe;
3978                        if (HAS_FBC(dev_priv))
3979                                plane = !plane;
3980
3981                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3982                            i8xx_handle_vblank(dev_priv, plane, pipe, iir))
3983                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3984
3985                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3986                                i9xx_pipe_crc_irq_handler(dev_priv, pipe);
3987
3988                        if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3989                                intel_cpu_fifo_underrun_irq_handler(dev_priv,
3990                                                                    pipe);
3991                }
3992
3993                iir = new_iir;
3994        }
3995        ret = IRQ_HANDLED;
3996
3997out:
3998        enable_rpm_wakeref_asserts(dev_priv);
3999
4000        return ret;
4001}
4002
4003static void i8xx_irq_uninstall(struct drm_device *dev)
4004{
4005        struct drm_i915_private *dev_priv = to_i915(dev);
4006        int pipe;
4007
4008        for_each_pipe(dev_priv, pipe) {
4009                /* Clear enable bits; then clear status bits */
4010                I915_WRITE(PIPESTAT(pipe), 0);
4011                I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4012        }
4013        I915_WRITE16(IMR, 0xffff);
4014        I915_WRITE16(IER, 0x0);
4015        I915_WRITE16(IIR, I915_READ16(IIR));
4016}
4017
4018static void i915_irq_preinstall(struct drm_device *dev)
4019{
4020        struct drm_i915_private *dev_priv = to_i915(dev);
4021        int pipe;
4022
4023        if (I915_HAS_HOTPLUG(dev)) {
4024                i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4025                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4026        }
4027
4028        I915_WRITE16(HWSTAM, 0xeffe);
4029        for_each_pipe(dev_priv, pipe)
4030                I915_WRITE(PIPESTAT(pipe), 0);
4031        I915_WRITE(IMR, 0xffffffff);
4032        I915_WRITE(IER, 0x0);
4033        POSTING_READ(IER);
4034}
4035
4036static int i915_irq_postinstall(struct drm_device *dev)
4037{
4038        struct drm_i915_private *dev_priv = to_i915(dev);
4039        u32 enable_mask;
4040
4041        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4042
4043        /* Unmask the interrupts that we always want on. */
4044        dev_priv->irq_mask =
4045                ~(I915_ASLE_INTERRUPT |
4046                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4047                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4048                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4049                  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4050
4051        enable_mask =
4052                I915_ASLE_INTERRUPT |
4053                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4054                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4055                I915_USER_INTERRUPT;
4056
4057        if (I915_HAS_HOTPLUG(dev)) {
4058                i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4059                POSTING_READ(PORT_HOTPLUG_EN);
4060
4061                /* Enable in IER... */
4062                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4063                /* and unmask in IMR */
4064                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4065        }
4066
4067        I915_WRITE(IMR, dev_priv->irq_mask);
4068        I915_WRITE(IER, enable_mask);
4069        POSTING_READ(IER);
4070
4071        i915_enable_asle_pipestat(dev_priv);
4072
4073        /* Interrupt setup is already guaranteed to be single-threaded; this is
4074         * just to make the assert_spin_locked check happy. */
4075        spin_lock_irq(&dev_priv->irq_lock);
4076        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4077        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4078        spin_unlock_irq(&dev_priv->irq_lock);
4079
4080        return 0;
4081}
4082
4083/*
4084 * Returns true when a page flip has completed.
4085 */
4086static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4087                               int plane, int pipe, u32 iir)
4088{
4089        u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4090
4091        if (!intel_pipe_handle_vblank(dev_priv, pipe))
4092                return false;
4093
4094        if ((iir & flip_pending) == 0)
4095                goto check_page_flip;
4096
4097        /* We detect FlipDone by looking for the change in PendingFlip from '1'
4098         * to '0' on the following vblank, i.e. IIR has the PendingFlip
4099         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4100         * the flip is completed (no longer pending). Since this doesn't raise
4101         * an interrupt per se, we watch for the change at vblank.
4102         */
4103        if (I915_READ(ISR) & flip_pending)
4104                goto check_page_flip;
4105
4106        intel_finish_page_flip_cs(dev_priv, pipe);
4107        return true;
4108
4109check_page_flip:
4110        intel_check_page_flip(dev_priv, pipe);
4111        return false;
4112}
4113
4114static irqreturn_t i915_irq_handler(int irq, void *arg)
4115{
4116        struct drm_device *dev = arg;
4117        struct drm_i915_private *dev_priv = to_i915(dev);
4118        u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4119        u32 flip_mask =
4120                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4121                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4122        int pipe, ret = IRQ_NONE;
4123
4124        if (!intel_irqs_enabled(dev_priv))
4125                return IRQ_NONE;
4126
4127        /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4128        disable_rpm_wakeref_asserts(dev_priv);
4129
4130        iir = I915_READ(IIR);
4131        do {
4132                bool irq_received = (iir & ~flip_mask) != 0;
4133                bool blc_event = false;
4134
4135                /* Can't rely on pipestat interrupt bit in iir as it might
4136                 * have been cleared after the pipestat interrupt was received.
4137                 * It doesn't set the bit in iir again, but it still produces
4138                 * interrupts (for non-MSI).
4139                 */
4140                spin_lock(&dev_priv->irq_lock);
4141                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4142                        DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4143
4144                for_each_pipe(dev_priv, pipe) {
4145                        i915_reg_t reg = PIPESTAT(pipe);
4146                        pipe_stats[pipe] = I915_READ(reg);
4147
4148                        /* Clear the PIPE*STAT regs before the IIR */
4149                        if (pipe_stats[pipe] & 0x8000ffff) {
4150                                I915_WRITE(reg, pipe_stats[pipe]);
4151                                irq_received = true;
4152                        }
4153                }
4154                spin_unlock(&dev_priv->irq_lock);
4155
4156                if (!irq_received)
4157                        break;
4158
4159                /* Consume port.  Then clear IIR or we'll miss events */
4160                if (I915_HAS_HOTPLUG(dev_priv) &&
4161                    iir & I915_DISPLAY_PORT_INTERRUPT) {
4162                        u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4163                        if (hotplug_status)
4164                                i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4165                }
4166
4167                I915_WRITE(IIR, iir & ~flip_mask);
4168                new_iir = I915_READ(IIR); /* Flush posted writes */
4169
4170                if (iir & I915_USER_INTERRUPT)
4171                        notify_ring(&dev_priv->engine[RCS]);
4172
4173                for_each_pipe(dev_priv, pipe) {
4174                        int plane = pipe;
4175                        if (HAS_FBC(dev_priv))
4176                                plane = !plane;
4177
4178                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4179                            i915_handle_vblank(dev_priv, plane, pipe, iir))
4180                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4181
4182                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4183                                blc_event = true;
4184
4185                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4186                                i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4187
4188                        if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4189                                intel_cpu_fifo_underrun_irq_handler(dev_priv,
4190                                                                    pipe);
4191                }
4192
4193                if (blc_event || (iir & I915_ASLE_INTERRUPT))
4194                        intel_opregion_asle_intr(dev_priv);
4195
4196                /* With MSI, interrupts are only generated when iir
4197                 * transitions from zero to nonzero.  If another bit got
4198                 * set while we were handling the existing iir bits, then
4199                 * we would never get another interrupt.
4200                 *
4201                 * This is fine on non-MSI as well, as if we hit this path
4202                 * we avoid exiting the interrupt handler only to generate
4203                 * another one.
4204                 *
4205                 * Note that for MSI this could cause a stray interrupt report
4206                 * if an interrupt landed in the time between writing IIR and
4207                 * the posting read.  This should be rare enough to never
4208                 * trigger the 99% of 100,000 interrupts test for disabling
4209                 * stray interrupts.
4210                 */
4211                ret = IRQ_HANDLED;
4212                iir = new_iir;
4213        } while (iir & ~flip_mask);
4214
4215        enable_rpm_wakeref_asserts(dev_priv);
4216
4217        return ret;
4218}
4219
4220static void i915_irq_uninstall(struct drm_device *dev)
4221{
4222        struct drm_i915_private *dev_priv = to_i915(dev);
4223        int pipe;
4224
4225        if (I915_HAS_HOTPLUG(dev)) {
4226                i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4227                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4228        }
4229
4230        I915_WRITE16(HWSTAM, 0xffff);
4231        for_each_pipe(dev_priv, pipe) {
4232                /* Clear enable bits; then clear status bits */
4233                I915_WRITE(PIPESTAT(pipe), 0);
4234                I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4235        }
4236        I915_WRITE(IMR, 0xffffffff);
4237        I915_WRITE(IER, 0x0);
4238
4239        I915_WRITE(IIR, I915_READ(IIR));
4240}
4241
4242static void i965_irq_preinstall(struct drm_device *dev)
4243{
4244        struct drm_i915_private *dev_priv = to_i915(dev);
4245        int pipe;
4246
4247        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4248        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4249
4250        I915_WRITE(HWSTAM, 0xeffe);
4251        for_each_pipe(dev_priv, pipe)
4252                I915_WRITE(PIPESTAT(pipe), 0);
4253        I915_WRITE(IMR, 0xffffffff);
4254        I915_WRITE(IER, 0x0);
4255        POSTING_READ(IER);
4256}
4257
4258static int i965_irq_postinstall(struct drm_device *dev)
4259{
4260        struct drm_i915_private *dev_priv = to_i915(dev);
4261        u32 enable_mask;
4262        u32 error_mask;
4263
4264        /* Unmask the interrupts that we always want on. */
4265        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4266                               I915_DISPLAY_PORT_INTERRUPT |
4267                               I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4268                               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4269                               I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4270                               I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4271                               I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4272
4273        enable_mask = ~dev_priv->irq_mask;
4274        enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4275                         I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4276        enable_mask |= I915_USER_INTERRUPT;
4277
4278        if (IS_G4X(dev_priv))
4279                enable_mask |= I915_BSD_USER_INTERRUPT;
4280
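        /* irq_mask ends up in IMR (a set bit masks the interrupt) while
         * enable_mask goes to IER (a set bit enables it); see the IMR/IER
         * writes later in this function. */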
4281        /* Interrupt setup is already guaranteed to be single-threaded; this
4282         * is just to make the assert_spin_locked check happy. */
4283        spin_lock_irq(&dev_priv->irq_lock);
4284        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4285        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4286        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4287        spin_unlock_irq(&dev_priv->irq_lock);
4288
4289        /*
4290         * Enable some error detection, note the instruction error mask
4291         * bit is reserved, so we leave it masked.
4292         */
4293        if (IS_G4X(dev_priv)) {
4294                error_mask = ~(GM45_ERROR_PAGE_TABLE |
4295                               GM45_ERROR_MEM_PRIV |
4296                               GM45_ERROR_CP_PRIV |
4297                               I915_ERROR_MEMORY_REFRESH);
4298        } else {
4299                error_mask = ~(I915_ERROR_PAGE_TABLE |
4300                               I915_ERROR_MEMORY_REFRESH);
4301        }
4302        I915_WRITE(EMR, error_mask);
4303
4304        I915_WRITE(IMR, dev_priv->irq_mask);
4305        I915_WRITE(IER, enable_mask);
4306        POSTING_READ(IER);
4307
4308        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4309        POSTING_READ(PORT_HOTPLUG_EN);
4310
4311        i915_enable_asle_pipestat(dev_priv);
4312
4313        return 0;
4314}
4315
4316static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4317{
4318        u32 hotplug_en;
4319
4320        assert_spin_locked(&dev_priv->irq_lock);
4321
4322        /* Note HDMI and DP share hotplug bits */
4323        /* enable bits are the same for all generations */
4324        hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4325        /* Programming the CRT detection parameters tends to generate a
4326         * spurious hotplug event about three seconds later.  So just do
4327         * it once.
4328         */
4329        if (IS_G4X(dev_priv))
4330                hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4331        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4332
4333        /* Ignore TV since it's buggy */
4334        i915_hotplug_interrupt_update_locked(dev_priv,
4335                                             HOTPLUG_INT_EN_MASK |
4336                                             CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4337                                             CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4338                                             hotplug_en);
4339}
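/*
 * Note: i915_hotplug_interrupt_update_locked() does a masked
 * read-modify-write of PORT_HOTPLUG_EN, roughly
 *
 *	en = I915_READ(PORT_HOTPLUG_EN);
 *	en = (en & ~mask) | bits;
 *	I915_WRITE(PORT_HOTPLUG_EN, en);
 *
 * so the call above clears all the enable and CRT tuning fields covered
 * by the mask and installs the freshly computed hotplug_en in one go.
 */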
4340
4341static irqreturn_t i965_irq_handler(int irq, void *arg)
4342{
4343        struct drm_device *dev = arg;
4344        struct drm_i915_private *dev_priv = to_i915(dev);
4345        u32 iir, new_iir;
4346        u32 pipe_stats[I915_MAX_PIPES];
4347        int ret = IRQ_NONE, pipe;
4348        u32 flip_mask =
4349                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4350                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4351
4352        if (!intel_irqs_enabled(dev_priv))
4353                return IRQ_NONE;
4354
4355        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4356        disable_rpm_wakeref_asserts(dev_priv);
4357
4358        iir = I915_READ(IIR);
4359
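        /*
         * Each pass of the loop below acks the bits already captured and
         * re-reads IIR, so anything raised while we were busy is picked up
         * on the next pass (see the MSI note near the bottom of the loop).
         */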
4360        for (;;) {
4361                bool irq_received = (iir & ~flip_mask) != 0;
4362                bool blc_event = false;
4363
4364                /* Can't rely on pipestat interrupt bit in iir as it might
4365                 * have been cleared after the pipestat interrupt was received.
4366                 * It doesn't set the bit in iir again, but it still produces
4367                 * interrupts (for non-MSI).
4368                 */
4369                spin_lock(&dev_priv->irq_lock);
4370                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4371                        DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4372
4373                for_each_pipe(dev_priv, pipe) {
4374                        i915_reg_t reg = PIPESTAT(pipe);
4375                        pipe_stats[pipe] = I915_READ(reg);
4376
4377                        /*
4378                         * Clear the PIPE*STAT regs before the IIR
4379                         */
4380                        if (pipe_stats[pipe] & 0x8000ffff) {
4381                                I915_WRITE(reg, pipe_stats[pipe]);
4382                                irq_received = true;
4383                        }
4384                }
4385                spin_unlock(&dev_priv->irq_lock);
4386
4387                if (!irq_received)
4388                        break;
4389
4390                ret = IRQ_HANDLED;
4391
4392                /* Consume port.  Then clear IIR or we'll miss events */
4393                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4394                        u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4395                        if (hotplug_status)
4396                                i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4397                }
4398
4399                I915_WRITE(IIR, iir & ~flip_mask);
4400                new_iir = I915_READ(IIR); /* Flush posted writes */
4401
4402                if (iir & I915_USER_INTERRUPT)
4403                        notify_ring(&dev_priv->engine[RCS]);
4404                if (iir & I915_BSD_USER_INTERRUPT)
4405                        notify_ring(&dev_priv->engine[VCS]);
4406
4407                for_each_pipe(dev_priv, pipe) {
4408                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4409                            i915_handle_vblank(dev_priv, pipe, pipe, iir))
4410                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4411
4412                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4413                                blc_event = true;
4414
4415                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4416                                i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4417
4418                        if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4419                                intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4420                }
4421
4422                if (blc_event || (iir & I915_ASLE_INTERRUPT))
4423                        intel_opregion_asle_intr(dev_priv);
4424
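                /* GMBUS status lands in pipe A's PIPESTAT; the event was
                 * enabled in i965_irq_postinstall() above. */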
4425                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4426                        gmbus_irq_handler(dev_priv);
4427
4428                /* With MSI, interrupts are only generated when iir
4429                 * transitions from zero to nonzero.  If another bit got
4430                 * set while we were handling the existing iir bits, then
4431                 * we would never get another interrupt.
4432                 *
4433                 * This is fine on non-MSI as well, as if we hit this path
4434                 * we avoid exiting the interrupt handler only to generate
4435                 * another one.
4436                 *
4437                 * Note that for MSI this could cause a stray interrupt report
4438                 * if an interrupt landed in the time between writing IIR and
4439                 * the posting read.  This should be rare enough to never
4440                 * trigger the 99% of 100,000 interrupts test for disabling
4441                 * stray interrupts.
4442                 */
4443                iir = new_iir;
4444        }
4445
4446        enable_rpm_wakeref_asserts(dev_priv);
4447
4448        return ret;
4449}
4450
4451static void i965_irq_uninstall(struct drm_device *dev)
4452{
4453        struct drm_i915_private *dev_priv = to_i915(dev);
4454        int pipe;
4455
4456        if (!dev_priv)
4457                return;
4458
4459        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4460        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4461
4462        I915_WRITE(HWSTAM, 0xffffffff);
4463        for_each_pipe(dev_priv, pipe)
4464                I915_WRITE(PIPESTAT(pipe), 0);
4465        I915_WRITE(IMR, 0xffffffff);
4466        I915_WRITE(IER, 0x0);
4467
4468        for_each_pipe(dev_priv, pipe)
4469                I915_WRITE(PIPESTAT(pipe),
4470                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4471        I915_WRITE(IIR, I915_READ(IIR));
4472}
4473
4474/**
4475 * intel_irq_init - initializes irq support
4476 * @dev_priv: i915 device instance
4477 *
4478 * This function initializes all the irq support including work items, timers
4479 * and all the vtables. It does not set up the interrupt itself though.
4480 */
4481void intel_irq_init(struct drm_i915_private *dev_priv)
4482{
4483        struct drm_device *dev = &dev_priv->drm;
4484
4485        intel_hpd_init_work(dev_priv);
4486
4487        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4488        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4489
4490        /* Let's track the enabled rps events */
4491        if (IS_VALLEYVIEW(dev_priv))
4492                /* WaGsvRC0ResidencyMethod:vlv */
4493                dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4494        else
4495                dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4496
4497        dev_priv->rps.pm_intr_keep = 0;
4498
4499        /*
4500         * SNB and IVB can (and VLV and CHV may) hard hang on a looping
4501         * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4502         *
4503         * TODO: verify if this can be reproduced on VLV,CHV.
4504         */
4505        if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4506                dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4507
4508        if (INTEL_INFO(dev_priv)->gen >= 8)
4509                dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
4510
4511        INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4512                          i915_hangcheck_elapsed);
4513
4514        if (IS_GEN2(dev_priv)) {
4515                /* Gen2 doesn't have a hardware frame counter */
4516                dev->max_vblank_count = 0;
4517                dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
4518        } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4519                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4520                dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4521        } else {
4522                dev->driver->get_vblank_counter = i915_get_vblank_counter;
4523                dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4524        }
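        /*
         * The DRM core uses max_vblank_count for wraparound handling of
         * the cooked vblank counter, roughly (see
         * drm_update_vblank_count()):
         *
         *      diff = (cur_hw - last_hw) & dev->max_vblank_count;
         *
         * A value of 0, as on gen2 above, tells the core there is no
         * usable hardware counter, so vblank interrupts are counted
         * instead.
         */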
4525
4526        /*
4527         * Opt out of the vblank disable timer on everything except gen2.
4528         * Gen2 doesn't have a hardware frame counter and so depends on
4529         * vblank interrupts to produce sane vblank sequence numbers.
4530         */
4531        if (!IS_GEN2(dev_priv))
4532                dev->vblank_disable_immediate = true;
4533
4534        dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4535        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4536
4537        if (IS_CHERRYVIEW(dev_priv)) {
4538                dev->driver->irq_handler = cherryview_irq_handler;
4539                dev->driver->irq_preinstall = cherryview_irq_preinstall;
4540                dev->driver->irq_postinstall = cherryview_irq_postinstall;
4541                dev->driver->irq_uninstall = cherryview_irq_uninstall;
4542                dev->driver->enable_vblank = valleyview_enable_vblank;
4543                dev->driver->disable_vblank = valleyview_disable_vblank;
4544                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4545        } else if (IS_VALLEYVIEW(dev_priv)) {
4546                dev->driver->irq_handler = valleyview_irq_handler;
4547                dev->driver->irq_preinstall = valleyview_irq_preinstall;
4548                dev->driver->irq_postinstall = valleyview_irq_postinstall;
4549                dev->driver->irq_uninstall = valleyview_irq_uninstall;
4550                dev->driver->enable_vblank = valleyview_enable_vblank;
4551                dev->driver->disable_vblank = valleyview_disable_vblank;
4552                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4553        } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4554                dev->driver->irq_handler = gen8_irq_handler;
4555                dev->driver->irq_preinstall = gen8_irq_reset;
4556                dev->driver->irq_postinstall = gen8_irq_postinstall;
4557                dev->driver->irq_uninstall = gen8_irq_uninstall;
4558                dev->driver->enable_vblank = gen8_enable_vblank;
4559                dev->driver->disable_vblank = gen8_disable_vblank;
4560                if (IS_BROXTON(dev))
4561                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4562                else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
4563                        dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4564                else
4565                        dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4566        } else if (HAS_PCH_SPLIT(dev)) {
4567                dev->driver->irq_handler = ironlake_irq_handler;
4568                dev->driver->irq_preinstall = ironlake_irq_reset;
4569                dev->driver->irq_postinstall = ironlake_irq_postinstall;
4570                dev->driver->irq_uninstall = ironlake_irq_uninstall;
4571                dev->driver->enable_vblank = ironlake_enable_vblank;
4572                dev->driver->disable_vblank = ironlake_disable_vblank;
4573                dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4574        } else {
4575                if (IS_GEN2(dev_priv)) {
4576                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
4577                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
4578                        dev->driver->irq_handler = i8xx_irq_handler;
4579                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
4580                } else if (IS_GEN3(dev_priv)) {
4581                        dev->driver->irq_preinstall = i915_irq_preinstall;
4582                        dev->driver->irq_postinstall = i915_irq_postinstall;
4583                        dev->driver->irq_uninstall = i915_irq_uninstall;
4584                        dev->driver->irq_handler = i915_irq_handler;
4585                } else {
4586                        dev->driver->irq_preinstall = i965_irq_preinstall;
4587                        dev->driver->irq_postinstall = i965_irq_postinstall;
4588                        dev->driver->irq_uninstall = i965_irq_uninstall;
4589                        dev->driver->irq_handler = i965_irq_handler;
4590                }
4591                if (I915_HAS_HOTPLUG(dev_priv))
4592                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4593                dev->driver->enable_vblank = i915_enable_vblank;
4594                dev->driver->disable_vblank = i915_disable_vblank;
4595        }
4596}
4597
4598/**
4599 * intel_irq_install - enables the hardware interrupt
4600 * @dev_priv: i915 device instance
4601 *
4602 * This function enables the hardware interrupt handling, but leaves hotplug
4603 * handling disabled. It is called after intel_irq_init().
4604 *
4605 * In the driver load and resume code we need working interrupts in a few places
4606 * but don't want to deal with the hassle of concurrent probe and hotplug
4607 * workers. Hence the split into a two-stage approach.
4608 */
4609int intel_irq_install(struct drm_i915_private *dev_priv)
4610{
4611        /*
4612         * We enable some interrupt sources in our postinstall hooks, so mark
4613         * interrupts as enabled _before_ actually enabling them to avoid
4614         * special cases in our ordering checks.
4615         */
4616        dev_priv->pm.irqs_enabled = true;
4617
4618        return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4619}
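/*
 * Illustrative ordering of the two stages during driver load (the actual
 * call sites live in i915_drv.c and interleave other setup):
 *
 *	intel_irq_init(dev_priv);	vtables, work items, timers
 *	intel_irq_install(dev_priv);	request the IRQ line
 *	...
 *	intel_hpd_init(dev_priv);	hotplug handling enabled last
 */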
4620
4621/**
4622 * intel_irq_uninstall - finalizes all irq handling
4623 * @dev_priv: i915 device instance
4624 *
4625 * This stops interrupt and hotplug handling and unregisters and frees all
4626 * resources acquired in the init functions.
4627 */
4628void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4629{
4630        drm_irq_uninstall(&dev_priv->drm);
4631        intel_hpd_cancel_work(dev_priv);
4632        dev_priv->pm.irqs_enabled = false;
4633}
4634
4635/**
4636 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4637 * @dev_priv: i915 device instance
4638 *
4639 * This function is used to disable interrupts at runtime, both in the runtime
4640 * pm and the system suspend/resume code.
4641 */
4642void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4643{
4644        dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4645        dev_priv->pm.irqs_enabled = false;
4646        synchronize_irq(dev_priv->drm.irq);
4647}
4648
4649/**
4650 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4651 * @dev_priv: i915 device instance
4652 *
4653 * This function is used to enable interrupts at runtime, both in the runtime
4654 * pm and the system suspend/resume code.
4655 */
4656void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4657{
4658        dev_priv->pm.irqs_enabled = true;
4659        dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4660        dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4661}
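/*
 * These two helpers are meant to be called in matched pairs from the
 * suspend/resume paths (sketch; the actual call sites are in i915_drv.c):
 *
 *	suspend:  intel_runtime_pm_disable_interrupts(dev_priv);
 *	resume:   intel_runtime_pm_enable_interrupts(dev_priv);
 *
 * The disable side finishes with synchronize_irq() so that no handler is
 * still running on another CPU when the device is powered down.
 */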
4662