linux/drivers/gpu/drm/i915/intel_hotplug.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled in many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, the userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the
 * number of interrupts exceeds a certain threshold, the interrupt is disabled
 * for a while before being re-enabled. The intention is to mitigate issues
 * arising from broken hardware triggering massive amounts of interrupts and
 * grinding the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a display port sink is connected, hence on platforms whose DP
 * callback is handled by i915_digport_work_func() re-enabling of hpd is not
 * performed (it was never expected to be disabled in the first place ;)).
 * This is specific to DP sinks handled by that routine; any other display,
 * such as HDMI or DVI, enabled on the same port will have proper logic, since
 * it will use i915_hotplug_work_func(), where this logic is handled.
 */
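
/*
 * Illustrative sketch of the userspace side described above (not part of
 * this driver, and not built here): a compositor could watch for the DRM
 * hotplug uevent with libudev and reprobe its outputs when one arrives.
 * The DRM core sends the uevent with a HOTPLUG=1 property. Assumes
 * <libudev.h> and <poll.h>; reprobe_outputs() is a made-up placeholder
 * for the compositor's own reprobe path, and error handling is omitted.
 *
 *	struct udev *udev = udev_new();
 *	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
 *	struct pollfd pfd;
 *
 *	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *	udev_monitor_enable_receiving(mon);
 *	pfd.fd = udev_monitor_get_fd(mon);
 *	pfd.events = POLLIN;
 *
 *	for (;;) {
 *		struct udev_device *dev;
 *
 *		poll(&pfd, 1, -1);
 *		dev = udev_monitor_receive_device(mon);
 *		if (!dev)
 *			continue;
 *		if (udev_device_get_property_value(dev, "HOTPLUG"))
 *			reprobe_outputs();
 *		udev_device_unref(dev);
 *	}
 */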

bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
{
        switch (pin) {
        case HPD_PORT_A:
                *port = PORT_A;
                return true;
        case HPD_PORT_B:
                *port = PORT_B;
                return true;
        case HPD_PORT_C:
                *port = PORT_C;
                return true;
        case HPD_PORT_D:
                *port = PORT_D;
                return true;
        case HPD_PORT_E:
                *port = PORT_E;
                return true;
        default:
                return false;   /* no hpd */
        }
}
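
/*
 * Usage sketch: callers translate a pin to a port before touching any port
 * specific state, and must handle pins that have no backing port (the
 * helper then returns false). This mirrors the check done in
 * intel_hpd_irq_handler() below:
 *
 *	enum port port;
 *	bool is_dig_port = intel_hpd_pin_to_port(pin, &port) &&
 *			   dev_priv->hotplug.irq_port[port];
 */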

#define HPD_STORM_DETECT_PERIOD         1000
#define HPD_STORM_THRESHOLD             5
#define HPD_STORM_REENABLE_DELAY        (2 * 60 * 1000)

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 *
 * Gather stats about HPD irqs from the specified @pin, and detect irq
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
 * otherwise it's considered an irq storm, and the irq state is set to
 * @HPD_MARK_DISABLED.
 *
 * Return true if an irq storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
                                       enum hpd_pin pin)
{
        unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
        unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
        bool storm = false;

        if (!time_in_range(jiffies, start, end)) {
                dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
                dev_priv->hotplug.stats[pin].count = 0;
                DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
        } else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
                dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
                DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
                storm = true;
        } else {
                dev_priv->hotplug.stats[pin].count++;
                DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
                              dev_priv->hotplug.stats[pin].count);
        }

        return storm;
}
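
/*
 * For illustration, the storm logic above reduces to the following model
 * (a simplified sketch; the real code uses jiffies and time_in_range() to
 * stay wrap-safe). An irq outside the current window opens a fresh window
 * with the count reset; inside the window, exceeding HPD_STORM_THRESHOLD
 * flags a storm:
 *
 *	if (now - window_start >= HPD_STORM_DETECT_PERIOD) {
 *		window_start = now;
 *		count = 0;
 *	} else if (count > HPD_STORM_THRESHOLD) {
 *		storm = true;
 *	} else {
 *		count++;
 *	}
 */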

static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        enum hpd_pin pin;
        bool hpd_disabled = false;

        assert_spin_locked(&dev_priv->irq_lock);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                if (connector->polled != DRM_CONNECTOR_POLL_HPD)
                        continue;

                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (!intel_encoder)
                        continue;

                pin = intel_encoder->hpd_pin;
                if (pin == HPD_NONE ||
                    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
                        continue;

                DRM_INFO("HPD interrupt storm detected on connector %s: "
                         "switching from hotplug detection to polling\n",
                         connector->name);

                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                connector->polled = DRM_CONNECTOR_POLL_CONNECT
                        | DRM_CONNECTOR_POLL_DISCONNECT;
                hpd_disabled = true;
        }

        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable_locked(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        int i;

        intel_runtime_pm_get(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        for_each_hpd_pin(i) {
                struct drm_connector *connector;

                if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
                        continue;

                dev_priv->hotplug.stats[i].state = HPD_ENABLED;

                list_for_each_entry(connector, &mode_config->connector_list, head) {
                        struct intel_connector *intel_connector = to_intel_connector(connector);

                        if (intel_connector->encoder->hpd_pin == i) {
                                if (connector->polled != intel_connector->polled)
                                        DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
                                                         connector->name);
                                connector->polled = intel_connector->polled;
                                if (!connector->polled)
                                        connector->polled = DRM_CONNECTOR_POLL_HPD;
                        }
                }
        }
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        intel_runtime_pm_put(dev_priv);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
                                struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        if (old_status == connector->status)
                return false;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.id,
                      connector->name,
                      drm_get_connector_status_name(old_status),
                      drm_get_connector_status_name(connector->status));

        return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.dig_port_work);
        u32 long_port_mask, short_port_mask;
        struct intel_digital_port *intel_dig_port;
        int i;
        u32 old_bits = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->hotplug.long_port_mask;
        dev_priv->hotplug.long_port_mask = 0;
        short_port_mask = dev_priv->hotplug.short_port_mask;
        dev_priv->hotplug.short_port_mask = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        for (i = 0; i < I915_MAX_PORTS; i++) {
                bool valid = false;
                bool long_hpd = false;
                intel_dig_port = dev_priv->hotplug.irq_port[i];
                if (!intel_dig_port || !intel_dig_port->hpd_pulse)
                        continue;

                if (long_port_mask & (1 << i))  {
                        valid = true;
                        long_hpd = true;
                } else if (short_port_mask & (1 << i))
                        valid = true;

                if (valid) {
                        enum irqreturn ret;

                        ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
                        if (ret == IRQ_NONE) {
                                /* fall back to old school hpd */
                                old_bits |= (1 << intel_dig_port->base.hpd_pin);
                        }
                }
        }

        if (old_bits) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hotplug.event_bits |= old_bits;
                spin_unlock_irq(&dev_priv->irq_lock);
                schedule_work(&dev_priv->hotplug.hotplug_work);
        }
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.hotplug_work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        bool changed = false;
        u32 hpd_event_bits;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irq(&dev_priv->irq_lock);

        hpd_event_bits = dev_priv->hotplug.event_bits;
        dev_priv->hotplug.event_bits = 0;

        /* Disable hotplug on connectors that hit an irq storm. */
        intel_hpd_irq_storm_disable(dev_priv);

        spin_unlock_irq(&dev_priv->irq_lock);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                if (!intel_connector->encoder)
                        continue;
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      connector->name, intel_encoder->hpd_pin);
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask)
{
        int i;
        enum port port;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
        bool is_dig_port;

        if (!pin_mask)
                return;

        spin_lock(&dev_priv->irq_lock);
        for_each_hpd_pin(i) {
                if (!(BIT(i) & pin_mask))
                        continue;

                is_dig_port = intel_hpd_pin_to_port(i, &port) &&
                              dev_priv->hotplug.irq_port[port];

                if (is_dig_port) {
                        bool long_hpd = long_mask & BIT(i);

                        DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
                                         long_hpd ? "long" : "short");
                        /*
                         * For long HPD pulses we want to have the digital queue happen,
                         * but we still want HPD storm detection to function.
                         */
                        queue_dig = true;
                        if (long_hpd) {
                                dev_priv->hotplug.long_port_mask |= (1 << port);
                        } else {
                                /* for short HPD just trigger the digital queue */
                                dev_priv->hotplug.short_port_mask |= (1 << port);
                                continue;
                        }
                }

                if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
                        /*
                         * On GMCH platforms the interrupt mask bits only
                         * prevent irq generation, not the setting of the
                         * hotplug bits itself. So only WARN about unexpected
                         * interrupts on saner platforms.
                         */
                        WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
                                  "Received HPD interrupt on pin %d although disabled\n", i);
                        continue;
                }

                if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
                        continue;

                if (!is_dig_port) {
                        dev_priv->hotplug.event_bits |= BIT(i);
                        queue_hp = true;
                }

                if (intel_hpd_irq_storm_detect(dev_priv, i)) {
                        dev_priv->hotplug.event_bits &= ~BIT(i);
                        storm_detected = true;
                }
        }

        if (storm_detected)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev_priv->wq work
         * queue for otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        if (queue_dig)
                queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
        if (queue_hp)
                schedule_work(&dev_priv->hotplug.hotplug_work);
}
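
/*
 * Calling convention sketch (hypothetical platform code; the decode helper
 * name below is made up, the real register decoding lives in i915_irq.c):
 * the platform irq handler reads its hotplug status registers, builds the
 * two masks described in the kernel-doc above, and hands them over from
 * irq context:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	decode_hotplug_status(hotplug_status, &pin_mask, &long_mask);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */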

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_init(), which enables connector polling
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
        int i;

        for_each_hpd_pin(i) {
                dev_priv->hotplug.stats[i].count = 0;
                dev_priv->hotplug.stats[i].state = HPD_ENABLED;
        }

        WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
        schedule_work(&dev_priv->hotplug.poll_init_work);

        /*
         * Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy.
         */
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
}
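
/*
 * Ordering sketch for driver load and resume, condensed from the
 * kernel-doc above (illustrative only): interrupts first, hotplug second.
 *
 *	intel_irq_init_hw(...);		1) interrupts are live
 *	intel_hpd_init(dev_priv);	2) hotplug is armed on top of them
 */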
 479
 480static void i915_hpd_poll_init_work(struct work_struct *work)
 481{
 482        struct drm_i915_private *dev_priv =
 483                container_of(work, struct drm_i915_private,
 484                             hotplug.poll_init_work);
 485        struct drm_device *dev = &dev_priv->drm;
 486        struct drm_mode_config *mode_config = &dev->mode_config;
 487        struct drm_connector *connector;
 488        bool enabled;
 489
 490        mutex_lock(&dev->mode_config.mutex);
 491
 492        enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
 493
 494        list_for_each_entry(connector, &mode_config->connector_list, head) {
 495                struct intel_connector *intel_connector =
 496                        to_intel_connector(connector);
 497                connector->polled = intel_connector->polled;
 498
                /*
                 * MST has a dynamic intel_connector->encoder and its
                 * reprobing is all handled by the MST helpers.
                 */
                if (intel_connector->mst_port)
                        continue;

                if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
                    intel_connector->encoder->hpd_pin > HPD_NONE) {
                        connector->polled = enabled ?
                                DRM_CONNECTOR_POLL_CONNECT |
                                DRM_CONNECTOR_POLL_DISCONNECT :
                                DRM_CONNECTOR_POLL_HPD;
                }
        }

        if (enabled)
                drm_kms_helper_poll_enable_locked(dev);

        mutex_unlock(&dev->mode_config.mutex);

        /*
         * We might have missed any hotplugs that happened while we were
         * in the middle of disabling polling
         */
        if (!enabled)
                drm_helper_hpd_irq_event(dev);
}

/**
 * intel_hpd_poll_init - enables/disables polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors, regardless of whether or
 * not they support hotplug detection. Under certain conditions HPD may not be
 * functional. On most Intel GPUs, this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init(), which restores hpd handling.
 */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
        WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

        /*
         * We might already be holding dev->mode_config.mutex, so do this in a
         * separate worker.
         * As well, there's no issue if we race here since we always reschedule
         * this worker anyway.
         */
        schedule_work(&dev_priv->hotplug.poll_init_work);
}
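
/*
 * Pairing sketch (illustrative; the exact call sites live in the runtime
 * PM paths): polling takes over while HPD is unavailable, and
 * intel_hpd_init() later flips back to interrupt driven hotplug:
 *
 *	on runtime suspend:	intel_hpd_poll_init(dev_priv);
 *	on runtime resume:	intel_hpd_init(dev_priv);
 */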

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
        INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
        INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
        INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
                          intel_hpd_irq_storm_reenable_work);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        dev_priv->hotplug.long_port_mask = 0;
        dev_priv->hotplug.short_port_mask = 0;
        dev_priv->hotplug.event_bits = 0;

        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->hotplug.dig_port_work);
        cancel_work_sync(&dev_priv->hotplug.hotplug_work);
        cancel_work_sync(&dev_priv->hotplug.poll_init_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        bool ret = false;

        if (pin == HPD_NONE)
                return false;

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                ret = true;
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        if (pin == HPD_NONE)
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        spin_unlock_irq(&dev_priv->irq_lock);
}
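
/*
 * Usage sketch (hypothetical caller): temporarily masking a pin around an
 * operation that would otherwise generate spurious hotplug processing.
 * Only re-enable when the disable actually flipped the state, so a pin
 * that was already disabled (e.g. by storm mitigation) stays disabled:
 *
 *	bool was_enabled = intel_hpd_disable(dev_priv, pin);
 *
 *	... perform the noisy operation ...
 *
 *	if (was_enabled)
 *		intel_hpd_enable(dev_priv, pin);
 */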