linux/drivers/gpu/drm/i915/intel_hotplug.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the
 * number of interrupts exceeds a certain threshold, the interrupt is disabled
 * for a while before being re-enabled. The intention is to mitigate issues
 * arising from broken hardware triggering massive amounts of interrupts and
 * grinding the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a display port sink is connected. Hence, on platforms whose DP
 * callback is handled by i915_digport_work_func(), re-enabling of hpd is not
 * performed (it was never expected to be disabled in the first place). This
 * is specific to DP sinks handled by that routine; any other display such as
 * HDMI or DVI enabled on the same port will have proper logic, since it will
 * use i915_hotplug_work_func(), where this logic is handled.
 */
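
/*
 * A rough sketch of the call flow described above, for orientation only
 * (the functions named here live in this file, i915_irq.c and intel_dp.c):
 *
 *   i915_irq.c interrupt handler
 *     -> intel_hpd_irq_handler()                 // storm detection/mitigation
 *          -> i915_digport_work_func()           // DP bottom half
 *               -> intel_dp_hpd_pulse()          // short pulses, MST long pulses
 *          -> i915_hotplug_work_func()           // regular hotplug bottom half
 *               -> drm_kms_helper_hotplug_event() // uevent to userspace
 */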

/**
 * intel_hpd_pin_default - return default pin associated with certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * It is only valid and used by digital port encoders.
 *
 * Return: the pin that is associated with @port, or HPD_NONE if no pin is
 * hard associated with that @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
                                   enum port port)
{
        switch (port) {
        case PORT_A:
                return HPD_PORT_A;
        case PORT_B:
                return HPD_PORT_B;
        case PORT_C:
                return HPD_PORT_C;
        case PORT_D:
                return HPD_PORT_D;
        case PORT_E:
                return HPD_PORT_E;
        case PORT_F:
                if (IS_CNL_WITH_PORT_F(dev_priv))
                        return HPD_PORT_E;
                return HPD_PORT_F;
        default:
                MISSING_CASE(port);
                return HPD_NONE;
        }
}
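
/*
 * Illustrative use only (hypothetical caller): encoder setup code can map
 * its port to the default pin, e.g.
 *
 *   encoder->hpd_pin = intel_hpd_pin_default(dev_priv, PORT_B);
 *   // -> HPD_PORT_B
 */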

#define HPD_STORM_DETECT_PERIOD         1000            /* ms */
#define HPD_STORM_REENABLE_DELAY        (2 * 60 * 1000) /* ms, i.e. two minutes */

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->hotplug.hpd_storm_threshold, which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 towards this threshold,
 * and short IRQs count as +1. If this threshold is exceeded, it's considered
 * an IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return: true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
                                       enum hpd_pin pin, bool long_hpd)
{
        struct i915_hotplug *hpd = &dev_priv->hotplug;
        unsigned long start = hpd->stats[pin].last_jiffies;
        unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
        const int increment = long_hpd ? 10 : 1;
        const int threshold = hpd->hpd_storm_threshold;
        bool storm = false;

        if (!threshold ||
            (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
                return false;

        if (!time_in_range(jiffies, start, end)) {
                hpd->stats[pin].last_jiffies = jiffies;
                hpd->stats[pin].count = 0;
        }

        hpd->stats[pin].count += increment;
        if (hpd->stats[pin].count > threshold) {
                hpd->stats[pin].state = HPD_MARK_DISABLED;
                DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
                storm = true;
        } else {
                DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
                              hpd->stats[pin].count);
        }

        return storm;
}
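
/*
 * Worked example of the arithmetic above, assuming the default threshold of
 * 50 (HPD_STORM_DEFAULT_THRESHOLD at the time of writing): five long IRQs
 * within one detect period bring the count to exactly 50, which does not
 * exceed the threshold; a sixth long IRQ (count 60 > 50) is flagged as a
 * storm and the pin is marked HPD_MARK_DISABLED.
 */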

static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        enum hpd_pin pin;
        bool hpd_disabled = false;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->polled != DRM_CONNECTOR_POLL_HPD)
                        continue;

                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (!intel_encoder)
                        continue;

                pin = intel_encoder->hpd_pin;
                if (pin == HPD_NONE ||
                    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
                        continue;

                DRM_INFO("HPD interrupt storm detected on connector %s: "
                         "switching from hotplug detection to polling\n",
                         connector->name);

                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                connector->polled = DRM_CONNECTOR_POLL_CONNECT
                        | DRM_CONNECTOR_POLL_DISCONNECT;
                hpd_disabled = true;
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
}
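
/*
 * Lifecycle note: a pin flagged HPD_MARK_DISABLED by the storm detector is
 * moved to HPD_DISABLED above and its connector falls back to polling;
 * after HPD_STORM_REENABLE_DELAY (two minutes), the re-enable work below
 * flips the pin back to HPD_ENABLED and restores HPD-based detection.
 */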

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
        struct drm_device *dev = &dev_priv->drm;
        intel_wakeref_t wakeref;
        enum hpd_pin pin;

        wakeref = intel_runtime_pm_get(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        for_each_hpd_pin(pin) {
                struct drm_connector *connector;
                struct drm_connector_list_iter conn_iter;

                if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
                        continue;

                dev_priv->hotplug.stats[pin].state = HPD_ENABLED;

                drm_connector_list_iter_begin(dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter) {
                        struct intel_connector *intel_connector = to_intel_connector(connector);

                        /* Don't check MST ports, they don't have pins */
                        if (!intel_connector->mst_port &&
                            intel_connector->encoder->hpd_pin == pin) {
                                if (connector->polled != intel_connector->polled)
                                        DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
                                                         connector->name);
                                connector->polled = intel_connector->polled;
                                if (!connector->polled)
                                        connector->polled = DRM_CONNECTOR_POLL_HPD;
                        }
                }
                drm_connector_list_iter_end(&conn_iter);
        }
        if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        intel_runtime_pm_put(dev_priv, wakeref);
}

bool intel_encoder_hotplug(struct intel_encoder *encoder,
                           struct intel_connector *connector)
{
        struct drm_device *dev = connector->base.dev;
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->base.status;

        connector->base.status =
                drm_helper_probe_detect(&connector->base, NULL, false);

        if (old_status == connector->base.status)
                return false;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.base.id,
                      connector->base.name,
                      drm_get_connector_status_name(old_status),
                      drm_get_connector_status_name(connector->base.status));

        return true;
}
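
/*
 * Note on the return value: callers treat it as "did the status change".
 * i915_hotplug_work_func() below ORs the results across all connectors and
 * only sends the hotplug uevent when at least one connector changed.
 */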

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
        return intel_encoder_is_dig_port(encoder) &&
                enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
}

static void i915_digport_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.dig_port_work);
        u32 long_port_mask, short_port_mask;
        struct intel_encoder *encoder;
        u32 old_bits = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->hotplug.long_port_mask;
        dev_priv->hotplug.long_port_mask = 0;
        short_port_mask = dev_priv->hotplug.short_port_mask;
        dev_priv->hotplug.short_port_mask = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_digital_port *dig_port;
                enum port port = encoder->port;
                bool long_hpd, short_hpd;
                enum irqreturn ret;

                if (!intel_encoder_has_hpd_pulse(encoder))
                        continue;

                long_hpd = long_port_mask & BIT(port);
                short_hpd = short_port_mask & BIT(port);

                if (!long_hpd && !short_hpd)
                        continue;

                dig_port = enc_to_dig_port(&encoder->base);

                ret = dig_port->hpd_pulse(dig_port, long_hpd);
                if (ret == IRQ_NONE) {
                        /* fall back to old school hpd */
                        old_bits |= BIT(encoder->hpd_pin);
                }
        }

        if (old_bits) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hotplug.event_bits |= old_bits;
                spin_unlock_irq(&dev_priv->irq_lock);
                schedule_work(&dev_priv->hotplug.hotplug_work);
        }
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.hotplug_work);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        bool changed = false;
        u32 hpd_event_bits;

        mutex_lock(&dev->mode_config.mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irq(&dev_priv->irq_lock);

        hpd_event_bits = dev_priv->hotplug.event_bits;
        dev_priv->hotplug.event_bits = 0;

        /* Enable polling for connectors which had HPD IRQ storms */
        intel_hpd_irq_storm_switch_to_polling(dev_priv);

        spin_unlock_irq(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                intel_connector = to_intel_connector(connector);
                if (!intel_connector->encoder)
                        continue;
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      connector->name, intel_encoder->hpd_pin);

                        changed |= intel_encoder->hotplug(intel_encoder,
                                                          intel_connector);
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask)
{
        struct intel_encoder *encoder;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
        u32 long_hpd_pulse_mask = 0;
        u32 short_hpd_pulse_mask = 0;
        enum hpd_pin pin;

        if (!pin_mask)
                return;

        spin_lock(&dev_priv->irq_lock);

        /*
         * Determine whether ->hpd_pulse() exists for each pin, and
         * whether we have a short or a long pulse. This is needed
         * as each pin may have up to two encoders (HDMI and DP) and
         * only one of them (DP) will have ->hpd_pulse().
         */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
                enum port port = encoder->port;
                bool long_hpd;

                pin = encoder->hpd_pin;
                if (!(BIT(pin) & pin_mask))
                        continue;

                if (!has_hpd_pulse)
                        continue;

                long_hpd = long_mask & BIT(pin);

                DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
                                 long_hpd ? "long" : "short");
                queue_dig = true;

                if (long_hpd) {
                        long_hpd_pulse_mask |= BIT(pin);
                        dev_priv->hotplug.long_port_mask |= BIT(port);
                } else {
                        short_hpd_pulse_mask |= BIT(pin);
                        dev_priv->hotplug.short_port_mask |= BIT(port);
                }
        }

        /* Now process each pin just once */
        for_each_hpd_pin(pin) {
                bool long_hpd;

                if (!(BIT(pin) & pin_mask))
                        continue;

                if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
                        /*
                         * On GMCH platforms the interrupt mask bits only
                         * prevent irq generation, not the setting of the
                         * hotplug bits itself. So only WARN about unexpected
                         * interrupts on saner platforms.
                         */
                        WARN_ONCE(!HAS_GMCH(dev_priv),
                                  "Received HPD interrupt on pin %d although disabled\n", pin);
                        continue;
                }

                if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
                        continue;

                /*
                 * Delegate to ->hpd_pulse() if one of the encoders for this
                 * pin has it, otherwise let the hotplug_work deal with this
                 * pin directly.
                 */
                if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
                        long_hpd = long_hpd_pulse_mask & BIT(pin);
                } else {
                        dev_priv->hotplug.event_bits |= BIT(pin);
                        long_hpd = true;
                        queue_hp = true;
                }

                if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
                        dev_priv->hotplug.event_bits &= ~BIT(pin);
                        storm_detected = true;
                        queue_hp = true;
                }
        }

        /*
         * Disable any IRQs that storms were detected on. Polling enablement
         * happens later in our hotplug work.
         */
        if (storm_detected && dev_priv->display_irqs_enabled)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev-priv->wq work
         * queue, as otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        if (queue_dig)
                queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
        if (queue_hp)
                schedule_work(&dev_priv->hotplug.hotplug_work);
}
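
/*
 * Hypothetical caller sketch (the real callers are the platform irq
 * handlers in i915_irq.c): after decoding hardware registers, a handler
 * passes the decoded pin masks along, e.g.
 *
 *   u32 pin_mask = BIT(HPD_PORT_B);
 *   u32 long_mask = BIT(HPD_PORT_B);  // port B's pin saw a long pulse
 *   intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */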

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug
 * and poll requests can run concurrently with other code, so locking rules
 * must be obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_init(), which enables connector polling
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
        int i;

        for_each_hpd_pin(i) {
                dev_priv->hotplug.stats[i].count = 0;
                dev_priv->hotplug.stats[i].state = HPD_ENABLED;
        }

        WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
        schedule_work(&dev_priv->hotplug.poll_init_work);

        /*
         * Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy.
         */
        if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
                spin_lock_irq(&dev_priv->irq_lock);
                if (dev_priv->display_irqs_enabled)
                        dev_priv->display.hpd_irq_setup(dev_priv);
                spin_unlock_irq(&dev_priv->irq_lock);
        }
}
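
/*
 * Expected driver load/resume ordering, per the kernel-doc above (sketch
 * only; the actual call sites live in the load and resume paths):
 *
 *   intel_irq_init_hw(dev_priv);  // interrupts must be enabled first
 *   intel_hpd_init(dev_priv);     // then hotplug support
 */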

static void i915_hpd_poll_init_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             hotplug.poll_init_work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        bool enabled;

        mutex_lock(&dev->mode_config.mutex);

        enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_connector *intel_connector =
                        to_intel_connector(connector);
                connector->polled = intel_connector->polled;

                /*
                 * MST has a dynamic intel_connector->encoder and its
                 * reprobing is all handled by the MST helpers.
                 */
                if (intel_connector->mst_port)
                        continue;

                if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
                    intel_connector->encoder->hpd_pin > HPD_NONE) {
                        connector->polled = enabled ?
                                DRM_CONNECTOR_POLL_CONNECT |
                                DRM_CONNECTOR_POLL_DISCONNECT :
                                DRM_CONNECTOR_POLL_HPD;
                }
        }
        drm_connector_list_iter_end(&conn_iter);

        if (enabled)
                drm_kms_helper_poll_enable(dev);

        mutex_unlock(&dev->mode_config.mutex);

        /*
         * We might have missed any hotplugs that happened while we were
         * in the middle of disabling polling
         */
        if (!enabled)
                drm_helper_hpd_irq_event(dev);
}

/**
 * intel_hpd_poll_init - enables/disables polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors, regardless of whether or
 * not they support hotplug detection. Under certain conditions HPD may not be
 * functional. On most Intel GPUs, this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init(), which restores hpd handling.
 */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
        WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

        /*
         * We might already be holding dev->mode_config.mutex, so do this in a
         * separate worker. There's also no issue if we race here, since we
         * always reschedule this worker anyway.
         */
        schedule_work(&dev_priv->hotplug.poll_init_work);
}

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
        INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
        INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
        INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
                          intel_hpd_irq_storm_reenable_work);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        dev_priv->hotplug.long_port_mask = 0;
        dev_priv->hotplug.short_port_mask = 0;
        dev_priv->hotplug.event_bits = 0;

        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->hotplug.dig_port_work);
        cancel_work_sync(&dev_priv->hotplug.hotplug_work);
        cancel_work_sync(&dev_priv->hotplug.poll_init_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        bool ret = false;

        if (pin == HPD_NONE)
                return false;

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                ret = true;
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        if (pin == HPD_NONE)
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        spin_unlock_irq(&dev_priv->irq_lock);
}

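/*
 * Illustrative pairing of the two helpers above (hypothetical caller):
 * only re-enable the pin if we were the ones who disabled it.
 *
 *   if (intel_hpd_disable(dev_priv, pin)) {
 *           ... do work that must not race with HPD processing ...
 *           intel_hpd_enable(dev_priv, pin);
 *   }
 */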