linux/drivers/gpu/drm/i915/display/intel_hotplug.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if the connector status changes, triggers sending of a
 * hotplug uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt. (A minimal model of this accounting is sketched below.)
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a display port sink is connected, hence on platforms whose DP
 * callback is handled by i915_digport_work_func() re-enabling of hpd is not
 * performed (it was never expected to be disabled in the first place ;)).
 * This is specific to DP sinks handled by that routine; any other display,
 * such as HDMI or DVI, enabled on the same port will have proper logic, since
 * it will use i915_hotplug_work_func() where this logic is handled.
 */
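
/*
 * A minimal sketch of the storm accounting described above, for illustration
 * only; this helper is hypothetical and not part of the driver. Per pin,
 * IRQs are counted within a fixed time window; an IRQ arriving outside the
 * window restarts it, and exceeding the threshold within one window counts
 * as a storm.
 */
static inline bool example_hpd_storm_model(unsigned long *window_start,
                                           int *count, int increment,
                                           int threshold,
                                           unsigned long period_jiffies)
{
        /* Restart the window if now falls outside [start, start + period]. */
        if (!time_in_range(jiffies, *window_start,
                           *window_start + period_jiffies)) {
                *window_start = jiffies;
                *count = 0;
        }

        *count += increment; /* long pulses weigh more than short ones */
        return *count > threshold;
}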

/**
 * intel_hpd_pin_default - return default pin associated with certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * It is only valid and used by digital port encoders.
 *
 * Return: the pin that is associated with @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
                                   enum port port)
{
        return HPD_PORT_A + port - PORT_A;
}
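
/*
 * For illustration (hypothetical snippet, not driver code): the mapping is
 * a plain linear offset, so e.g. PORT_C resolves to HPD_PORT_C:
 *
 *	enum hpd_pin pin = intel_hpd_pin_default(i915, PORT_C);
 *
 * which evaluates to HPD_PORT_A + (PORT_C - PORT_A) == HPD_PORT_C.
 */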

#define HPD_STORM_DETECT_PERIOD         1000
#define HPD_STORM_REENABLE_DELAY        (2 * 60 * 1000)
#define HPD_RETRY_DELAY                 1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
        struct intel_encoder *encoder = intel_attached_encoder(connector);

        /*
         * MST connectors get their encoder attached dynamically
         * so we need to make sure we have an encoder here. But since
         * MST encoders have their hpd_pin set to HPD_NONE we don't
         * have to special case them beyond that.
         */
        return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in &dev_priv->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return: true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
                                       enum hpd_pin pin, bool long_hpd)
{
        struct i915_hotplug *hpd = &dev_priv->hotplug;
        unsigned long start = hpd->stats[pin].last_jiffies;
        unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
        const int increment = long_hpd ? 10 : 1;
        const int threshold = hpd->hpd_storm_threshold;
        bool storm = false;

        if (!threshold ||
            (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
                return false;

        if (!time_in_range(jiffies, start, end)) {
                hpd->stats[pin].last_jiffies = jiffies;
                hpd->stats[pin].count = 0;
        }

        hpd->stats[pin].count += increment;
        if (hpd->stats[pin].count > threshold) {
                hpd->stats[pin].state = HPD_MARK_DISABLED;
                drm_dbg_kms(&dev_priv->drm,
                            "HPD interrupt storm detected on PIN %d\n", pin);
                storm = true;
        } else {
                drm_dbg_kms(&dev_priv->drm,
                            "Received HPD interrupt on PIN %d - cnt: %d\n",
                            pin, hpd->stats[pin].count);
        }

        return storm;
}
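
/*
 * Worked example (assuming the default threshold, 50 at the time of writing
 * via HPD_STORM_DEFAULT_THRESHOLD): six long pulses within one
 * HPD_STORM_DETECT_PERIOD accumulate 6 * 10 = 60 > 50 and trip the detector,
 * while occasional short pulses (+1 each) effectively never do unless
 * hpd_short_storm_enabled is set.
 */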

static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        bool hpd_disabled = false;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                enum hpd_pin pin;

                if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
                        continue;

                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE ||
                    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
                        continue;

                drm_info(&dev_priv->drm,
                         "HPD interrupt storm detected on connector %s: "
                         "switching from hotplug detection to polling\n",
                         connector->base.name);

                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
                        DRM_CONNECTOR_POLL_DISCONNECT;
                hpd_disabled = true;
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
}

static void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
        if (i915->display_irqs_enabled && i915->display.hpd_irq_setup)
                i915->display.hpd_irq_setup(i915);
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        intel_wakeref_t wakeref;
        enum hpd_pin pin;

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        spin_lock_irq(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE ||
                    dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
                        continue;

                if (connector->base.polled != connector->polled)
                        drm_dbg(&dev_priv->drm,
                                "Reenabling HPD on connector %s\n",
                                connector->base.name);
                connector->base.polled = connector->polled;
        }
        drm_connector_list_iter_end(&conn_iter);

        for_each_hpd_pin(pin) {
                if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
                        dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        }

        intel_hpd_irq_setup(dev_priv);

        spin_unlock_irq(&dev_priv->irq_lock);

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
                      struct intel_connector *connector)
{
        struct drm_device *dev = connector->base.dev;
        enum drm_connector_status old_status;
        u64 old_epoch_counter;
        bool ret = false;

        drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->base.status;
        old_epoch_counter = connector->base.epoch_counter;

        connector->base.status =
                drm_helper_probe_detect(&connector->base, NULL, false);

        if (old_epoch_counter != connector->base.epoch_counter)
                ret = true;

        if (ret) {
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
                              connector->base.base.id,
                              connector->base.name,
                              drm_get_connector_status_name(old_status),
                              drm_get_connector_status_name(connector->base.status),
                              old_epoch_counter,
                              connector->base.epoch_counter);
                return INTEL_HOTPLUG_CHANGED;
        }
        return INTEL_HOTPLUG_UNCHANGED;
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
        return intel_encoder_is_dig_port(encoder) &&
                enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static void i915_digport_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.dig_port_work);
        u32 long_port_mask, short_port_mask;
        struct intel_encoder *encoder;
        u32 old_bits = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->hotplug.long_port_mask;
        dev_priv->hotplug.long_port_mask = 0;
        short_port_mask = dev_priv->hotplug.short_port_mask;
        dev_priv->hotplug.short_port_mask = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_digital_port *dig_port;
                enum port port = encoder->port;
                bool long_hpd, short_hpd;
                enum irqreturn ret;

                if (!intel_encoder_has_hpd_pulse(encoder))
                        continue;

                long_hpd = long_port_mask & BIT(port);
                short_hpd = short_port_mask & BIT(port);

                if (!long_hpd && !short_hpd)
                        continue;

                dig_port = enc_to_dig_port(encoder);

                ret = dig_port->hpd_pulse(dig_port, long_hpd);
                if (ret == IRQ_NONE) {
                        /* fall back to old school hpd */
                        old_bits |= BIT(encoder->hpd_pin);
                }
        }

        if (old_bits) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hotplug.event_bits |= old_bits;
                spin_unlock_irq(&dev_priv->irq_lock);
                queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
        }
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

        spin_lock_irq(&i915->irq_lock);
        i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
        spin_unlock_irq(&i915->irq_lock);

        queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
}
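
/*
 * Hedged usage sketch (hypothetical call site, not from this file): code
 * that learns of a sink event outside the interrupt path can feed it back
 * through the normal short-pulse machinery, e.g. from an intel_dp context:
 *
 *	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
 *
 * dp_to_dig_port() is the existing i915 helper mapping an intel_dp back to
 * its digital port.
 */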

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             hotplug.hotplug_work.work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        u32 changed = 0, retry = 0;
        u32 hpd_event_bits;
        u32 hpd_retry_bits;

        mutex_lock(&dev->mode_config.mutex);
        drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

        spin_lock_irq(&dev_priv->irq_lock);

        hpd_event_bits = dev_priv->hotplug.event_bits;
        dev_priv->hotplug.event_bits = 0;
        hpd_retry_bits = dev_priv->hotplug.retry_bits;
        dev_priv->hotplug.retry_bits = 0;

        /* Enable polling for connectors which had HPD IRQ storms */
        intel_hpd_irq_storm_switch_to_polling(dev_priv);

        spin_unlock_irq(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                enum hpd_pin pin;
                u32 hpd_bit;

                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE)
                        continue;

                hpd_bit = BIT(pin);
                if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
                        struct intel_encoder *encoder =
                                intel_attached_encoder(connector);

                        if (hpd_event_bits & hpd_bit)
                                connector->hotplug_retries = 0;
                        else
                                connector->hotplug_retries++;

                        drm_dbg_kms(&dev_priv->drm,
                                    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
                                    connector->base.name, pin,
                                    connector->hotplug_retries);

                        switch (encoder->hotplug(encoder, connector)) {
                        case INTEL_HOTPLUG_UNCHANGED:
                                break;
                        case INTEL_HOTPLUG_CHANGED:
                                changed |= hpd_bit;
                                break;
                        case INTEL_HOTPLUG_RETRY:
                                retry |= hpd_bit;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);

        /* Remove shared HPD pins that have changed */
        retry &= ~changed;
        if (retry) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hotplug.retry_bits |= retry;
                spin_unlock_irq(&dev_priv->irq_lock);

                mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
                                 msecs_to_jiffies(HPD_RETRY_DELAY));
        }
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to the appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask)
{
        struct intel_encoder *encoder;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
        u32 long_hpd_pulse_mask = 0;
        u32 short_hpd_pulse_mask = 0;
        enum hpd_pin pin;

        if (!pin_mask)
                return;

        spin_lock(&dev_priv->irq_lock);

        /*
         * Determine whether ->hpd_pulse() exists for each pin, and
         * whether we have a short or a long pulse. This is needed
         * as each pin may have up to two encoders (HDMI and DP) and
         * only one of them (DP) will have ->hpd_pulse().
         */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                enum port port = encoder->port;
                bool long_hpd;

                pin = encoder->hpd_pin;
                if (!(BIT(pin) & pin_mask))
                        continue;

                if (!intel_encoder_has_hpd_pulse(encoder))
                        continue;

                long_hpd = long_mask & BIT(pin);

                drm_dbg(&dev_priv->drm,
                        "digital hpd on [ENCODER:%d:%s] - %s\n",
                        encoder->base.base.id, encoder->base.name,
                        long_hpd ? "long" : "short");
                queue_dig = true;

                if (long_hpd) {
                        long_hpd_pulse_mask |= BIT(pin);
                        dev_priv->hotplug.long_port_mask |= BIT(port);
                } else {
                        short_hpd_pulse_mask |= BIT(pin);
                        dev_priv->hotplug.short_port_mask |= BIT(port);
                }
        }

        /* Now process each pin just once */
        for_each_hpd_pin(pin) {
                bool long_hpd;

                if (!(BIT(pin) & pin_mask))
                        continue;

                if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
                        /*
                         * On GMCH platforms the interrupt mask bits only
                         * prevent irq generation, not the setting of the
                         * hotplug bits itself. So only WARN about unexpected
                         * interrupts on saner platforms.
                         */
                        drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
                                      "Received HPD interrupt on pin %d although disabled\n",
                                      pin);
                        continue;
                }

                if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
                        continue;

                /*
                 * Delegate to ->hpd_pulse() if one of the encoders for this
                 * pin has it, otherwise let the hotplug_work deal with this
                 * pin directly.
                 */
                if ((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin)) {
                        long_hpd = long_hpd_pulse_mask & BIT(pin);
                } else {
                        dev_priv->hotplug.event_bits |= BIT(pin);
                        long_hpd = true;
                        queue_hp = true;
                }

                if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
                        dev_priv->hotplug.event_bits &= ~BIT(pin);
                        storm_detected = true;
                        queue_hp = true;
                }
        }

        /*
         * Disable any IRQs that storms were detected on. Polling enablement
         * happens later in our hotplug work.
         */
        if (storm_detected)
                intel_hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev_priv->wq work
         * queue for otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        if (queue_dig)
                queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
        if (queue_hp)
                queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
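
/*
 * Hedged call-site sketch (hypothetical, simplified; the real register
 * decode lives in the platform irq code in i915_irq.c): a platform handler
 * turns its hotplug trigger registers into the two masks and hands them
 * off here:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	(decode platform hotplug registers into pin_mask/long_mask)
 *
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */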

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
        int i;

        if (!HAS_DISPLAY(dev_priv))
                return;

        for_each_hpd_pin(i) {
                dev_priv->hotplug.stats[i].count = 0;
                dev_priv->hotplug.stats[i].state = HPD_ENABLED;
        }

        /*
         * Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy.
         */
        spin_lock_irq(&dev_priv->irq_lock);
        intel_hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
}
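
/*
 * Hedged ordering sketch (illustrative, per the kerneldoc above): on load
 * or resume the expected sequence is roughly interrupt enabling first,
 * then hotplug arming:
 *
 *	intel_irq_init_hw(i915);
 *	intel_hpd_init(i915);
 */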

static void i915_hpd_poll_init_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             hotplug.poll_init_work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        bool enabled;

        mutex_lock(&dev->mode_config.mutex);

        enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                enum hpd_pin pin;

                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE)
                        continue;

                connector->base.polled = connector->polled;

                if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
                        connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
                                DRM_CONNECTOR_POLL_DISCONNECT;
        }
        drm_connector_list_iter_end(&conn_iter);

        if (enabled)
                drm_kms_helper_poll_enable(dev);

        mutex_unlock(&dev->mode_config.mutex);

        /*
         * We might have missed any hotplugs that happened while we were
         * in the middle of disabling polling
         */
        if (!enabled)
                drm_helper_hpd_irq_event(dev);
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
        if (!HAS_DISPLAY(dev_priv))
                return;

        WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

        /*
         * We might already be holding dev->mode_config.mutex, so do this in a
         * separate worker. There's no issue if we race here, since we always
         * reschedule this worker anyway.
         */
        schedule_work(&dev_priv->hotplug.poll_init_work);
}

/**
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function disables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual polling update in a separate
 * worker.
 *
 * Also used during driver init to initialize connector->polled
 * appropriately for all connectors.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
{
        if (!HAS_DISPLAY(dev_priv))
                return;

        WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
        schedule_work(&dev_priv->hotplug.poll_init_work);
}
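
/*
 * Hedged pairing sketch (illustrative; the real call sites live in the
 * runtime PM and display power code): polling stands in for HPD while the
 * hardware cannot deliver interrupts, so a suspend/resume cycle roughly
 * pairs the two as
 *
 *	intel_hpd_poll_enable(i915);	(on suspend, HPD going away)
 *	...
 *	intel_hpd_init(i915);		(on resume, re-arm HPD)
 *	intel_hpd_poll_disable(i915);	(and stop polling again)
 */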

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
        INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
                          i915_hotplug_work_func);
        INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
        INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
        INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
                          intel_hpd_irq_storm_reenable_work);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
        if (!HAS_DISPLAY(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        dev_priv->hotplug.long_port_mask = 0;
        dev_priv->hotplug.short_port_mask = 0;
        dev_priv->hotplug.event_bits = 0;
        dev_priv->hotplug.retry_bits = 0;

        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->hotplug.dig_port_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
        cancel_work_sync(&dev_priv->hotplug.poll_init_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        bool ret = false;

        if (pin == HPD_NONE)
                return false;

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                ret = true;
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        if (pin == HPD_NONE)
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        spin_unlock_irq(&dev_priv->irq_lock);
}