linux/drivers/gpu/drm/i915/display/intel_hotplug.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt.
 *
 * The current implementation expects that hotplug interrupt storms will not be
 * seen when a display port sink is connected, hence on platforms whose DP
 * callback is handled by i915_digport_work_func() re-enabling of HPD is not
 * performed (it was never expected to be disabled in the first place ;) ).
 * This is specific to DP sinks handled by that routine; any other display such
 * as HDMI or DVI enabled on the same port will have the proper logic, since it
 * will use i915_hotplug_work_func() where this logic is handled.
 */
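
/*
 * As a rough illustration of the userspace side described above (not part
 * of i915 itself), a compositor might watch for the hotplug uevent with
 * libudev along these lines; names and error handling are hypothetical:
 *
 *	struct udev *udev = udev_new();
 *	struct udev_monitor *mon =
 *		udev_monitor_new_from_netlink(udev, "udev");
 *
 *	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *	udev_monitor_enable_receiving(mon);
 *
 *	// poll() on udev_monitor_get_fd(mon), then:
 *	struct udev_device *dev = udev_monitor_receive_device(mon);
 *	if (dev && udev_device_get_property_value(dev, "HOTPLUG"))
 *		; // re-probe connectors and trigger a modeset as needed
 */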

/**
 * intel_hpd_pin_default - return default pin associated with certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * It is only valid for and used by digital port encoders.
 *
 * Return pin that is associated with @port, or HPD_NONE if no pin is
 * hard associated with that @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
                                   enum port port)
{
        enum phy phy = intel_port_to_phy(dev_priv, port);

        /*
         * RKL + TGP PCH is a special case; we effectively choose the hpd_pin
         * based on the DDI rather than the PHY (i.e., the last two outputs
         * should be HPD_PORT_{D,E} rather than {C,D}).  Note that this differs
         * from the behavior of both TGL+TGP and RKL+CMP.
         */
        if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
                return HPD_PORT_A + port - PORT_A;

        switch (phy) {
        case PHY_F:
                return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
        case PHY_A ... PHY_E:
        case PHY_G ... PHY_I:
                return HPD_PORT_A + phy - PHY_A;
        default:
                MISSING_CASE(phy);
                return HPD_NONE;
        }
}
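
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * encoder setup code typically resolves its default pin once and bails
 * out if the port has no hard-wired HPD line.
 *
 *	enum hpd_pin pin = intel_hpd_pin_default(dev_priv, PORT_B);
 *
 *	if (pin == HPD_NONE)
 *		return;	// port has no dedicated hotplug pin
 *	encoder->hpd_pin = pin;
 */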

#define HPD_STORM_DETECT_PERIOD         1000
#define HPD_STORM_REENABLE_DELAY        (2 * 60 * 1000)
#define HPD_RETRY_DELAY                 1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
        struct intel_encoder *encoder = intel_attached_encoder(connector);

        /*
         * MST connectors get their encoder attached dynamically
         * so we need to make sure we have an encoder here. But since
         * MST encoders have their hpd_pin set to HPD_NONE we don't
         * have to special case them beyond that.
         */
        return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
                                       enum hpd_pin pin, bool long_hpd)
{
        struct i915_hotplug *hpd = &dev_priv->hotplug;
        unsigned long start = hpd->stats[pin].last_jiffies;
        unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
        const int increment = long_hpd ? 10 : 1;
        const int threshold = hpd->hpd_storm_threshold;
        bool storm = false;

        if (!threshold ||
            (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
                return false;

        if (!time_in_range(jiffies, start, end)) {
                hpd->stats[pin].last_jiffies = jiffies;
                hpd->stats[pin].count = 0;
        }

        hpd->stats[pin].count += increment;
        if (hpd->stats[pin].count > threshold) {
                hpd->stats[pin].state = HPD_MARK_DISABLED;
                drm_dbg_kms(&dev_priv->drm,
                            "HPD interrupt storm detected on PIN %d\n", pin);
                storm = true;
        } else {
                drm_dbg_kms(&dev_priv->drm,
                            "Received HPD interrupt on PIN %d - cnt: %d\n",
                            pin, hpd->stats[pin].count);
        }

        return storm;
}
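
/*
 * A worked example of the counting above, assuming the default threshold
 * of 50 (HPD_STORM_DEFAULT_THRESHOLD): six long IRQs within one
 * HPD_STORM_DETECT_PERIOD contribute 6 * 10 = 60 > 50, so the sixth IRQ
 * marks the pin HPD_MARK_DISABLED; with short storm detection enabled,
 * the same would take 51 short pulses at +1 each. The window resets
 * whenever an IRQ arrives outside [last_jiffies, last_jiffies + period].
 */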

static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        bool hpd_disabled = false;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                enum hpd_pin pin;

                if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
                        continue;

                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE ||
                    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
                        continue;

                drm_info(&dev_priv->drm,
                         "HPD interrupt storm detected on connector %s: "
                         "switching from hotplug detection to polling\n",
                         connector->base.name);

                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
                        DRM_CONNECTOR_POLL_DISCONNECT;
                hpd_disabled = true;
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        intel_wakeref_t wakeref;
        enum hpd_pin pin;

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        spin_lock_irq(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE ||
                    dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
                        continue;

                if (connector->base.polled != connector->polled)
                        drm_dbg(&dev_priv->drm,
                                "Reenabling HPD on connector %s\n",
                                connector->base.name);
                connector->base.polled = connector->polled;
        }
        drm_connector_list_iter_end(&conn_iter);

        for_each_hpd_pin(pin) {
                if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
                        dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        }

        if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);

        spin_unlock_irq(&dev_priv->irq_lock);

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
                      struct intel_connector *connector)
{
        struct drm_device *dev = connector->base.dev;
        enum drm_connector_status old_status;
        u64 old_epoch_counter;
        bool ret = false;

        drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->base.status;
        old_epoch_counter = connector->base.epoch_counter;

        connector->base.status =
                drm_helper_probe_detect(&connector->base, NULL, false);

        if (old_epoch_counter != connector->base.epoch_counter)
                ret = true;

        if (ret) {
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
                              connector->base.base.id,
                              connector->base.name,
                              drm_get_connector_status_name(old_status),
                              drm_get_connector_status_name(connector->base.status),
                              old_epoch_counter,
                              connector->base.epoch_counter);
                return INTEL_HOTPLUG_CHANGED;
        }
        return INTEL_HOTPLUG_UNCHANGED;
}
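
/*
 * intel_encoder_hotplug() is the generic ->hotplug implementation; encoder
 * init code installs it (or a wrapper that adds retry handling, as the DP
 * code does) roughly like the sketch below:
 *
 *	encoder->hotplug = intel_encoder_hotplug;
 *	// or, for an encoder needing retries on flaky sinks:
 *	// encoder->hotplug = my_encoder_hotplug;	// hypothetical wrapper
 */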

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
        return intel_encoder_is_dig_port(encoder) &&
                enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static void i915_digport_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.dig_port_work);
        u32 long_port_mask, short_port_mask;
        struct intel_encoder *encoder;
        u32 old_bits = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->hotplug.long_port_mask;
        dev_priv->hotplug.long_port_mask = 0;
        short_port_mask = dev_priv->hotplug.short_port_mask;
        dev_priv->hotplug.short_port_mask = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_digital_port *dig_port;
                enum port port = encoder->port;
                bool long_hpd, short_hpd;
                enum irqreturn ret;

                if (!intel_encoder_has_hpd_pulse(encoder))
                        continue;

                long_hpd = long_port_mask & BIT(port);
                short_hpd = short_port_mask & BIT(port);

                if (!long_hpd && !short_hpd)
                        continue;

                dig_port = enc_to_dig_port(encoder);

                ret = dig_port->hpd_pulse(dig_port, long_hpd);
                if (ret == IRQ_NONE) {
                        /* fall back to old school hpd */
                        old_bits |= BIT(encoder->hpd_pin);
                }
        }

        if (old_bits) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hotplug.event_bits |= old_bits;
                spin_unlock_irq(&dev_priv->irq_lock);
                queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
        }
}
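
/*
 * The ->hpd_pulse() contract, illustrated with a hypothetical handler
 * (intel_dp_hpd_pulse() is the real one): return IRQ_HANDLED when the
 * pulse was fully serviced, or IRQ_NONE to make the loop above fall back
 * to regular hotplug processing for the encoder's pin.
 *
 *	static enum irqreturn my_hpd_pulse(struct intel_digital_port *dig_port,
 *					   bool long_hpd)
 *	{
 *		if (!long_hpd)
 *			return IRQ_HANDLED;	// e.g. sink IRQ serviced in place
 *
 *		return IRQ_NONE;	// long pulse: let hotplug_work re-detect
 *	}
 */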

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

        spin_lock_irq(&i915->irq_lock);
        i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
        spin_unlock_irq(&i915->irq_lock);

        queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
}
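
/*
 * A minimal usage sketch: code that has just completed a sideband
 * transaction and wants the short-pulse path re-run for the port can
 * simply do (hypothetical caller, not part of this file):
 *
 *	intel_hpd_trigger_irq(dig_port);
 *
 * which behaves as if the sink had asserted a short HPD pulse.
 */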

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             hotplug.hotplug_work.work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        u32 changed = 0, retry = 0;
        u32 hpd_event_bits;
        u32 hpd_retry_bits;

        mutex_lock(&dev->mode_config.mutex);
        drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

        spin_lock_irq(&dev_priv->irq_lock);

        hpd_event_bits = dev_priv->hotplug.event_bits;
        dev_priv->hotplug.event_bits = 0;
        hpd_retry_bits = dev_priv->hotplug.retry_bits;
        dev_priv->hotplug.retry_bits = 0;

        /* Enable polling for connectors which had HPD IRQ storms */
        intel_hpd_irq_storm_switch_to_polling(dev_priv);

        spin_unlock_irq(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                enum hpd_pin pin;
                u32 hpd_bit;

                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE)
                        continue;

                hpd_bit = BIT(pin);
                if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
                        struct intel_encoder *encoder =
                                intel_attached_encoder(connector);

                        if (hpd_event_bits & hpd_bit)
                                connector->hotplug_retries = 0;
                        else
                                connector->hotplug_retries++;

                        drm_dbg_kms(&dev_priv->drm,
                                    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
                                    connector->base.name, pin,
                                    connector->hotplug_retries);

                        switch (encoder->hotplug(encoder, connector)) {
                        case INTEL_HOTPLUG_UNCHANGED:
                                break;
                        case INTEL_HOTPLUG_CHANGED:
                                changed |= hpd_bit;
                                break;
                        case INTEL_HOTPLUG_RETRY:
                                retry |= hpd_bit;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);

        /* Remove shared HPD pins that have changed */
        retry &= ~changed;
        if (retry) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hotplug.retry_bits |= retry;
                spin_unlock_irq(&dev_priv->irq_lock);

                mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
                                 msecs_to_jiffies(HPD_RETRY_DELAY));
        }
}
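
/*
 * An encoder ->hotplug() hook opts into the retry path above by returning
 * INTEL_HOTPLUG_RETRY; i915_hotplug_work_func() then re-runs detection
 * after HPD_RETRY_DELAY. A hedged sketch of such a hook (the real DP hook
 * wraps intel_encoder_hotplug() similarly; "my_dp_hotplug" is hypothetical):
 *
 *	static enum intel_hotplug_state
 *	my_dp_hotplug(struct intel_encoder *encoder,
 *		      struct intel_connector *connector)
 *	{
 *		enum intel_hotplug_state state;
 *
 *		state = intel_encoder_hotplug(encoder, connector);
 *
 *		// some sinks need a moment before detect succeeds
 *		if (state == INTEL_HOTPLUG_UNCHANGED &&
 *		    connector->hotplug_retries < 3)
 *			state = INTEL_HOTPLUG_RETRY;
 *
 *		return state;
 *	}
 */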
/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask)
{
        struct intel_encoder *encoder;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
        u32 long_hpd_pulse_mask = 0;
        u32 short_hpd_pulse_mask = 0;
        enum hpd_pin pin;

        if (!pin_mask)
                return;

        spin_lock(&dev_priv->irq_lock);

        /*
         * Determine whether ->hpd_pulse() exists for each pin, and
         * whether we have a short or a long pulse. This is needed
         * as each pin may have up to two encoders (HDMI and DP) and
         * only one of them (DP) will have ->hpd_pulse().
         */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
                enum port port = encoder->port;
                bool long_hpd;

                pin = encoder->hpd_pin;
                if (!(BIT(pin) & pin_mask))
                        continue;

                if (!has_hpd_pulse)
                        continue;

                long_hpd = long_mask & BIT(pin);

                drm_dbg(&dev_priv->drm,
                        "digital hpd on [ENCODER:%d:%s] - %s\n",
                        encoder->base.base.id, encoder->base.name,
                        long_hpd ? "long" : "short");
                queue_dig = true;

                if (long_hpd) {
                        long_hpd_pulse_mask |= BIT(pin);
                        dev_priv->hotplug.long_port_mask |= BIT(port);
                } else {
                        short_hpd_pulse_mask |= BIT(pin);
                        dev_priv->hotplug.short_port_mask |= BIT(port);
                }
        }

        /* Now process each pin just once */
        for_each_hpd_pin(pin) {
                bool long_hpd;

                if (!(BIT(pin) & pin_mask))
                        continue;

                if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
                        /*
                         * On GMCH platforms the interrupt mask bits only
                         * prevent irq generation, not the setting of the
                         * hotplug bits itself. So only WARN about unexpected
                         * interrupts on saner platforms.
                         */
                        drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
                                      "Received HPD interrupt on pin %d although disabled\n",
                                      pin);
                        continue;
                }

                if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
                        continue;

                /*
                 * Delegate to ->hpd_pulse() if one of the encoders for this
                 * pin has it, otherwise let the hotplug_work deal with this
                 * pin directly.
                 */
                if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
                        long_hpd = long_hpd_pulse_mask & BIT(pin);
                } else {
                        dev_priv->hotplug.event_bits |= BIT(pin);
                        long_hpd = true;
                        queue_hp = true;
                }

                if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
                        dev_priv->hotplug.event_bits &= ~BIT(pin);
                        storm_detected = true;
                        queue_hp = true;
                }
        }

        /*
         * Disable any IRQs that storms were detected on. Polling enablement
         * happens later in our hotplug work.
         */
        if (storm_detected && dev_priv->display_irqs_enabled)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev-priv->wq work
         * queue for otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        if (queue_dig)
                queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
        if (queue_hp)
                queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
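
/*
 * A hedged sketch of how a platform irq handler feeds this function (the
 * real decoding lives in i915_irq.c; "hotplug_status" and the trigger
 * check are illustrative only):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	if (hotplug_status & PORTB_HOTPLUG_INT_STATUS) {
 *		pin_mask |= BIT(HPD_PORT_B);
 *		long_mask |= BIT(HPD_PORT_B);	// treat as a long pulse
 *	}
 *
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */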

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_init(), which enables connector polling
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
        int i;

        for_each_hpd_pin(i) {
                dev_priv->hotplug.stats[i].count = 0;
                dev_priv->hotplug.stats[i].state = HPD_ENABLED;
        }

        WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
        schedule_work(&dev_priv->hotplug.poll_init_work);

        /*
         * Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy.
         */
        if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
                spin_lock_irq(&dev_priv->irq_lock);
                if (dev_priv->display_irqs_enabled)
                        dev_priv->display.hpd_irq_setup(dev_priv);
                spin_unlock_irq(&dev_priv->irq_lock);
        }
}
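
/*
 * Initialization ordering, sketched from the description above (the exact
 * call sites live in the driver load and resume paths):
 *
 *	intel_irq_init_hw(dev_priv);	// interrupts on first
 *	intel_hpd_init(dev_priv);	// then hotplug support
 *
 * and on runtime suspend the inverse direction uses intel_hpd_poll_init()
 * to fall back to polling while HPD is non-functional.
 */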

static void i915_hpd_poll_init_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             hotplug.poll_init_work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        bool enabled;

        mutex_lock(&dev->mode_config.mutex);

        enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                enum hpd_pin pin;

                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE)
                        continue;

                connector->base.polled = connector->polled;

                if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
                        connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
                                DRM_CONNECTOR_POLL_DISCONNECT;
        }
        drm_connector_list_iter_end(&conn_iter);

        if (enabled)
                drm_kms_helper_poll_enable(dev);

        mutex_unlock(&dev->mode_config.mutex);

        /*
         * We might have missed any hotplugs that happened while we were
         * in the middle of disabling polling
         */
        if (!enabled)
                drm_helper_hpd_irq_event(dev);
}

/**
 * intel_hpd_poll_init - enables/disables polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors, regardless of whether or
 * not they support hotplug detection. Under certain conditions HPD may not be
 * functional. On most Intel GPUs, this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init(), which restores hpd handling.
 */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
        WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

        /*
         * We might already be holding dev->mode_config.mutex, so do this in a
         * separate worker.
         * Also, there's no issue if we race here since we always reschedule
         * this worker anyway.
         */
        schedule_work(&dev_priv->hotplug.poll_init_work);
}
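
/*
 * Runtime PM pairing, as described above (a hedged sketch; the actual
 * suspend/resume hooks live elsewhere in the driver):
 *
 *	// entering runtime suspend: HPD stops working, poll instead
 *	intel_hpd_poll_init(dev_priv);
 *
 *	// resuming: restore interrupt-driven hotplug detection
 *	intel_hpd_init(dev_priv);
 */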

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
        INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
                          i915_hotplug_work_func);
        INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
        INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
        INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
                          intel_hpd_irq_storm_reenable_work);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        dev_priv->hotplug.long_port_mask = 0;
        dev_priv->hotplug.short_port_mask = 0;
        dev_priv->hotplug.event_bits = 0;
        dev_priv->hotplug.retry_bits = 0;

        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->hotplug.dig_port_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
        cancel_work_sync(&dev_priv->hotplug.poll_init_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        bool ret = false;

        if (pin == HPD_NONE)
                return false;

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                ret = true;
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        if (pin == HPD_NONE)
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        spin_unlock_irq(&dev_priv->irq_lock);
}
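
/*
 * These two are meant to bracket operations that would otherwise trigger
 * spurious hotplug processing, e.g. (hypothetical caller):
 *
 *	bool was_enabled = intel_hpd_disable(dev_priv, encoder->hpd_pin);
 *
 *	// ... poke the port in a way that may bounce the HPD line ...
 *
 *	if (was_enabled)
 *		intel_hpd_enable(dev_priv, encoder->hpd_pin);
 */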