/* linux/drivers/gpu/drm/i915/intel_dp.c */
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Keith Packard <keithp@keithp.com>
  25 *
  26 */
  27
  28#include <linux/i2c.h>
  29#include <linux/slab.h>
  30#include <linux/export.h>
  31#include <linux/notifier.h>
  32#include <linux/reboot.h>
  33#include <drm/drmP.h>
  34#include <drm/drm_atomic_helper.h>
  35#include <drm/drm_crtc.h>
  36#include <drm/drm_crtc_helper.h>
  37#include <drm/drm_edid.h>
  38#include "intel_drv.h"
  39#include <drm/i915_drm.h>
  40#include "i915_drv.h"
  41
  42#define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
  43
  44/* Compliance test status bits  */
  45#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
  46#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  47#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  48#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  49
/*
 * Pairing of a DP link clock with the DPLL divider settings that produce
 * it. Clocks are in kHz; 162000 corresponds to the 1.62Gbps link rate.
 */
struct dp_link_dpll {
        int clock;              /* link clock in kHz */
        struct dpll dpll;       /* p1/p2/n/m1/m2 dividers for that clock */
};
  54
/* DPLL settings for the two standard DP link rates on gen4. */
static const struct dp_link_dpll gen4_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL settings for PCH-split platforms. */
static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL settings for Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming a fractional division for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { 540000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Source-supported link rates (kHz), per platform family. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
                                  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
  99
 100/**
 101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 102 * @intel_dp: DP struct
 103 *
 104 * If a CPU or PCH DP output is attached to an eDP panel, this function
 105 * will return true, and false otherwise.
 106 */
 107static bool is_edp(struct intel_dp *intel_dp)
 108{
 109        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 110
 111        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 112}
 113
 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
 115{
 116        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 117
 118        return intel_dig_port->base.base.dev;
 119}
 120
 121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 122{
 123        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
 124}
 125
 126static void intel_dp_link_down(struct intel_dp *intel_dp);
 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
 130static void vlv_steal_power_sequencer(struct drm_device *dev,
 131                                      enum pipe pipe);
 132
 133static unsigned int intel_dp_unused_lane_mask(int lane_count)
 134{
 135        return ~((1 << lane_count) - 1) & 0xf;
 136}
 137
 138static int
 139intel_dp_max_link_bw(struct intel_dp  *intel_dp)
 140{
 141        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
 142
 143        switch (max_link_bw) {
 144        case DP_LINK_BW_1_62:
 145        case DP_LINK_BW_2_7:
 146        case DP_LINK_BW_5_4:
 147                break;
 148        default:
 149                WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
 150                     max_link_bw);
 151                max_link_bw = DP_LINK_BW_1_62;
 152                break;
 153        }
 154        return max_link_bw;
 155}
 156
 157static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
 158{
 159        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 160        struct drm_device *dev = intel_dig_port->base.base.dev;
 161        u8 source_max, sink_max;
 162
 163        source_max = 4;
 164        if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
 165            (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
 166                source_max = 2;
 167
 168        sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
 169
 170        return min(source_max, sink_max);
 171}
 172
 173/*
 174 * The units on the numbers in the next two are... bizarre.  Examples will
 175 * make it clearer; this one parallels an example in the eDP spec.
 176 *
 177 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 178 *
 179 *     270000 * 1 * 8 / 10 == 216000
 180 *
 181 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 182 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 183 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 184 * 119000.  At 18bpp that's 2142000 kilobits per second.
 185 *
 186 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 187 * get the result in decakilobits instead of kilobits.
 188 */
 189
 190static int
 191intel_dp_link_required(int pixel_clock, int bpp)
 192{
 193        return (pixel_clock * bpp + 9) / 10;
 194}
 195
 196static int
 197intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 198{
 199        return (max_link_clock * max_lanes * 8) / 10;
 200}
 201
 202static enum drm_mode_status
 203intel_dp_mode_valid(struct drm_connector *connector,
 204                    struct drm_display_mode *mode)
 205{
 206        struct intel_dp *intel_dp = intel_attached_dp(connector);
 207        struct intel_connector *intel_connector = to_intel_connector(connector);
 208        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 209        int target_clock = mode->clock;
 210        int max_rate, mode_rate, max_lanes, max_link_clock;
 211
 212        if (is_edp(intel_dp) && fixed_mode) {
 213                if (mode->hdisplay > fixed_mode->hdisplay)
 214                        return MODE_PANEL;
 215
 216                if (mode->vdisplay > fixed_mode->vdisplay)
 217                        return MODE_PANEL;
 218
 219                target_clock = fixed_mode->clock;
 220        }
 221
 222        max_link_clock = intel_dp_max_link_rate(intel_dp);
 223        max_lanes = intel_dp_max_lane_count(intel_dp);
 224
 225        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
 226        mode_rate = intel_dp_link_required(target_clock, 18);
 227
 228        if (mode_rate > max_rate)
 229                return MODE_CLOCK_HIGH;
 230
 231        if (mode->clock < 10000)
 232                return MODE_CLOCK_LOW;
 233
 234        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 235                return MODE_H_ILLEGAL;
 236
 237        return MODE_OK;
 238}
 239
 240uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
 241{
 242        int     i;
 243        uint32_t v = 0;
 244
 245        if (src_bytes > 4)
 246                src_bytes = 4;
 247        for (i = 0; i < src_bytes; i++)
 248                v |= ((uint32_t) src[i]) << ((3-i) * 8);
 249        return v;
 250}
 251
 252static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
 253{
 254        int i;
 255        if (dst_bytes > 4)
 256                dst_bytes = 4;
 257        for (i = 0; i < dst_bytes; i++)
 258                dst[i] = src >> ((3-i) * 8);
 259}
 260
 261static void
 262intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 263                                    struct intel_dp *intel_dp);
 264static void
 265intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 266                                              struct intel_dp *intel_dp);
 267
/*
 * Acquire pps_mutex along with an AUX power domain reference.
 * The power domain reference must be taken before the mutex (and dropped
 * after releasing it, see pps_unlock()) to avoid the lock inversion
 * described in vlv_power_sequencer_reset().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_aux_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}
 285
/*
 * Release pps_mutex and drop the AUX power domain reference taken by
 * pps_lock(). Unlock order (mutex first, then power domain) mirrors the
 * lock order, keeping power domain get/put outside the mutex.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_aux_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}
 299
 300static void
 301vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 302{
 303        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 304        struct drm_device *dev = intel_dig_port->base.base.dev;
 305        struct drm_i915_private *dev_priv = dev->dev_private;
 306        enum pipe pipe = intel_dp->pps_pipe;
 307        bool pll_enabled, release_cl_override = false;
 308        enum dpio_phy phy = DPIO_PHY(pipe);
 309        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
 310        uint32_t DP;
 311
 312        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
 313                 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
 314                 pipe_name(pipe), port_name(intel_dig_port->port)))
 315                return;
 316
 317        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
 318                      pipe_name(pipe), port_name(intel_dig_port->port));
 319
 320        /* Preserve the BIOS-computed detected bit. This is
 321         * supposed to be read-only.
 322         */
 323        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
 324        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 325        DP |= DP_PORT_WIDTH(1);
 326        DP |= DP_LINK_TRAIN_PAT_1;
 327
 328        if (IS_CHERRYVIEW(dev))
 329                DP |= DP_PIPE_SELECT_CHV(pipe);
 330        else if (pipe == PIPE_B)
 331                DP |= DP_PIPEB_SELECT;
 332
 333        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
 334
 335        /*
 336         * The DPLL for the pipe must be enabled for this to work.
 337         * So enable temporarily it if it's not already enabled.
 338         */
 339        if (!pll_enabled) {
 340                release_cl_override = IS_CHERRYVIEW(dev) &&
 341                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);
 342
 343                vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
 344                                 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
 345        }
 346
 347        /*
 348         * Similar magic as in intel_dp_enable_port().
 349         * We _must_ do this port enable + disable trick
 350         * to make this power seqeuencer lock onto the port.
 351         * Otherwise even VDD force bit won't work.
 352         */
 353        I915_WRITE(intel_dp->output_reg, DP);
 354        POSTING_READ(intel_dp->output_reg);
 355
 356        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
 357        POSTING_READ(intel_dp->output_reg);
 358
 359        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
 360        POSTING_READ(intel_dp->output_reg);
 361
 362        if (!pll_enabled) {
 363                vlv_force_pll_off(dev, pipe);
 364
 365                if (release_cl_override)
 366                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
 367        }
 368}
 369
/*
 * Return the pipe whose panel power sequencer is bound to this eDP port.
 * If none is assigned yet, pick a pipe not used by any other eDP port,
 * steal it from whatever was using it, initialize it for this port and
 * kick it so it locks onto the port. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        /* Candidate mask: only pipes A and B have power sequencers. */
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        for_each_intel_encoder(dev, encoder) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;  /* lowest free pipe in the mask */

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
 432
 433typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
 434                               enum pipe pipe);
 435
 436static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
 437                               enum pipe pipe)
 438{
 439        return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
 440}
 441
 442static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
 443                                enum pipe pipe)
 444{
 445        return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
 446}
 447
/*
 * vlv_pipe_check that accepts any pipe; used as the last-resort filter
 * in vlv_initial_power_sequencer_setup().
 */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
 453
 454static enum pipe
 455vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 456                     enum port port,
 457                     vlv_pipe_check pipe_check)
 458{
 459        enum pipe pipe;
 460
 461        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
 462                u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
 463                        PANEL_PORT_SELECT_MASK;
 464
 465                if (port_sel != PANEL_PORT_SELECT_VLV(port))
 466                        continue;
 467
 468                if (!pipe_check(dev_priv, pipe))
 469                        continue;
 470
 471                return pipe;
 472        }
 473
 474        return INVALID_PIPE;
 475}
 476
/*
 * Discover which pipe's power sequencer the hardware currently has
 * selected for this port (e.g. as programmed by the BIOS), preferring
 * one that is actively powering the panel, and initialize our
 * bookkeeping and registers from it. Leaves pps_pipe == INVALID_PIPE
 * when no sequencer is configured for the port; one will be picked
 * later by vlv_power_sequencer_pipe(). Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
 513
/*
 * Forget the power sequencer <-> pipe binding of every eDP encoder
 * (VLV/CHV only). The next pps access will then re-select and
 * re-initialize a sequencer via vlv_power_sequencer_pipe().
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        for_each_intel_encoder(dev, encoder) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}
 542
 543static i915_reg_t
 544_pp_ctrl_reg(struct intel_dp *intel_dp)
 545{
 546        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 547
 548        if (IS_BROXTON(dev))
 549                return BXT_PP_CONTROL(0);
 550        else if (HAS_PCH_SPLIT(dev))
 551                return PCH_PP_CONTROL;
 552        else
 553                return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
 554}
 555
 556static i915_reg_t
 557_pp_stat_reg(struct intel_dp *intel_dp)
 558{
 559        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 560
 561        if (IS_BROXTON(dev))
 562                return BXT_PP_STATUS(0);
 563        else if (HAS_PCH_SPLIT(dev))
 564                return PCH_PP_STATUS;
 565        else
 566                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
 567}
 568
/*
 * Reboot notifier handler to shutdown panel power to guarantee T12 timing.
 * This function is only applicable when panel PM state is not to be tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* Only act on restarts of eDP panels. */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                i915_reg_t pp_ctrl_reg, pp_div_reg;
                u32 pp_div;

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                /* Sleep long enough for the panel power cycle (T12) delay. */
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
 604
 605static bool edp_have_panel_power(struct intel_dp *intel_dp)
 606{
 607        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 608        struct drm_i915_private *dev_priv = dev->dev_private;
 609
 610        lockdep_assert_held(&dev_priv->pps_mutex);
 611
 612        if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
 613            intel_dp->pps_pipe == INVALID_PIPE)
 614                return false;
 615
 616        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
 617}
 618
 619static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 620{
 621        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 622        struct drm_i915_private *dev_priv = dev->dev_private;
 623
 624        lockdep_assert_held(&dev_priv->pps_mutex);
 625
 626        if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
 627            intel_dp->pps_pipe == INVALID_PIPE)
 628                return false;
 629
 630        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 631}
 632
 633static void
 634intel_dp_check_edp(struct intel_dp *intel_dp)
 635{
 636        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 637        struct drm_i915_private *dev_priv = dev->dev_private;
 638
 639        if (!is_edp(intel_dp))
 640                return;
 641
 642        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
 643                WARN(1, "eDP powered off while attempting aux channel communication.\n");
 644                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
 645                              I915_READ(_pp_stat_reg(intel_dp)),
 646                              I915_READ(_pp_ctrl_reg(intel_dp)));
 647        }
 648}
 649
/*
 * Wait for an AUX transfer to complete, i.e. for SEND_BUSY in the channel
 * control register to clear. Uses the AUX completion interrupt when
 * available, otherwise polls atomically. Returns the last control
 * register value read (so the caller can inspect the error bits).
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C re-reads the register into 'status' each time it is evaluated. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
 673
 674static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 675{
 676        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 677        struct drm_device *dev = intel_dig_port->base.base.dev;
 678
 679        /*
 680         * The clock divider is based off the hrawclk, and would like to run at
 681         * 2MHz.  So, take the hrawclk value and divide by 2 and use that
 682         */
 683        return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
 684}
 685
 686static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 687{
 688        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 689        struct drm_device *dev = intel_dig_port->base.base.dev;
 690        struct drm_i915_private *dev_priv = dev->dev_private;
 691
 692        if (index)
 693                return 0;
 694
 695        if (intel_dig_port->port == PORT_A) {
 696                return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 697
 698        } else {
 699                return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
 700        }
 701}
 702
 703static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 704{
 705        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 706        struct drm_device *dev = intel_dig_port->base.base.dev;
 707        struct drm_i915_private *dev_priv = dev->dev_private;
 708
 709        if (intel_dig_port->port == PORT_A) {
 710                if (index)
 711                        return 0;
 712                return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 713        } else if (HAS_PCH_LPT_H(dev_priv)) {
 714                /* Workaround for non-ULT HSW */
 715                switch (index) {
 716                case 0: return 63;
 717                case 1: return 72;
 718                default: return 0;
 719                }
 720        } else  {
 721                return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
 722        }
 723}
 724
 725static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 726{
 727        return index ? 0 : 100;
 728}
 729
 730static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 731{
 732        /*
 733         * SKL doesn't need us to program the AUX clock divider (Hardware will
 734         * derive the clock from CDCLK automatically). We still implement the
 735         * get_aux_clock_divider vfunc to plug-in into the existing code.
 736         */
 737        return index ? 0 : 1;
 738}
 739
 740static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
 741                                      bool has_aux_irq,
 742                                      int send_bytes,
 743                                      uint32_t aux_clock_divider)
 744{
 745        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 746        struct drm_device *dev = intel_dig_port->base.base.dev;
 747        uint32_t precharge, timeout;
 748
 749        if (IS_GEN6(dev))
 750                precharge = 3;
 751        else
 752                precharge = 5;
 753
 754        if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
 755                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
 756        else
 757                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
 758
 759        return DP_AUX_CH_CTL_SEND_BUSY |
 760               DP_AUX_CH_CTL_DONE |
 761               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
 762               DP_AUX_CH_CTL_TIME_OUT_ERROR |
 763               timeout |
 764               DP_AUX_CH_CTL_RECEIVE_ERROR |
 765               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 766               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
 767               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
 768}
 769
 770static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 771                                      bool has_aux_irq,
 772                                      int send_bytes,
 773                                      uint32_t unused)
 774{
 775        return DP_AUX_CH_CTL_SEND_BUSY |
 776               DP_AUX_CH_CTL_DONE |
 777               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
 778               DP_AUX_CH_CTL_TIME_OUT_ERROR |
 779               DP_AUX_CH_CTL_TIME_OUT_1600us |
 780               DP_AUX_CH_CTL_RECEIVE_ERROR |
 781               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 782               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 783}
 784
/*
 * Perform one raw DP AUX channel transaction: transmit @send_bytes from
 * @send and read the reply into @recv (at most @recv_size bytes).
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY (channel busy or forbidden reply length), -E2BIG (message larger
 * than the hardware data registers), -EIO (receive error), -ETIMEDOUT.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;

        pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for eg. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        if (try == 3) {
                /* Rate-limit the WARN: only fire again when the stuck status
                 * value changes, so a wedged channel doesn't spam the log. */
                static u32 last_status = -1;
                const u32 status = I915_READ(ch_ctl);

                if (status != last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
                        last_status = status;
                }

                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        /* Walk the platform's list of AUX clock dividers (a zero divider
         * terminates the list), retrying the transaction on each. */
        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
                                           intel_dp_pack_aux(send + i,
                                                             send_bytes - i));

                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                        /* Clear done status and any errors */
                        I915_WRITE(ch_ctl,
                                   status |
                                   DP_AUX_CH_CTL_DONE |
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);

                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         *   400us delay required for errors and timeouts
                         *   Timeout errors from the HW already meet this
                         *   requirement so skip to next iteration
                         */
                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

        /*
         * By BSpec: "Message sizes of 0 or >20 are not allowed."
         * We have no idea of what happened so we return -EBUSY so
         * drm layer takes care for the necessary retries.
         */
        if (recv_bytes == 0 || recv_bytes > 20) {
                DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
                              recv_bytes);
                /*
                 * FIXME: This patch was created on top of a series that
                 * organize the retries at drm level. There EBUSY should
                 * also take care for 1ms wait before retrying.
                 * That aux retries re-org is still needed and after that is
                 * merged we remove this sleep from here.
                 */
                usleep_range(1000, 1500);
                ret = -EBUSY;
                goto out;
        }

        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

        /* Drop VDD only if we turned it on for this transaction. */
        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp);

        return ret;
}
 956
/* AUX header: 3 address bytes plus one request/length byte. */
#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux.transfer() hook: pack a drm_dp_aux_msg into the raw AUX wire
 * format, run it through intel_dp_aux_ch() and unpack the reply.
 * Returns the number of payload bytes transferred or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
        uint8_t txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;

        /* Pack the 4-byte header: request nibble, 20-bit address, and the
         * "length minus one" byte used by the AUX protocol. */
        txbuf[0] = (msg->request << 4) |
                ((msg->address >> 16) & 0xf);
        txbuf[1] = (msg->address >> 8) & 0xff;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
        case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                /* A zero-sized write is an address-only transaction. */
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 2; /* 0 or 1 data bytes */

                if (WARN_ON(txsize > 20))
                        return -E2BIG;

                memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        /* Reply command lives in the high nibble of byte 0. */
                        msg->reply = rxbuf[0] >> 4;

                        if (ret > 1) {
                                /* Number of bytes written in a short write. */
                                ret = clamp_t(int, rxbuf[1], 0, msg->size);
                        } else {
                                /* Return payload size. */
                                ret = msg->size;
                        }
                }
                break;

        case DP_AUX_NATIVE_READ:
        case DP_AUX_I2C_READ:
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                /* One extra receive byte for the reply command. */
                rxsize = msg->size + 1;

                if (WARN_ON(rxsize > 20))
                        return -E2BIG;

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
                         * Assume happy day, and copy the data. The caller is
                         * expected to check msg->reply before touching it.
                         *
                         * Return payload size.
                         */
                        ret--;
                        memcpy(msg->buffer, rxbuf + 1, ret);
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
1028
1029static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1030                                       enum port port)
1031{
1032        switch (port) {
1033        case PORT_B:
1034        case PORT_C:
1035        case PORT_D:
1036                return DP_AUX_CH_CTL(port);
1037        default:
1038                MISSING_CASE(port);
1039                return DP_AUX_CH_CTL(PORT_B);
1040        }
1041}
1042
1043static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1044                                        enum port port, int index)
1045{
1046        switch (port) {
1047        case PORT_B:
1048        case PORT_C:
1049        case PORT_D:
1050                return DP_AUX_CH_DATA(port, index);
1051        default:
1052                MISSING_CASE(port);
1053                return DP_AUX_CH_DATA(PORT_B, index);
1054        }
1055}
1056
1057static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1058                                       enum port port)
1059{
1060        switch (port) {
1061        case PORT_A:
1062                return DP_AUX_CH_CTL(port);
1063        case PORT_B:
1064        case PORT_C:
1065        case PORT_D:
1066                return PCH_DP_AUX_CH_CTL(port);
1067        default:
1068                MISSING_CASE(port);
1069                return DP_AUX_CH_CTL(PORT_A);
1070        }
1071}
1072
1073static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1074                                        enum port port, int index)
1075{
1076        switch (port) {
1077        case PORT_A:
1078                return DP_AUX_CH_DATA(port, index);
1079        case PORT_B:
1080        case PORT_C:
1081        case PORT_D:
1082                return PCH_DP_AUX_CH_DATA(port, index);
1083        default:
1084                MISSING_CASE(port);
1085                return DP_AUX_CH_DATA(PORT_A, index);
1086        }
1087}
1088
/*
 * On SKL we don't have Aux for port E so we rely
 * on VBT to set a proper alternate aux channel.
 */
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
        const struct ddi_vbt_port_info *info =
                &dev_priv->vbt.ddi_port_info[PORT_E];

        /* Map the VBT DP_AUX_* channel selector onto the owning port. */
        switch (info->alternate_aux_channel) {
        case DP_AUX_A:
                return PORT_A;
        case DP_AUX_B:
                return PORT_B;
        case DP_AUX_C:
                return PORT_C;
        case DP_AUX_D:
                return PORT_D;
        default:
                /* VBT gave us nothing usable: warn and fall back to AUX A. */
                MISSING_CASE(info->alternate_aux_channel);
                return PORT_A;
        }
}
1112
1113static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1114                                       enum port port)
1115{
1116        if (port == PORT_E)
1117                port = skl_porte_aux_port(dev_priv);
1118
1119        switch (port) {
1120        case PORT_A:
1121        case PORT_B:
1122        case PORT_C:
1123        case PORT_D:
1124                return DP_AUX_CH_CTL(port);
1125        default:
1126                MISSING_CASE(port);
1127                return DP_AUX_CH_CTL(PORT_A);
1128        }
1129}
1130
1131static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1132                                        enum port port, int index)
1133{
1134        if (port == PORT_E)
1135                port = skl_porte_aux_port(dev_priv);
1136
1137        switch (port) {
1138        case PORT_A:
1139        case PORT_B:
1140        case PORT_C:
1141        case PORT_D:
1142                return DP_AUX_CH_DATA(port, index);
1143        default:
1144                MISSING_CASE(port);
1145                return DP_AUX_CH_DATA(PORT_A, index);
1146        }
1147}
1148
1149static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1150                                         enum port port)
1151{
1152        if (INTEL_INFO(dev_priv)->gen >= 9)
1153                return skl_aux_ctl_reg(dev_priv, port);
1154        else if (HAS_PCH_SPLIT(dev_priv))
1155                return ilk_aux_ctl_reg(dev_priv, port);
1156        else
1157                return g4x_aux_ctl_reg(dev_priv, port);
1158}
1159
1160static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1161                                          enum port port, int index)
1162{
1163        if (INTEL_INFO(dev_priv)->gen >= 9)
1164                return skl_aux_data_reg(dev_priv, port, index);
1165        else if (HAS_PCH_SPLIT(dev_priv))
1166                return ilk_aux_data_reg(dev_priv, port, index);
1167        else
1168                return g4x_aux_data_reg(dev_priv, port, index);
1169}
1170
/*
 * Cache this port's AUX control/data register offsets in the intel_dp so
 * the hot transfer path doesn't re-derive them on every transaction.
 */
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        enum port port = dp_to_dig_port(intel_dp)->port;
        int i;

        intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
        for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
                intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
1181
/*
 * Tear down the AUX channel: unregister it from the drm core and free the
 * name string allocated in intel_dp_aux_init().
 */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
        drm_dp_aux_unregister(&intel_dp->aux);
        kfree(intel_dp->aux.name);
}
1188
1189static int
1190intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1191{
1192        struct drm_device *dev = intel_dp_to_dev(intel_dp);
1193        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1194        enum port port = intel_dig_port->port;
1195        int ret;
1196
1197        intel_aux_reg_init(intel_dp);
1198
1199        intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1200        if (!intel_dp->aux.name)
1201                return -ENOMEM;
1202
1203        intel_dp->aux.dev = dev->dev;
1204        intel_dp->aux.transfer = intel_dp_aux_transfer;
1205
1206        DRM_DEBUG_KMS("registering %s bus for %s\n",
1207                      intel_dp->aux.name,
1208                      connector->base.kdev->kobj.name);
1209
1210        ret = drm_dp_aux_register(&intel_dp->aux);
1211        if (ret < 0) {
1212                DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1213                          intel_dp->aux.name, ret);
1214                kfree(intel_dp->aux.name);
1215                return ret;
1216        }
1217
1218        ret = sysfs_create_link(&connector->base.kdev->kobj,
1219                                &intel_dp->aux.ddc.dev.kobj,
1220                                intel_dp->aux.ddc.dev.kobj.name);
1221        if (ret < 0) {
1222                DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1223                          intel_dp->aux.name, ret);
1224                intel_dp_aux_fini(intel_dp);
1225                return ret;
1226        }
1227
1228        return 0;
1229}
1230
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

        /*
         * Only non-MST connectors got the sysfs link to the AUX ddc device
         * in intel_dp_aux_init(), so only they need it removed here.
         */
        if (!intel_connector->mst_port)
                sysfs_remove_link(&intel_connector->base.kdev->kobj,
                                  intel_dp->aux.ddc.dev.kobj.name);
        intel_connector_unregister(intel_connector);
}
1241
1242static void
1243skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1244{
1245        u32 ctrl1;
1246
1247        memset(&pipe_config->dpll_hw_state, 0,
1248               sizeof(pipe_config->dpll_hw_state));
1249
1250        pipe_config->ddi_pll_sel = SKL_DPLL0;
1251        pipe_config->dpll_hw_state.cfgcr1 = 0;
1252        pipe_config->dpll_hw_state.cfgcr2 = 0;
1253
1254        ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1255        switch (pipe_config->port_clock / 2) {
1256        case 81000:
1257                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1258                                              SKL_DPLL0);
1259                break;
1260        case 135000:
1261                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1262                                              SKL_DPLL0);
1263                break;
1264        case 270000:
1265                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1266                                              SKL_DPLL0);
1267                break;
1268        case 162000:
1269                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1270                                              SKL_DPLL0);
1271                break;
1272        /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1273        results in CDCLK change. Need to handle the change of CDCLK by
1274        disabling pipes and re-enabling them */
1275        case 108000:
1276                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1277                                              SKL_DPLL0);
1278                break;
1279        case 216000:
1280                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1281                                              SKL_DPLL0);
1282                break;
1283
1284        }
1285        pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1286}
1287
1288void
1289hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1290{
1291        memset(&pipe_config->dpll_hw_state, 0,
1292               sizeof(pipe_config->dpll_hw_state));
1293
1294        switch (pipe_config->port_clock / 2) {
1295        case 81000:
1296                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1297                break;
1298        case 135000:
1299                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1300                break;
1301        case 270000:
1302                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1303                break;
1304        }
1305}
1306
1307static int
1308intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1309{
1310        if (intel_dp->num_sink_rates) {
1311                *sink_rates = intel_dp->sink_rates;
1312                return intel_dp->num_sink_rates;
1313        }
1314
1315        *sink_rates = default_rates;
1316
1317        return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1318}
1319
1320bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1321{
1322        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1323        struct drm_device *dev = dig_port->base.base.dev;
1324
1325        /* WaDisableHBR2:skl */
1326        if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1327                return false;
1328
1329        if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1330            (INTEL_INFO(dev)->gen >= 9))
1331                return true;
1332        else
1333                return false;
1334}
1335
1336static int
1337intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1338{
1339        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1340        struct drm_device *dev = dig_port->base.base.dev;
1341        int size;
1342
1343        if (IS_BROXTON(dev)) {
1344                *source_rates = bxt_rates;
1345                size = ARRAY_SIZE(bxt_rates);
1346        } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1347                *source_rates = skl_rates;
1348                size = ARRAY_SIZE(skl_rates);
1349        } else {
1350                *source_rates = default_rates;
1351                size = ARRAY_SIZE(default_rates);
1352        }
1353
1354        /* This depends on the fact that 5.4 is last value in the array */
1355        if (!intel_dp_source_supports_hbr2(intel_dp))
1356                size--;
1357
1358        return size;
1359}
1360
1361static void
1362intel_dp_set_clock(struct intel_encoder *encoder,
1363                   struct intel_crtc_state *pipe_config)
1364{
1365        struct drm_device *dev = encoder->base.dev;
1366        const struct dp_link_dpll *divisor = NULL;
1367        int i, count = 0;
1368
1369        if (IS_G4X(dev)) {
1370                divisor = gen4_dpll;
1371                count = ARRAY_SIZE(gen4_dpll);
1372        } else if (HAS_PCH_SPLIT(dev)) {
1373                divisor = pch_dpll;
1374                count = ARRAY_SIZE(pch_dpll);
1375        } else if (IS_CHERRYVIEW(dev)) {
1376                divisor = chv_dpll;
1377                count = ARRAY_SIZE(chv_dpll);
1378        } else if (IS_VALLEYVIEW(dev)) {
1379                divisor = vlv_dpll;
1380                count = ARRAY_SIZE(vlv_dpll);
1381        }
1382
1383        if (divisor && count) {
1384                for (i = 0; i < count; i++) {
1385                        if (pipe_config->port_clock == divisor[i].clock) {
1386                                pipe_config->dpll = divisor[i].dpll;
1387                                pipe_config->clock_set = true;
1388                                break;
1389                        }
1390                }
1391        }
1392}
1393
1394static int intersect_rates(const int *source_rates, int source_len,
1395                           const int *sink_rates, int sink_len,
1396                           int *common_rates)
1397{
1398        int i = 0, j = 0, k = 0;
1399
1400        while (i < source_len && j < sink_len) {
1401                if (source_rates[i] == sink_rates[j]) {
1402                        if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1403                                return k;
1404                        common_rates[k] = source_rates[i];
1405                        ++k;
1406                        ++i;
1407                        ++j;
1408                } else if (source_rates[i] < sink_rates[j]) {
1409                        ++i;
1410                } else {
1411                        ++j;
1412                }
1413        }
1414        return k;
1415}
1416
/*
 * Fill @common_rates with the link rates supported by both source and
 * sink, in ascending order, and return how many there are.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
                                 int *common_rates)
{
        const int *source_rates;
        const int *sink_rates;
        int n_source, n_sink;

        n_sink = intel_dp_sink_rates(intel_dp, &sink_rates);
        n_source = intel_dp_source_rates(intel_dp, &source_rates);

        return intersect_rates(source_rates, n_source,
                               sink_rates, n_sink,
                               common_rates);
}
1430
/*
 * Format up to @nelem integers from @array into @str as a comma separated
 * list, truncating cleanly when @len is too small. @str is always
 * nul-terminated when @len is non-zero.
 */
static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
        int i;

        /* A zero-sized buffer has no room even for the terminator; the old
         * unconditional str[0] = '\0' would write out of bounds. */
        if (len == 0)
                return;

        str[0] = '\0';

        for (i = 0; i < nelem; i++) {
                int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
                /* Stop on encoding error or truncation (r counts what would
                 * have been written, so r >= len means it didn't fit). */
                if (r < 0 || (size_t)r >= len)
                        return;
                str += r;
                len -= r;
        }
}
1446
1447static void intel_dp_print_rates(struct intel_dp *intel_dp)
1448{
1449        const int *source_rates, *sink_rates;
1450        int source_len, sink_len, common_len;
1451        int common_rates[DP_MAX_SUPPORTED_RATES];
1452        char str[128]; /* FIXME: too big for stack? */
1453
1454        if ((drm_debug & DRM_UT_KMS) == 0)
1455                return;
1456
1457        source_len = intel_dp_source_rates(intel_dp, &source_rates);
1458        snprintf_int_array(str, sizeof(str), source_rates, source_len);
1459        DRM_DEBUG_KMS("source rates: %s\n", str);
1460
1461        sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1462        snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1463        DRM_DEBUG_KMS("sink rates: %s\n", str);
1464
1465        common_len = intel_dp_common_rates(intel_dp, common_rates);
1466        snprintf_int_array(str, sizeof(str), common_rates, common_len);
1467        DRM_DEBUG_KMS("common rates: %s\n", str);
1468}
1469
1470static int rate_to_index(int find, const int *rates)
1471{
1472        int i = 0;
1473
1474        for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1475                if (find == rates[i])
1476                        break;
1477
1478        return i;
1479}
1480
1481int
1482intel_dp_max_link_rate(struct intel_dp *intel_dp)
1483{
1484        int rates[DP_MAX_SUPPORTED_RATES] = {};
1485        int len;
1486
1487        len = intel_dp_common_rates(intel_dp, rates);
1488        if (WARN_ON(len <= 0))
1489                return 162000;
1490
1491        return rates[rate_to_index(0, rates) - 1];
1492}
1493
/*
 * Translate @rate into an index into the sink's rate table (for sinks
 * that select the link rate by index rather than by BW code).
 * NOTE(review): if @rate is not in sink_rates this yields
 * DP_MAX_SUPPORTED_RATES (one past the table); callers are expected to
 * pass a rate previously read from that table.
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
        return rate_to_index(rate, intel_dp->sink_rates);
}
1498
1499void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1500                           uint8_t *link_bw, uint8_t *rate_select)
1501{
1502        if (intel_dp->num_sink_rates) {
1503                *link_bw = 0;
1504                *rate_select =
1505                        intel_dp_rate_select(intel_dp, port_clock);
1506        } else {
1507                *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1508                *rate_select = 0;
1509        }
1510}
1511
1512bool
1513intel_dp_compute_config(struct intel_encoder *encoder,
1514                        struct intel_crtc_state *pipe_config)
1515{
1516        struct drm_device *dev = encoder->base.dev;
1517        struct drm_i915_private *dev_priv = dev->dev_private;
1518        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1519        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1520        enum port port = dp_to_dig_port(intel_dp)->port;
1521        struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1522        struct intel_connector *intel_connector = intel_dp->attached_connector;
1523        int lane_count, clock;
1524        int min_lane_count = 1;
1525        int max_lane_count = intel_dp_max_lane_count(intel_dp);
1526        /* Conveniently, the link BW constants become indices with a shift...*/
1527        int min_clock = 0;
1528        int max_clock;
1529        int bpp, mode_rate;
1530        int link_avail, link_clock;
1531        int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1532        int common_len;
1533        uint8_t link_bw, rate_select;
1534
1535        common_len = intel_dp_common_rates(intel_dp, common_rates);
1536
1537        /* No common link rates between source and sink */
1538        WARN_ON(common_len <= 0);
1539
1540        max_clock = common_len - 1;
1541
1542        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1543                pipe_config->has_pch_encoder = true;
1544
1545        pipe_config->has_dp_encoder = true;
1546        pipe_config->has_drrs = false;
1547        pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1548
1549        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1550                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1551                                       adjusted_mode);
1552
1553                if (INTEL_INFO(dev)->gen >= 9) {
1554                        int ret;
1555                        ret = skl_update_scaler_crtc(pipe_config);
1556                        if (ret)
1557                                return ret;
1558                }
1559
1560                if (HAS_GMCH_DISPLAY(dev))
1561                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
1562                                                 intel_connector->panel.fitting_mode);
1563                else
1564                        intel_pch_panel_fitting(intel_crtc, pipe_config,
1565                                                intel_connector->panel.fitting_mode);
1566        }
1567
1568        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1569                return false;
1570
1571        DRM_DEBUG_KMS("DP link computation with max lane count %i "
1572                      "max bw %d pixel clock %iKHz\n",
1573                      max_lane_count, common_rates[max_clock],
1574                      adjusted_mode->crtc_clock);
1575
1576        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1577         * bpc in between. */
1578        bpp = pipe_config->pipe_bpp;
1579        if (is_edp(intel_dp)) {
1580
1581                /* Get bpp from vbt only for panels that dont have bpp in edid */
1582                if (intel_connector->base.display_info.bpc == 0 &&
1583                        (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1584                        DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1585                                      dev_priv->vbt.edp_bpp);
1586                        bpp = dev_priv->vbt.edp_bpp;
1587                }
1588
1589                /*
1590                 * Use the maximum clock and number of lanes the eDP panel
1591                 * advertizes being capable of. The panels are generally
1592                 * designed to support only a single clock and lane
1593                 * configuration, and typically these values correspond to the
1594                 * native resolution of the panel.
1595                 */
1596                min_lane_count = max_lane_count;
1597                min_clock = max_clock;
1598        }
1599
1600        for (; bpp >= 6*3; bpp -= 2*3) {
1601                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1602                                                   bpp);
1603
1604                for (clock = min_clock; clock <= max_clock; clock++) {
1605                        for (lane_count = min_lane_count;
1606                                lane_count <= max_lane_count;
1607                                lane_count <<= 1) {
1608
1609                                link_clock = common_rates[clock];
1610                                link_avail = intel_dp_max_data_rate(link_clock,
1611                                                                    lane_count);
1612
1613                                if (mode_rate <= link_avail) {
1614                                        goto found;
1615                                }
1616                        }
1617                }
1618        }
1619
1620        return false;
1621
1622found:
1623        if (intel_dp->color_range_auto) {
1624                /*
1625                 * See:
1626                 * CEA-861-E - 5.1 Default Encoding Parameters
1627                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1628                 */
1629                pipe_config->limited_color_range =
1630                        bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1631        } else {
1632                pipe_config->limited_color_range =
1633                        intel_dp->limited_color_range;
1634        }
1635
1636        pipe_config->lane_count = lane_count;
1637
1638        pipe_config->pipe_bpp = bpp;
1639        pipe_config->port_clock = common_rates[clock];
1640
1641        intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1642                              &link_bw, &rate_select);
1643
1644        DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1645                      link_bw, rate_select, pipe_config->lane_count,
1646                      pipe_config->port_clock, bpp);
1647        DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1648                      mode_rate, link_avail);
1649
1650        intel_link_compute_m_n(bpp, lane_count,
1651                               adjusted_mode->crtc_clock,
1652                               pipe_config->port_clock,
1653                               &pipe_config->dp_m_n);
1654
1655        if (intel_connector->panel.downclock_mode != NULL &&
1656                dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1657                        pipe_config->has_drrs = true;
1658                        intel_link_compute_m_n(bpp, lane_count,
1659                                intel_connector->panel.downclock_mode->clock,
1660                                pipe_config->port_clock,
1661                                &pipe_config->dp_m2_n2);
1662        }
1663
1664        if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1665                skl_edp_set_pll_config(pipe_config);
1666        else if (IS_BROXTON(dev))
1667                /* handled in ddi */;
1668        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1669                hsw_dp_set_ddi_pll_sel(pipe_config);
1670        else
1671                intel_dp_set_clock(encoder, pipe_config);
1672
1673        return true;
1674}
1675
1676void intel_dp_set_link_params(struct intel_dp *intel_dp,
1677                              const struct intel_crtc_state *pipe_config)
1678{
1679        intel_dp->link_rate = pipe_config->port_clock;
1680        intel_dp->lane_count = pipe_config->lane_count;
1681}
1682
/*
 * Pre-compute the DP port register value (intel_dp->DP) and, on CPT,
 * program TRANS_DP_CTL, before the port is actually enabled: link
 * parameters, sync polarity, enhanced framing and pipe selection.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP (port A): CPT-style link train bits,
		 * pipe select field at bit 29. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT PCH ports: enhanced framing lives in TRANS_DP_CTL,
		 * not in the port register. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX/CPU/VLV/CHV layout: everything in the port register. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1764
1765#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1766#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1767
1768#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1769#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1770
1771#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1772#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1773
/*
 * Poll PP_STATUS until (status & mask) == value, logging an error if the
 * condition isn't met before the _wait_for() timeout expires.
 * Caller must hold pps_mutex (lockdep-asserted).
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* Timeout is a warning only; we proceed regardless. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1800
/* Block until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1806
/* Block until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1812
/*
 * Wait out the panel power-cycle delay (measured from the last recorded
 * power cycle) and then for the sequencer to reach the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1824
/* Honour the backlight-on delay, measured from the last panel power on. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1830
/* Honour the backlight-off delay, measured from the last backlight off. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1836
1837/* Read the current pp_control value, unlocking the register if it
1838 * is locked
1839 */
1840
1841static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1842{
1843        struct drm_device *dev = intel_dp_to_dev(intel_dp);
1844        struct drm_i915_private *dev_priv = dev->dev_private;
1845        u32 control;
1846
1847        lockdep_assert_held(&dev_priv->pps_mutex);
1848
1849        control = I915_READ(_pp_ctrl_reg(intel_dp));
1850        if (!IS_BROXTON(dev)) {
1851                control &= ~PANEL_UNLOCK_MASK;
1852                control |= PANEL_UNLOCK_REGS;
1853        }
1854        return control;
1855}
1856
1857/*
1858 * Must be paired with edp_panel_vdd_off().
1859 * Must hold pps_mutex around the whole on/off sequence.
1860 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1861 */
/*
 * Force panel VDD on via the EDP_FORCE_VDD override bit.
 * Returns true if VDD was previously off, i.e. the caller is now
 * responsible for turning it back off.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Cancel any pending deferred VDD-off; we want VDD up now. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* VDD already forced on: nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Hold the AUX power domain while VDD is forced on;
	 * released in edp_panel_vdd_off_sync(). */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1914
1915/*
1916 * Must be paired with intel_edp_panel_vdd_off() or
1917 * intel_edp_panel_off().
1918 * Nested calls to these functions are not allowed since
1919 * we drop the lock. Caller must use some higher level
1920 * locking to prevent nested calls from other threads.
1921 */
1922void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1923{
1924        bool vdd;
1925
1926        if (!is_edp(intel_dp))
1927                return;
1928
1929        pps_lock(intel_dp);
1930        vdd = edp_panel_vdd_on(intel_dp);
1931        pps_unlock(intel_dp);
1932
1933        I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1934             port_name(dp_to_dig_port(intel_dp)->port));
1935}
1936
1937static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1938{
1939        struct drm_device *dev = intel_dp_to_dev(intel_dp);
1940        struct drm_i915_private *dev_priv = dev->dev_private;
1941        struct intel_digital_port *intel_dig_port =
1942                dp_to_dig_port(intel_dp);
1943        struct intel_encoder *intel_encoder = &intel_dig_port->base;
1944        enum intel_display_power_domain power_domain;
1945        u32 pp;
1946        i915_reg_t pp_stat_reg, pp_ctrl_reg;
1947
1948        lockdep_assert_held(&dev_priv->pps_mutex);
1949
1950        WARN_ON(intel_dp->want_panel_vdd);
1951
1952        if (!edp_have_panel_vdd(intel_dp))
1953                return;
1954
1955        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1956                      port_name(intel_dig_port->port));
1957
1958        pp = ironlake_get_pp_control(intel_dp);
1959        pp &= ~EDP_FORCE_VDD;
1960
1961        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1962        pp_stat_reg = _pp_stat_reg(intel_dp);
1963
1964        I915_WRITE(pp_ctrl_reg, pp);
1965        POSTING_READ(pp_ctrl_reg);
1966
1967        /* Make sure sequencer is idle before allowing subsequent activity */
1968        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1969        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1970
1971        if ((pp & POWER_TARGET_ON) == 0)
1972                intel_dp->last_power_cycle = jiffies;
1973
1974        power_domain = intel_display_port_aux_power_domain(intel_encoder);
1975        intel_display_power_put(dev_priv, power_domain);
1976}
1977
1978static void edp_panel_vdd_work(struct work_struct *__work)
1979{
1980        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1981                                                 struct intel_dp, panel_vdd_work);
1982
1983        pps_lock(intel_dp);
1984        if (!intel_dp->want_panel_vdd)
1985                edp_panel_vdd_off_sync(intel_dp);
1986        pps_unlock(intel_dp);
1987}
1988
1989static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1990{
1991        unsigned long delay;
1992
1993        /*
1994         * Queue the timer to fire a long time from now (relative to the power
1995         * down delay) to keep the panel power up across a sequence of
1996         * operations.
1997         */
1998        delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1999        schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2000}
2001
2002/*
2003 * Must be paired with edp_panel_vdd_on().
2004 * Must hold pps_mutex around the whole on/off sequence.
2005 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2006 */
2007static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2008{
2009        struct drm_i915_private *dev_priv =
2010                intel_dp_to_dev(intel_dp)->dev_private;
2011
2012        lockdep_assert_held(&dev_priv->pps_mutex);
2013
2014        if (!is_edp(intel_dp))
2015                return;
2016
2017        I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2018             port_name(dp_to_dig_port(intel_dp)->port));
2019
2020        intel_dp->want_panel_vdd = false;
2021
2022        if (sync)
2023                edp_panel_vdd_off_sync(intel_dp);
2024        else
2025                edp_panel_vdd_schedule_off(intel_dp);
2026}
2027
/*
 * Turn the eDP panel power on through the panel power sequencer and wait
 * for the power-on sequence to complete. Caller must hold pps_mutex.
 * No-op (with a WARN) if the panel is already powered.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the mandatory off-time before powering back on. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the backlight-on delay in wait_backlight_on(). */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2075
/* Locked wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2085
2086
/*
 * Turn the eDP panel power off. Clears panel power, panel reset, the VDD
 * force override and the backlight enable in one write, waits for the off
 * sequence, and drops the power domain reference taken when VDD was
 * enabled. Caller must hold pps_mutex and must have VDD forced on.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp for the power-cycle delay in wait_panel_power_cycle(). */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2128
/* Locked wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2138
2139/* Enable backlight in the panel power control. */
2140static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2141{
2142        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2143        struct drm_device *dev = intel_dig_port->base.base.dev;
2144        struct drm_i915_private *dev_priv = dev->dev_private;
2145        u32 pp;
2146        i915_reg_t pp_ctrl_reg;
2147
2148        /*
2149         * If we enable the backlight right away following a panel power
2150         * on, we may see slight flicker as the panel syncs with the eDP
2151         * link.  So delay a bit to make sure the image is solid before
2152         * allowing it to appear.
2153         */
2154        wait_backlight_on(intel_dp);
2155
2156        pps_lock(intel_dp);
2157
2158        pp = ironlake_get_pp_control(intel_dp);
2159        pp |= EDP_BLC_ENABLE;
2160
2161        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2162
2163        I915_WRITE(pp_ctrl_reg, pp);
2164        POSTING_READ(pp_ctrl_reg);
2165
2166        pps_unlock(intel_dp);
2167}
2168
2169/* Enable backlight PWM and backlight PP control. */
2170void intel_edp_backlight_on(struct intel_dp *intel_dp)
2171{
2172        if (!is_edp(intel_dp))
2173                return;
2174
2175        DRM_DEBUG_KMS("\n");
2176
2177        intel_panel_enable_backlight(intel_dp->attached_connector);
2178        _intel_edp_backlight_on(intel_dp);
2179}
2180
2181/* Disable backlight in the panel power control. */
2182static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2183{
2184        struct drm_device *dev = intel_dp_to_dev(intel_dp);
2185        struct drm_i915_private *dev_priv = dev->dev_private;
2186        u32 pp;
2187        i915_reg_t pp_ctrl_reg;
2188
2189        if (!is_edp(intel_dp))
2190                return;
2191
2192        pps_lock(intel_dp);
2193
2194        pp = ironlake_get_pp_control(intel_dp);
2195        pp &= ~EDP_BLC_ENABLE;
2196
2197        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2198
2199        I915_WRITE(pp_ctrl_reg, pp);
2200        POSTING_READ(pp_ctrl_reg);
2201
2202        pps_unlock(intel_dp);
2203
2204        intel_dp->last_backlight_off = jiffies;
2205        edp_wait_backlight_off(intel_dp);
2206}
2207
2208/* Disable backlight PP control and backlight PWM. */
2209void intel_edp_backlight_off(struct intel_dp *intel_dp)
2210{
2211        if (!is_edp(intel_dp))
2212                return;
2213
2214        DRM_DEBUG_KMS("\n");
2215
2216        _intel_edp_backlight_off(intel_dp);
2217        intel_panel_disable_backlight(intel_dp->attached_connector);
2218}
2219
2220/*
2221 * Hook for controlling the panel power control backlight through the bl_power
2222 * sysfs attribute. Take care to handle multiple calls.
2223 */
2224static void intel_edp_backlight_power(struct intel_connector *connector,
2225                                      bool enable)
2226{
2227        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2228        bool is_enabled;
2229
2230        pps_lock(intel_dp);
2231        is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2232        pps_unlock(intel_dp);
2233
2234        if (is_enabled == enable)
2235                return;
2236
2237        DRM_DEBUG_KMS("panel power control backlight %s\n",
2238                      enable ? "enable" : "disable");
2239
2240        if (enable)
2241                _intel_edp_backlight_on(intel_dp);
2242        else
2243                _intel_edp_backlight_off(intel_dp);
2244}
2245
/* Map an enable flag to the human-readable name used in state asserts. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
2250
/*
 * WARN if the DP port enable bit (DP_PORT_EN) doesn't match the state the
 * caller expects.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			state_string(state), state_string(cur_state));
}
2262#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2263
/* WARN if the eDP PLL enable bit in DP_A doesn't match the expected state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			state_string(state), state_string(cur_state));
}
2272#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2273#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2274
/*
 * Enable the CPU eDP PLL via DP_A. The pipe and port must be disabled and
 * the PLL off. The frequency select (162 vs 270 MHz) is programmed first,
 * then the enable bit, with fixed settle delays after each write.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      crtc->config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2305
/*
 * Disable the CPU eDP PLL via DP_A. The pipe and port must already be
 * disabled and the PLL currently enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2324
2325/* If the sink supports it, try to set the power state appropriately */
2326void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2327{
2328        int ret, i;
2329
2330        /* Should have a valid DPCD by this point */
2331        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2332                return;
2333
2334        if (mode != DRM_MODE_DPMS_ON) {
2335                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2336                                         DP_SET_POWER_D3);
2337        } else {
2338                /*
2339                 * When turning on, we need to retry for 1ms to give the sink
2340                 * time to wake up.
2341                 */
2342                for (i = 0; i < 3; i++) {
2343                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2344                                                 DP_SET_POWER_D0);
2345                        if (ret == 1)
2346                                break;
2347                        msleep(1);
2348                }
2349        }
2350
2351        if (ret != 1)
2352                DRM_DEBUG_KMS("failed to %s sink power state\n",
2353                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2354}
2355
/*
 * Read out whether this DP port is enabled and, if so, which pipe feeds
 * it. Takes the port's power domain reference (if it is on) for the
 * duration of the register access; returns false when the power domain is
 * off or the port is disabled.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT: the pipe->port routing lives in TRANS_DP_CTL, so scan
		 * each pipe's transcoder for one pointing at this port. */
		enum pipe p;

		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
2408
/*
 * Read the current DP/eDP configuration back out of the hardware into
 * pipe_config: audio enable, sync polarity flags, color range, lane
 * count, link m/n values, port clock and the derived dot clock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT: sync polarity is read from TRANS_DP_CTL. */
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		/* Other platforms: sync polarity is in the port register. */
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		/* CPU eDP: the link clock comes from the DP_A PLL select. */
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2496
/*
 * Encoder disable hook shared by all DP platforms: tear down audio, PSR
 * (on non-DDI platforms) and the eDP panel, in that order.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* Audio must go first, while the port is still running. */
	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	/* DDI platforms handle PSR in their own disable path. */
	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2520
2521static void ilk_post_disable_dp(struct intel_encoder *encoder)
2522{
2523        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2524        enum port port = dp_to_dig_port(intel_dp)->port;
2525
2526        intel_dp_link_down(intel_dp);
2527
2528        /* Only ilk+ has port A */
2529        if (port == PORT_A)
2530                ironlake_edp_pll_off(intel_dp);
2531}
2532
2533static void vlv_post_disable_dp(struct intel_encoder *encoder)
2534{
2535        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2536
2537        intel_dp_link_down(intel_dp);
2538}
2539
/*
 * Assert (reset == true) or deassert (reset == false) the DPIO PCS TX lane
 * resets and the PCS clock soft reset for this port's channel. The second
 * PCS group (lanes 2/3) is only touched when more than two lanes are used.
 * Callers hold sb_lock around this (see chv_post_disable_dp and
 * chv_dp_pre_pll_enable).
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* TX lane reset bits for the first PCS group (lanes 0/1) */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	/* Same for the second PCS group, only if lanes 2/3 are in use */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* PCS clock soft reset; the soft-reset request enable stays set */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
2583
2584static void chv_post_disable_dp(struct intel_encoder *encoder)
2585{
2586        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2587        struct drm_device *dev = encoder->base.dev;
2588        struct drm_i915_private *dev_priv = dev->dev_private;
2589
2590        intel_dp_link_down(intel_dp);
2591
2592        mutex_lock(&dev_priv->sb_lock);
2593
2594        /* Assert data lane reset */
2595        chv_data_lane_soft_reset(encoder, true);
2596
2597        mutex_unlock(&dev_priv->sb_lock);
2598}
2599
/*
 * Translate a DPCD training pattern value (dp_train_pat) into the
 * source-side port register bits. On DDI platforms this writes DP_TP_CTL
 * directly; on older platforms it only updates *DP, which the caller is
 * responsible for writing to the port register afterwards.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		/* DDI: training pattern lives in DP_TP_CTL, not the port reg */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style training bits (also gen7 port A) */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No TPS3 here; fall back to pattern 2 and complain */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* g4x/VLV/CHV-style training bits */
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV supports TPS3 on this register layout */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2683
/*
 * Write the port register to enable the DP port, using the two-step
 * write sequence required by VLV/CHV (see comment below). The port is
 * enabled with training pattern 1 already selected.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	/* First write: full configuration but DP_PORT_EN still clear */
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Second write: actually enable the port */
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2711
/*
 * Common DP enable path: set up the panel power sequencer, enable the
 * port, power up the panel, train the link and finally enable audio.
 * The PPS-related steps run under pps_lock; ordering here is critical.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = crtc->pipe;

	/* The port must not already be enabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	/*
	 * We get an occasional spurious underrun between the port
	 * enable and vdd enable, when enabling port A eDP.
	 *
	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
	 */
	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_dp_enable_port(intel_dp);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * Underrun reporting for the other pipe was disabled in
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
		 * enabled, so it's now safe to re-enable underrun reporting.
		 */
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
	}

	/* Power up the panel with vdd forced on, then release vdd. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	/* Re-enable the underrun reporting disabled above. */
	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		/* CHV only waits for the lanes actually in use */
		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	/* Wake the sink and train the link */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
2781
2782static void g4x_enable_dp(struct intel_encoder *encoder)
2783{
2784        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2785
2786        intel_enable_dp(encoder);
2787        intel_edp_backlight_on(intel_dp);
2788}
2789
2790static void vlv_enable_dp(struct intel_encoder *encoder)
2791{
2792        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2793
2794        intel_edp_backlight_on(intel_dp);
2795        intel_psr_enable(intel_dp);
2796}
2797
/*
 * pre_enable hook for g4x/ILK: program the port registers and, for CPU
 * eDP (port A), turn on the eDP PLL. On ILK this also suppresses the
 * FIFO underrun noise caused by the PLL/port enable on the other pipe.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

	intel_dp_prepare(encoder);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * We get FIFO underruns on the other pipe when
		 * enabling the CPU eDP PLL, and when enabling CPU
		 * eDP port. We could potentially avoid the PLL
		 * underrun with a vblank wait just prior to enabling
		 * the PLL, but that doesn't appear to help the port
		 * enable case. Just sweep it all under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
	}

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}
2824
/*
 * Disconnect this port from its current panel power sequencer: make sure
 * vdd is off first, then clear the PPS port-select field and forget the
 * pipe association. Exact write order matters here.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* vdd must be off before we touch the port select */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power seqeuencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power seqeuencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2850
2851static void vlv_steal_power_sequencer(struct drm_device *dev,
2852                                      enum pipe pipe)
2853{
2854        struct drm_i915_private *dev_priv = dev->dev_private;
2855        struct intel_encoder *encoder;
2856
2857        lockdep_assert_held(&dev_priv->pps_mutex);
2858
2859        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2860                return;
2861
2862        for_each_intel_encoder(dev, encoder) {
2863                struct intel_dp *intel_dp;
2864                enum port port;
2865
2866                if (encoder->type != INTEL_OUTPUT_EDP)
2867                        continue;
2868
2869                intel_dp = enc_to_intel_dp(&encoder->base);
2870                port = dp_to_dig_port(intel_dp)->port;
2871
2872                if (intel_dp->pps_pipe != pipe)
2873                        continue;
2874
2875                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2876                              pipe_name(pipe), port_name(port));
2877
2878                WARN(encoder->base.crtc,
2879                     "stealing pipe %c power sequencer from active eDP port %c\n",
2880                     pipe_name(pipe), port_name(port));
2881
2882                /* make sure vdd is off before we steal it */
2883                vlv_detach_power_sequencer(intel_dp);
2884        }
2885}
2886
/*
 * Bind the power sequencer of this port's crtc pipe to the port:
 * detach any previously-used PPS, steal the target PPS from any other
 * port, then initialize it. The detach/steal/claim ordering matters.
 * Must be called with pps_mutex held (asserted below).
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only eDP ports use a panel power sequencer */
	if (!is_edp(intel_dp))
		return;

	/* Already bound to the right pipe: nothing to do */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2927
/*
 * pre_enable hook for VLV: program the PCS sideband registers for this
 * channel under sb_lock, then run the common DP enable path.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the value read here is immediately overwritten by
	 * "val = 0" below, so only the read access itself (if anything)
	 * has an effect — confirm whether the read is required.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2956
/*
 * pre_pll_enable hook for VLV: program the port registers and put the
 * PCS TX lanes/clock into their default reset state via sideband writes,
 * all under sb_lock.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2986
/*
 * pre_enable hook for CHV: program TX FIFO reset source, per-lane latency
 * ("upar") settings and data lane stagger via sideband, release the data
 * lane soft reset, then run the common DP enable path. The second PCS
 * group is only programmed when more than two lanes are in use.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* Stagger value scales with the port clock */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
3075
/*
 * pre_pll_enable hook for CHV: power up the PHY lanes, assert the data
 * lane reset, and program the left/right clock distribution and clock
 * channel usage for this channel, all before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	/* Second PCS group only when lanes 2/3 are in use */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
3158
/*
 * post_pll_disable hook for CHV: undo the left/right clock distribution
 * forcing done in chv_dp_pre_pll_enable() and drop the lane power
 * overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
3191
3192/*
3193 * Native read with retry for link status and receiver capability reads for
3194 * cases where the sink may still be asleep.
3195 *
3196 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3197 * supposed to retry 3 times per the spec.
3198 */
3199static ssize_t
3200intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3201                        void *buffer, size_t size)
3202{
3203        ssize_t ret;
3204        int i;
3205
3206        /*
3207         * Sometime we just get the same incorrect byte repeated
3208         * over the entire buffer. Doing just one throw away read
3209         * initially seems to "solve" it.
3210         */
3211        drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3212
3213        for (i = 0; i < 3; i++) {
3214                ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3215                if (ret == size)
3216                        return ret;
3217                msleep(1);
3218        }
3219
3220        return ret;
3221}
3222
3223/*
3224 * Fetch AUX CH registers 0x202 - 0x207 which contain
3225 * link status information
3226 */
3227bool
3228intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3229{
3230        return intel_dp_dpcd_read_wake(&intel_dp->aux,
3231                                       DP_LANE0_1_STATUS,
3232                                       link_status,
3233                                       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3234}
3235
3236/* These are source-specific values. */
3237uint8_t
3238intel_dp_voltage_max(struct intel_dp *intel_dp)
3239{
3240        struct drm_device *dev = intel_dp_to_dev(intel_dp);
3241        struct drm_i915_private *dev_priv = dev->dev_private;
3242        enum port port = dp_to_dig_port(intel_dp)->port;
3243
3244        if (IS_BROXTON(dev))
3245                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3246        else if (INTEL_INFO(dev)->gen >= 9) {
3247                if (dev_priv->edp_low_vswing && port == PORT_A)
3248                        return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3249                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3250        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3251                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3252        else if (IS_GEN7(dev) && port == PORT_A)
3253                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3254        else if (HAS_PCH_CPT(dev) && port != PORT_A)
3255                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3256        else
3257                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3258}
3259
3260uint8_t
3261intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3262{
3263        struct drm_device *dev = intel_dp_to_dev(intel_dp);
3264        enum port port = dp_to_dig_port(intel_dp)->port;
3265
3266        if (INTEL_INFO(dev)->gen >= 9) {
3267                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3268                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3269                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
3270                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3271                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
3272                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3273                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
3274                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3275                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
3276                default:
3277                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
3278                }
3279        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3280                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3281                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3282                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
3283                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3284                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
3285                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3286                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
3287                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3288                default:
3289                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
3290                }
3291        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3292                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3293                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3294                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
3295                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3296                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
3297                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3298                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
3299                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3300                default:
3301                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
3302                }
3303        } else if (IS_GEN7(dev) && port == PORT_A) {
3304                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3305                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3306                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
3307                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3308                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3309                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
3310                default:
3311                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
3312                }
3313        } else {
3314                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3315                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3316                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
3317                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3318                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
3319                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3320                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
3321                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3322                default:
3323                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
3324                }
3325        }
3326}
3327
/*
 * vlv_signal_levels - program VLV DPIO PHY swing/pre-emphasis registers
 *
 * Translates the voltage swing + pre-emphasis request in
 * intel_dp->train_set[0] into per-combination PHY register values and
 * writes them through the sideband (DPIO) interface for this port's
 * channel.
 *
 * Returns 0 in all cases: on VLV the levels live entirely in DPIO
 * registers, so no bits need to be merged into the DP port register
 * by the caller.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/*
	 * Look up the magic register values for the requested
	 * pre-emphasis + swing combination. Combinations not listed
	 * here are invalid and bail out without touching the hardware.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* All DPIO accesses must be serialized via the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	/* Re-enable TX_DW5 last, after all other values are in place. */
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3427
3428static bool chv_need_uniq_trans_scale(uint8_t train_set)
3429{
3430        return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3431                (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3432}
3433
/*
 * chv_signal_levels - program CHV DPIO PHY swing/de-emphasis registers
 *
 * Translates the voltage swing + pre-emphasis request in
 * intel_dp->train_set[0] into de-emphasis and margin register values,
 * then runs the CHV PHY swing-calculation sequence over DPIO:
 * clear calc init, program margins, program per-lane deemph/margin/
 * uniq-trans-scale, then start the calculation.
 *
 * Returns 0 in all cases: the levels live entirely in DPIO registers,
 * so no bits need to be merged into the DP port register by the caller.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/*
	 * Look up deemph/margin for the requested combination;
	 * unsupported combinations bail out without touching hardware.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* All DPIO accesses must be serialized via the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* The second PCS group only exists when more than 2 lanes are used. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3601
3602static uint32_t
3603gen4_signal_levels(uint8_t train_set)
3604{
3605        uint32_t        signal_levels = 0;
3606
3607        switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3608        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3609        default:
3610                signal_levels |= DP_VOLTAGE_0_4;
3611                break;
3612        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3613                signal_levels |= DP_VOLTAGE_0_6;
3614                break;
3615        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3616                signal_levels |= DP_VOLTAGE_0_8;
3617                break;
3618        case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3619                signal_levels |= DP_VOLTAGE_1_2;
3620                break;
3621        }
3622        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3623        case DP_TRAIN_PRE_EMPH_LEVEL_0:
3624        default:
3625                signal_levels |= DP_PRE_EMPHASIS_0;
3626                break;
3627        case DP_TRAIN_PRE_EMPH_LEVEL_1:
3628                signal_levels |= DP_PRE_EMPHASIS_3_5;
3629                break;
3630        case DP_TRAIN_PRE_EMPH_LEVEL_2:
3631                signal_levels |= DP_PRE_EMPHASIS_6;
3632                break;
3633        case DP_TRAIN_PRE_EMPH_LEVEL_3:
3634                signal_levels |= DP_PRE_EMPHASIS_9_5;
3635                break;
3636        }
3637        return signal_levels;
3638}
3639
3640/* Gen6's DP voltage swing and pre-emphasis control */
3641static uint32_t
3642gen6_edp_signal_levels(uint8_t train_set)
3643{
3644        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3645                                         DP_TRAIN_PRE_EMPHASIS_MASK);
3646        switch (signal_levels) {
3647        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3648        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3649                return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3650        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3651                return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3652        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3653        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3654                return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3655        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3656        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3657                return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3658        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3659        case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3660                return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3661        default:
3662                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3663                              "0x%x\n", signal_levels);
3664                return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3665        }
3666}
3667
3668/* Gen7's DP voltage swing and pre-emphasis control */
3669static uint32_t
3670gen7_edp_signal_levels(uint8_t train_set)
3671{
3672        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3673                                         DP_TRAIN_PRE_EMPHASIS_MASK);
3674        switch (signal_levels) {
3675        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3676                return EDP_LINK_TRAIN_400MV_0DB_IVB;
3677        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3678                return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3679        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3680                return EDP_LINK_TRAIN_400MV_6DB_IVB;
3681
3682        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3683                return EDP_LINK_TRAIN_600MV_0DB_IVB;
3684        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3685                return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3686
3687        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3688                return EDP_LINK_TRAIN_800MV_0DB_IVB;
3689        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3690                return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3691
3692        default:
3693                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3694                              "0x%x\n", signal_levels);
3695                return EDP_LINK_TRAIN_500MV_0DB_IVB;
3696        }
3697}
3698
/*
 * intel_dp_set_signal_levels - apply the current train_set[0] levels
 *
 * Dispatches to the platform-specific translation, merges the
 * resulting bits into intel_dp->DP under 'mask', and writes the port
 * register.
 *
 * On CHV/VLV the helpers program the levels through DPIO and return 0
 * with mask == 0, so the port register value itself is unchanged; the
 * same applies to Broxton (levels handled via DDI translations).
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* On BXT the levels are not carried in the DP register. */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only log register bits when they actually live in the DP reg. */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	/* Posting read flushes the register write to the hardware. */
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3745
/*
 * intel_dp_program_link_training_pattern - select the training pattern
 *
 * Updates the training pattern bits in intel_dp->DP via the
 * platform-specific _intel_dp_set_link_train() helper and writes the
 * result to the port register, flushing with a posting read.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3759
/*
 * intel_dp_set_idle_link_train - switch a DDI port to the idle pattern
 *
 * Sets DP_TP_CTL to the idle link-training pattern and, except on
 * port A, waits (up to 1ms) for the hardware to report the idle
 * pattern transmission as done. No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3790
/*
 * intel_dp_link_down - bring the DP link and port down
 *
 * Puts the link into the idle training pattern, then clears the port
 * enable and audio bits, applies the IBX transcoder-A workaround when
 * needed, waits the panel power-down delay, and stores the resulting
 * register value back into intel_dp->DP.
 *
 * Must not be called on DDI platforms or with the port already off
 * (both are guarded by WARN_ON below).
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Select the idle pattern using the platform's field layout. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Then actually disable the port (and audio) in a second write. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3859
/*
 * intel_dp_get_dpcd - read and cache the sink's DPCD capability block
 *
 * Reads the receiver capability bytes into intel_dp->dpcd, probes
 * PSR/PSR2 support (eDP only), fetches the eDP 1.4 intermediate link
 * rate table when advertised, and reads the downstream port info when
 * a downstream port is present.
 *
 * Returns false when the DPCD cannot be read or reads back as rev 0,
 * true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	/*
	 * NOTE(review): a short positive read would pass this check;
	 * confirm drm_dp_dpcd_read() never returns partial reads, or
	 * compare against sizeof(intel_dp->dpcd) instead.
	 */
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is only considered on gen9+ sinks that advertise it. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The table is zero-terminated when shorter than maximum. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/*
			 * Table entries are in 200 kHz units (eDP 1.4);
			 * drm stores clocks in 10 kHz units, hence *200/10.
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3949
3950static void
3951intel_dp_probe_oui(struct intel_dp *intel_dp)
3952{
3953        u8 buf[3];
3954
3955        if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3956                return;
3957
3958        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3959                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3960                              buf[0], buf[1], buf[2]);
3961
3962        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3963                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3964                              buf[0], buf[1], buf[2]);
3965}
3966
3967static bool
3968intel_dp_probe_mst(struct intel_dp *intel_dp)
3969{
3970        u8 buf[1];
3971
3972        if (!intel_dp->can_mst)
3973                return false;
3974
3975        if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3976                return false;
3977
3978        if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3979                if (buf[0] & DP_MST_CAP) {
3980                        DRM_DEBUG_KMS("Sink is MST capable\n");
3981                        intel_dp->is_mst = true;
3982                } else {
3983                        DRM_DEBUG_KMS("Sink is not MST capable\n");
3984                        intel_dp->is_mst = false;
3985                }
3986        }
3987
3988        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3989        return intel_dp->is_mst;
3990}
3991
/*
 * Stop sink CRC calculation: clear DP_TEST_SINK_START and wait for the
 * sink's CRC frame counter to drain to zero. Returns 0 on success or a
 * negative errno (-EIO on AUX failure, -ETIMEDOUT if the counter never
 * reaches zero).
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	/* Read-modify-write TEST_SINK to clear only the start bit */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Poll TEST_SINK_MISC once per vblank, up to 10 vblanks */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
		ret = -ETIMEDOUT;
	}

 out:
	/*
	 * Pairs with the hsw_disable_ips() in intel_dp_sink_crc_start().
	 * NOTE(review): when this is reached via the "already started"
	 * path of _start(), IPS was not disabled beforehand — confirm
	 * hsw_enable_ips() is safe to call unpaired.
	 */
	hsw_enable_ips(intel_crtc);
	return ret;
}
4035
/*
 * Start sink CRC calculation by setting DP_TEST_SINK_START.
 * IPS is disabled for the duration of CRC capture (re-enabled by
 * intel_dp_sink_crc_stop()). Returns 0 on success, -ENOTTY if the
 * sink lacks CRC support, or a negative errno on AUX failure.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	/* Sink must advertise CRC capability in TEST_SINK_MISC */
	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A previous capture may still be running; stop it first */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Undo the IPS disable on failure */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	/* Give the sink one frame before the first CRC is sampled */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
4070
/*
 * Read one frame CRC from the sink into crc[] (caller provides a
 * 6-byte buffer: R/Cr, G/Y, B/Cb, two bytes each). Starts capture,
 * waits up to 6 vblanks for the sink's frame counter to advance,
 * reads the CRC registers and stops capture again.
 * Returns 0 on success or a negative errno.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Wait for TEST_COUNT to become non-zero: a CRC is ready */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	/* 6 consecutive CRC bytes starting at DP_TEST_CRC_R_CR */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Always stop capture; ret still holds the first error (or 0) */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4111
4112static bool
4113intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4114{
4115        return intel_dp_dpcd_read_wake(&intel_dp->aux,
4116                                       DP_DEVICE_SERVICE_IRQ_VECTOR,
4117                                       sink_irq_vector, 1) == 1;
4118}
4119
4120static bool
4121intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4122{
4123        int ret;
4124
4125        ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4126                                             DP_SINK_COUNT_ESI,
4127                                             sink_irq_vector, 14);
4128        if (ret != 14)
4129                return false;
4130
4131        return true;
4132}
4133
4134static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4135{
4136        uint8_t test_result = DP_TEST_ACK;
4137        return test_result;
4138}
4139
4140static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4141{
4142        uint8_t test_result = DP_TEST_NAK;
4143        return test_result;
4144}
4145
4146static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4147{
4148        uint8_t test_result = DP_TEST_NAK;
4149        struct intel_connector *intel_connector = intel_dp->attached_connector;
4150        struct drm_connector *connector = &intel_connector->base;
4151
4152        if (intel_connector->detect_edid == NULL ||
4153            connector->edid_corrupt ||
4154            intel_dp->aux.i2c_defer_count > 6) {
4155                /* Check EDID read for NACKs, DEFERs and corruption
4156                 * (DP CTS 1.2 Core r1.1)
4157                 *    4.2.2.4 : Failed EDID read, I2C_NAK
4158                 *    4.2.2.5 : Failed EDID read, I2C_DEFER
4159                 *    4.2.2.6 : EDID corruption detected
4160                 * Use failsafe mode for all cases
4161                 */
4162                if (intel_dp->aux.i2c_nack_count > 0 ||
4163                        intel_dp->aux.i2c_defer_count > 0)
4164                        DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4165                                      intel_dp->aux.i2c_nack_count,
4166                                      intel_dp->aux.i2c_defer_count);
4167                intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4168        } else {
4169                struct edid *block = intel_connector->detect_edid;
4170
4171                /* We have to write the checksum
4172                 * of the last block read
4173                 */
4174                block += intel_connector->detect_edid->extensions;
4175
4176                if (!drm_dp_dpcd_write(&intel_dp->aux,
4177                                        DP_TEST_EDID_CHECKSUM,
4178                                        &block->checksum,
4179                                        1))
4180                        DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4181
4182                test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4183                intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4184        }
4185
4186        /* Set test active flag here so userspace doesn't interrupt things */
4187        intel_dp->compliance_test_active = 1;
4188
4189        return test_result;
4190}
4191
4192static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4193{
4194        uint8_t test_result = DP_TEST_NAK;
4195        return test_result;
4196}
4197
/*
 * Read the sink's automated-test request (DP_TEST_REQUEST), dispatch it
 * to the matching autotest handler, and write the handler's ACK/NAK back
 * to DP_TEST_RESPONSE. A response is written even if the request could
 * not be read (defaulting to NAK).
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	/* Record the test type before running it so debugfs can report it */
	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4243
/*
 * Service an MST sink interrupt: read the ESI (event status indicator)
 * block, retrain the link if channel EQ has been lost, and hand the ESI
 * to the MST topology manager. Loops (via go_again) as long as the
 * manager handled something and a fresh ESI read succeeds, so bursts of
 * events are drained in one pass. If the ESI read fails the device is
 * assumed gone: MST is torn down and a hotplug event is sent.
 * Returns the topology manager's result, 0, or -EINVAL when not in MST
 * mode / after teardown.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the AUX write up to 3 times */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived while we serviced these */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4300
4301/*
4302 * According to DP spec
4303 * 5.1.2:
4304 *  1. Read DPCD
4305 *  2. Configure link according to Receiver Capabilities
4306 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4307 *  4. Check link status on receipt of hot-plug interrupt
4308 */
/*
 * Short-pulse (IRQ_HPD) handler for SST: verify the link is still
 * trained and retrain if channel EQ has been lost or a compliance
 * link-training test is pending. Caller must hold connection_mutex.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* No CRTC attached: nothing driving the link, nothing to check */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* Test requests are only serviced from the long-pulse path */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
		(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4366
/* XXX this is probably wrong for multiple downstream ports */
/*
 * DPCD-based connection probe. Distinguishes a native DP sink from a
 * branch device, then uses the branch's per-port capabilities (HPD
 * forwarding, sink count, DDC probing) to decide whether something is
 * actually plugged in downstream.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: per-port type lives in the downstream port caps */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse type in DOWNSTREAMPORT_PRESENT */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4416
4417static enum drm_connector_status
4418edp_detect(struct intel_dp *intel_dp)
4419{
4420        struct drm_device *dev = intel_dp_to_dev(intel_dp);
4421        enum drm_connector_status status;
4422
4423        status = intel_panel_detect(dev);
4424        if (status == connector_status_unknown)
4425                status = connector_status_connected;
4426
4427        return status;
4428}
4429
4430static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4431                                       struct intel_digital_port *port)
4432{
4433        u32 bit;
4434
4435        switch (port->port) {
4436        case PORT_A:
4437                return true;
4438        case PORT_B:
4439                bit = SDE_PORTB_HOTPLUG;
4440                break;
4441        case PORT_C:
4442                bit = SDE_PORTC_HOTPLUG;
4443                break;
4444        case PORT_D:
4445                bit = SDE_PORTD_HOTPLUG;
4446                break;
4447        default:
4448                MISSING_CASE(port->port);
4449                return false;
4450        }
4451
4452        return I915_READ(SDEISR) & bit;
4453}
4454
4455static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4456                                       struct intel_digital_port *port)
4457{
4458        u32 bit;
4459
4460        switch (port->port) {
4461        case PORT_A:
4462                return true;
4463        case PORT_B:
4464                bit = SDE_PORTB_HOTPLUG_CPT;
4465                break;
4466        case PORT_C:
4467                bit = SDE_PORTC_HOTPLUG_CPT;
4468                break;
4469        case PORT_D:
4470                bit = SDE_PORTD_HOTPLUG_CPT;
4471                break;
4472        case PORT_E:
4473                bit = SDE_PORTE_HOTPLUG_SPT;
4474                break;
4475        default:
4476                MISSING_CASE(port->port);
4477                return false;
4478        }
4479
4480        return I915_READ(SDEISR) & bit;
4481}
4482
4483static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4484                                       struct intel_digital_port *port)
4485{
4486        u32 bit;
4487
4488        switch (port->port) {
4489        case PORT_B:
4490                bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4491                break;
4492        case PORT_C:
4493                bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4494                break;
4495        case PORT_D:
4496                bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4497                break;
4498        default:
4499                MISSING_CASE(port->port);
4500                return false;
4501        }
4502
4503        return I915_READ(PORT_HOTPLUG_STAT) & bit;
4504}
4505
4506static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4507                                        struct intel_digital_port *port)
4508{
4509        u32 bit;
4510
4511        switch (port->port) {
4512        case PORT_B:
4513                bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4514                break;
4515        case PORT_C:
4516                bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4517                break;
4518        case PORT_D:
4519                bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4520                break;
4521        default:
4522                MISSING_CASE(port->port);
4523                return false;
4524        }
4525
4526        return I915_READ(PORT_HOTPLUG_STAT) & bit;
4527}
4528
4529static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4530                                       struct intel_digital_port *intel_dig_port)
4531{
4532        struct intel_encoder *intel_encoder = &intel_dig_port->base;
4533        enum port port;
4534        u32 bit;
4535
4536        intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4537        switch (port) {
4538        case PORT_A:
4539                bit = BXT_DE_PORT_HP_DDIA;
4540                break;
4541        case PORT_B:
4542                bit = BXT_DE_PORT_HP_DDIB;
4543                break;
4544        case PORT_C:
4545                bit = BXT_DE_PORT_HP_DDIC;
4546                break;
4547        default:
4548                MISSING_CASE(port);
4549                return false;
4550        }
4551
4552        return I915_READ(GEN8_DE_PORT_ISR) & bit;
4553}
4554
4555/*
4556 * intel_digital_port_connected - is the specified port connected?
4557 * @dev_priv: i915 private structure
4558 * @port: the port to test
4559 *
4560 * Return %true if @port is connected, %false otherwise.
4561 */
4562bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4563                                         struct intel_digital_port *port)
4564{
4565        if (HAS_PCH_IBX(dev_priv))
4566                return ibx_digital_port_connected(dev_priv, port);
4567        if (HAS_PCH_SPLIT(dev_priv))
4568                return cpt_digital_port_connected(dev_priv, port);
4569        else if (IS_BROXTON(dev_priv))
4570                return bxt_digital_port_connected(dev_priv, port);
4571        else if (IS_GM45(dev_priv))
4572                return gm45_digital_port_connected(dev_priv, port);
4573        else
4574                return g4x_digital_port_connected(dev_priv, port);
4575}
4576
4577static struct edid *
4578intel_dp_get_edid(struct intel_dp *intel_dp)
4579{
4580        struct intel_connector *intel_connector = intel_dp->attached_connector;
4581
4582        /* use cached edid if we have one */
4583        if (intel_connector->edid) {
4584                /* invalid edid */
4585                if (IS_ERR(intel_connector->edid))
4586                        return NULL;
4587
4588                return drm_edid_duplicate(intel_connector->edid);
4589        } else
4590                return drm_get_edid(&intel_connector->base,
4591                                    &intel_dp->aux.ddc);
4592}
4593
4594static void
4595intel_dp_set_edid(struct intel_dp *intel_dp)
4596{
4597        struct intel_connector *intel_connector = intel_dp->attached_connector;
4598        struct edid *edid;
4599
4600        edid = intel_dp_get_edid(intel_dp);
4601        intel_connector->detect_edid = edid;
4602
4603        if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4604                intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4605        else
4606                intel_dp->has_audio = drm_detect_monitor_audio(edid);
4607}
4608
4609static void
4610intel_dp_unset_edid(struct intel_dp *intel_dp)
4611{
4612        struct intel_connector *intel_connector = intel_dp->attached_connector;
4613
4614        kfree(intel_connector->detect_edid);
4615        intel_connector->detect_edid = NULL;
4616
4617        intel_dp->has_audio = false;
4618}
4619
/*
 * Connector ->detect() hook. Determines whether anything is plugged in,
 * refreshes the cached EDID, probes OUI/MST, and services any pending
 * sink IRQ (including compliance test requests). MST-enabled connectors
 * always report disconnected here, since monitors appear on MST
 * sub-connectors instead.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* AUX transfers below need the port's AUX power domain */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* Forget any stale compliance test state on unplug */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		goto out;
	}

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4707
4708static void
4709intel_dp_force(struct drm_connector *connector)
4710{
4711        struct intel_dp *intel_dp = intel_attached_dp(connector);
4712        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4713        struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4714        enum intel_display_power_domain power_domain;
4715
4716        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4717                      connector->base.id, connector->name);
4718        intel_dp_unset_edid(intel_dp);
4719
4720        if (connector->status != connector_status_connected)
4721                return;
4722
4723        power_domain = intel_display_port_aux_power_domain(intel_encoder);
4724        intel_display_power_get(dev_priv, power_domain);
4725
4726        intel_dp_set_edid(intel_dp);
4727
4728        intel_display_power_put(dev_priv, power_domain);
4729
4730        if (intel_encoder->type != INTEL_OUTPUT_EDP)
4731                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4732}
4733
4734static int intel_dp_get_modes(struct drm_connector *connector)
4735{
4736        struct intel_connector *intel_connector = to_intel_connector(connector);
4737        struct edid *edid;
4738
4739        edid = intel_connector->detect_edid;
4740        if (edid) {
4741                int ret = intel_connector_update_modes(connector, edid);
4742                if (ret)
4743                        return ret;
4744        }
4745
4746        /* if eDP has no EDID, fall back to fixed mode */
4747        if (is_edp(intel_attached_dp(connector)) &&
4748            intel_connector->panel.fixed_mode) {
4749                struct drm_display_mode *mode;
4750
4751                mode = drm_mode_duplicate(connector->dev,
4752                                          intel_connector->panel.fixed_mode);
4753                if (mode) {
4754                        drm_mode_probed_add(connector, mode);
4755                        return 1;
4756                }
4757        }
4758
4759        return 0;
4760}
4761
4762static bool
4763intel_dp_detect_audio(struct drm_connector *connector)
4764{
4765        bool has_audio = false;
4766        struct edid *edid;
4767
4768        edid = to_intel_connector(connector)->detect_edid;
4769        if (edid)
4770                has_audio = drm_detect_monitor_audio(edid);
4771
4772        return has_audio;
4773}
4774
/*
 * Handle writes to the DP connector properties (force audio, broadcast RGB,
 * and the eDP scaling mode). Returns 0 on success or a negative errno.
 * A modeset restore is only triggered when the effective state changed.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the raw value on the connector object first; the checks
	 * below only decide whether a mode restore is needed. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No change requested, nothing to do. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO falls back to what the sink's cached EDID reports. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Only restore the mode if the effective audio state flips. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if the resolved state did not change. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		/* DRM_MODE_SCALE_NONE is rejected for eDP. */
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-run the modeset on the active crtc so the new property value
	 * takes effect immediately. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4862
/* Free a DP connector and everything cached on it. */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* detect_edid is either NULL or kmalloc'ed; kfree handles both. */
	kfree(intel_connector->detect_edid);

	/* edid may hold an ERR_PTR sentinel which must not be kfree'd. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4881
/* Tear down a DP encoder: AUX channel, MST state and, for eDP, any
 * outstanding panel VDD work and the reboot notifier. */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_aux_fini(intel_dp);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4907
/* Suspend hook: for eDP, flush the delayed VDD-off work and force VDD off
 * under the pps lock so we don't suspend with the panel VDD still up. */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4924
/* Reconcile our power-domain tracking with a VDD bit the BIOS left
 * enabled. Caller must hold pps_mutex. */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to sanitize if VDD is already off. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4949
/* Encoder .reset hook: resynchronize eDP power sequencer state with
 * whatever the BIOS left behind (boot/resume). Non-eDP encoders need
 * no fixup here. */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4972
/* Connector ops shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4984
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4990
/* Encoder lifecycle ops for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4995
/*
 * Handle a hotplug interrupt pulse (long or short) on a DP digital port.
 * Long pulses re-probe the sink (DPCD, OUI, MST); short pulses service
 * MST events or re-check the SST link. Returns IRQ_HANDLED when the
 * pulse was consumed, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	/* Anything that isn't eDP or HDMI is (re)marked as DisplayPort. */
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* The DPCD accesses below go over AUX; hold its power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		/* Sink is not MST: check the SST link and drop any stale
		 * MST state via the mst_fail path. */
		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5076
5077/* check the VBT to see whether the eDP is on another port */
5078bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5079{
5080        struct drm_i915_private *dev_priv = dev->dev_private;
5081        union child_device_config *p_child;
5082        int i;
5083        static const short port_mapping[] = {
5084                [PORT_B] = DVO_PORT_DPB,
5085                [PORT_C] = DVO_PORT_DPC,
5086                [PORT_D] = DVO_PORT_DPD,
5087                [PORT_E] = DVO_PORT_DPE,
5088        };
5089
5090        /*
5091         * eDP not supported on g4x. so bail out early just
5092         * for a bit extra safety in case the VBT is bonkers.
5093         */
5094        if (INTEL_INFO(dev)->gen < 5)
5095                return false;
5096
5097        if (port == PORT_A)
5098                return true;
5099
5100        if (!dev_priv->vbt.child_dev_num)
5101                return false;
5102
5103        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5104                p_child = dev_priv->vbt.child_dev + i;
5105
5106                if (p_child->common.dvo_port == port_mapping[port] &&
5107                    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5108                    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5109                        return true;
5110        }
5111        return false;
5112}
5113
5114void
5115intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5116{
5117        struct intel_connector *intel_connector = to_intel_connector(connector);
5118
5119        intel_attach_force_audio_property(connector);
5120        intel_attach_broadcast_rgb_property(connector);
5121        intel_dp->color_range_auto = true;
5122
5123        if (is_edp(intel_dp)) {
5124                drm_mode_create_scaling_mode_property(connector->dev);
5125                drm_object_attach_property(
5126                        &connector->base,
5127                        connector->dev->mode_config.scaling_mode_property,
5128                        DRM_MODE_SCALE_ASPECT);
5129                intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5130        }
5131}
5132
/* Stamp "now" into the PPS bookkeeping fields — presumably so the panel
 * power/backlight delay checks measure from driver init; verify against
 * the wait helpers that consume these timestamps. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5139
/*
 * Compute the final eDP panel power sequencing delays by merging the
 * current hardware register values, the VBT, and the eDP spec limits,
 * then derive the software delay fields on intel_dp from them.
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the per-platform PPS register set. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	/* BXT has no divisor register; its cycle delay lives in pp_ctl. */
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/* BXT stores t11_t12 as a 1-based count of 100ms units. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from hw units of 100us to software units of 1ms. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5262
/*
 * Program the previously-computed panel power sequencing delays
 * (intel_dp->pps_delays) into the hardware PPS registers, including
 * the pp clock divisor and the port select bits where applicable.
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the per-platform PPS register set. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT keeps the power cycle delay in the control register
		 * instead of a separate divisor register. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5350
5351/**
5352 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5353 * @dev: DRM device
5354 * @refresh_rate: RR to be programmed
5355 *
5356 * This function gets called when refresh rate (RR) has to be changed from
5357 * one frequency to another. Switches can be between high and low RR
5358 * supported by the panel or to any other RR based on media playback (in
5359 * this case, RR value needs to be passed from user space).
5360 *
5361 * The caller of this function needs to take a lock on dev_priv->drrs.
5362 */
5363static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5364{
5365        struct drm_i915_private *dev_priv = dev->dev_private;
5366        struct intel_encoder *encoder;
5367        struct intel_digital_port *dig_port = NULL;
5368        struct intel_dp *intel_dp = dev_priv->drrs.dp;
5369        struct intel_crtc_state *config = NULL;
5370        struct intel_crtc *intel_crtc = NULL;
5371        enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5372
5373        if (refresh_rate <= 0) {
5374                DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5375                return;
5376        }
5377
5378        if (intel_dp == NULL) {
5379                DRM_DEBUG_KMS("DRRS not supported.\n");
5380                return;
5381        }
5382
5383        /*
5384         * FIXME: This needs proper synchronization with psr state for some
5385         * platforms that cannot have PSR and DRRS enabled at the same time.
5386         */
5387
5388        dig_port = dp_to_dig_port(intel_dp);
5389        encoder = &dig_port->base;
5390        intel_crtc = to_intel_crtc(encoder->base.crtc);
5391
5392        if (!intel_crtc) {
5393                DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5394                return;
5395        }
5396
5397        config = intel_crtc->config;
5398
5399        if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5400                DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5401                return;
5402        }
5403
5404        if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5405                        refresh_rate)
5406                index = DRRS_LOW_RR;
5407
5408        if (index == dev_priv->drrs.refresh_rate_type) {
5409                DRM_DEBUG_KMS(
5410                        "DRRS requested for previously set RR...ignoring\n");
5411                return;
5412        }
5413
5414        if (!intel_crtc->active) {
5415                DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5416                return;
5417        }
5418
5419        if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5420                switch (index) {
5421                case DRRS_HIGH_RR:
5422                        intel_dp_set_m_n(intel_crtc, M1_N1);
5423                        break;
5424                case DRRS_LOW_RR:
5425                        intel_dp_set_m_n(intel_crtc, M2_N2);
5426                        break;
5427                case DRRS_MAX_RR:
5428                default:
5429                        DRM_ERROR("Unsupported refreshrate type\n");
5430                }
5431        } else if (INTEL_INFO(dev)->gen > 6) {
5432                i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5433                u32 val;
5434
5435                val = I915_READ(reg);
5436                if (index > DRRS_HIGH_RR) {
5437                        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5438                                val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5439                        else
5440                                val |= PIPECONF_EDP_RR_MODE_SWITCH;
5441                } else {
5442                        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5443                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5444                        else
5445                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5446                }
5447                I915_WRITE(reg, val);
5448        }
5449
5450        dev_priv->drrs.refresh_rate_type = index;
5451
5452        DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5453}
5454
5455/**
5456 * intel_edp_drrs_enable - init drrs struct if supported
5457 * @intel_dp: DP struct
5458 *
5459 * Initializes frontbuffer_bits and drrs.dp
5460 */
5461void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5462{
5463        struct drm_device *dev = intel_dp_to_dev(intel_dp);
5464        struct drm_i915_private *dev_priv = dev->dev_private;
5465        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5466        struct drm_crtc *crtc = dig_port->base.base.crtc;
5467        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5468
5469        if (!intel_crtc->config->has_drrs) {
5470                DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5471                return;
5472        }
5473
5474        mutex_lock(&dev_priv->drrs.mutex);
5475        if (WARN_ON(dev_priv->drrs.dp)) {
5476                DRM_ERROR("DRRS already enabled\n");
5477                goto unlock;
5478        }
5479
5480        dev_priv->drrs.busy_frontbuffer_bits = 0;
5481
5482        dev_priv->drrs.dp = intel_dp;
5483
5484unlock:
5485        mutex_unlock(&dev_priv->drrs.mutex);
5486}
5487
5488/**
5489 * intel_edp_drrs_disable - Disable DRRS
5490 * @intel_dp: DP struct
5491 *
5492 */
5493void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5494{
5495        struct drm_device *dev = intel_dp_to_dev(intel_dp);
5496        struct drm_i915_private *dev_priv = dev->dev_private;
5497        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5498        struct drm_crtc *crtc = dig_port->base.base.crtc;
5499        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5500
5501        if (!intel_crtc->config->has_drrs)
5502                return;
5503
5504        mutex_lock(&dev_priv->drrs.mutex);
5505        if (!dev_priv->drrs.dp) {
5506                mutex_unlock(&dev_priv->drrs.mutex);
5507                return;
5508        }
5509
5510        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5511                intel_dp_set_drrs_state(dev_priv->dev,
5512                        intel_dp->attached_connector->panel.
5513                        fixed_mode->vrefresh);
5514
5515        dev_priv->drrs.dp = NULL;
5516        mutex_unlock(&dev_priv->drrs.mutex);
5517
5518        cancel_delayed_work_sync(&dev_priv->drrs.work);
5519}
5520
/* Delayed work: downclock to the panel's low refresh rate once the
 * screen has been idle (no busy frontbuffer bits) long enough. */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since the work was queued. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5550
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Non-sync cancel: the work re-checks all state under the mutex. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS is not currently enabled on any pipe. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track frontbuffer bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5593
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Non-sync cancel: the work re-checks all state under the mutex. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS is not currently enabled on any pipe. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only clear the frontbuffer bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means the screen was just updated, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5645
5646/**
5647 * DOC: Display Refresh Rate Switching (DRRS)
5648 *
5649 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5651 * dynamically, based on the usage scenario. This feature is applicable
5652 * for internal panels.
5653 *
5654 * Indication that the panel supports DRRS is given by the panel EDID, which
5655 * would list multiple refresh rates for one resolution.
5656 *
5657 * DRRS is of 2 types - static and seamless.
5658 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5659 * (may appear as a blink on screen) and is used in dock-undock scenario.
5660 * Seamless DRRS involves changing RR without any visual effect to the user
5661 * and can be used during normal system usage. This is done by programming
5662 * certain registers.
5663 *
5664 * Support for static/seamless DRRS may be indicated in the VBT based on
5665 * inputs from the panel spec.
5666 *
5667 * DRRS saves power by switching to low RR based on usage scenarios.
5668 *
5669 * eDP DRRS:-
5670 *        The implementation is based on frontbuffer tracking implementation.
5671 * When there is a disturbance on the screen triggered by user activity or a
5672 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5673 * When there is no movement on screen, after a timeout of 1 second, a switch
5674 * to low RR is made.
5675 *        For integration with frontbuffer tracking code,
5676 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5677 *
5678 * DRRS can be further extended to support other internal panels and also
5679 * the scenario of video playback wherein RR is set based on the rate
5680 * requested by userspace.
5681 */
5682
5683/**
5684 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5685 * @intel_connector: eDP connector
5686 * @fixed_mode: preferred mode of panel
5687 *
5688 * This function is  called only once at driver load to initialize basic
5689 * DRRS stuff.
5690 *
5691 * Returns:
5692 * Downclock mode if panel supports it, else return NULL.
5693 * DRRS support is determined by the presence of downclock mode (apart
5694 * from VBT setting).
5695 */
5696static struct drm_display_mode *
5697intel_dp_drrs_init(struct intel_connector *intel_connector,
5698                struct drm_display_mode *fixed_mode)
5699{
5700        struct drm_connector *connector = &intel_connector->base;
5701        struct drm_device *dev = connector->dev;
5702        struct drm_i915_private *dev_priv = dev->dev_private;
5703        struct drm_display_mode *downclock_mode = NULL;
5704
5705        INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5706        mutex_init(&dev_priv->drrs.mutex);
5707
5708        if (INTEL_INFO(dev)->gen <= 6) {
5709                DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5710                return NULL;
5711        }
5712
5713        if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5714                DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5715                return NULL;
5716        }
5717
5718        downclock_mode = intel_find_panel_downclock
5719                                        (dev, fixed_mode, connector);
5720
5721        if (!downclock_mode) {
5722                DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5723                return NULL;
5724        }
5725
5726        dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5727
5728        dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5729        DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5730        return downclock_mode;
5731}
5732
/*
 * eDP-specific connector setup: sanitize VDD state, cache DPCD and EDID,
 * pick the panel fixed mode (EDID preferred, VBT fallback), set up DRRS
 * and the backlight. Returns false if the panel looks like a ghost
 * (no DPCD), true otherwise. No-op (returns true) for non-eDP.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID was readable but contained no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			/* DRRS needs the fixed mode to find a downclock mode. */
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5839
/*
 * Initialize the DP/eDP connector for @intel_dig_port: install per-platform
 * AUX vfuncs, create and register the drm connector, set up the hotplug
 * pin, PPS (eDP), AUX channel, MST and backlight. Returns true on success;
 * on failure the connector is unregistered/cleaned up and false returned.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type, ret;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* Early BXT steppings use the port A HPD pin for port B. */
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		/* Panel power sequencer setup must run under pps_lock. */
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Unwind AUX and MST before the common failure path. */
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}
5998
/*
 * Allocate and register a DP digital port (encoder + connector) for @port,
 * whose control register is @output_reg. On failure the partially
 * constructed objects are freed via the goto unwind chain and the function
 * returns silently (no error code is propagated to the caller).
 */
void
intel_dp_init(struct drm_device *dev,
	      i915_reg_t output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			     DRM_MODE_ENCODER_TMDS, NULL))
		goto err_encoder_init;

	/* Common encoder hooks, then platform-specific enable/disable paths. */
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* CHV: port D only uses crtc 2; ports B/C use crtcs 0/1. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	/* Register for long/short HPD pulse handling on this port. */
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
6078
6079void intel_dp_mst_suspend(struct drm_device *dev)
6080{
6081        struct drm_i915_private *dev_priv = dev->dev_private;
6082        int i;
6083
6084        /* disable MST */
6085        for (i = 0; i < I915_MAX_PORTS; i++) {
6086                struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6087                if (!intel_dig_port)
6088                        continue;
6089
6090                if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6091                        if (!intel_dig_port->dp.can_mst)
6092                                continue;
6093                        if (intel_dig_port->dp.is_mst)
6094                                drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6095                }
6096        }
6097}
6098
6099void intel_dp_mst_resume(struct drm_device *dev)
6100{
6101        struct drm_i915_private *dev_priv = dev->dev_private;
6102        int i;
6103
6104        for (i = 0; i < I915_MAX_PORTS; i++) {
6105                struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6106                if (!intel_dig_port)
6107                        continue;
6108                if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6109                        int ret;
6110
6111                        if (!intel_dig_port->dp.can_mst)
6112                                continue;
6113
6114                        ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6115                        if (ret != 0) {
6116                                intel_dp_check_mst_status(&intel_dig_port->dp);
6117                        }
6118                }
6119        }
6120}
6121