/* drivers/gpu/drm/i915/display/intel_dp.c */
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Keith Packard <keithp@keithp.com>
  25 *
  26 */
  27
  28#include <linux/export.h>
  29#include <linux/i2c.h>
  30#include <linux/notifier.h>
  31#include <linux/reboot.h>
  32#include <linux/slab.h>
  33#include <linux/types.h>
  34
  35#include <asm/byteorder.h>
  36
  37#include <drm/drm_atomic_helper.h>
  38#include <drm/drm_crtc.h>
  39#include <drm/drm_dp_helper.h>
  40#include <drm/drm_edid.h>
  41#include <drm/drm_hdcp.h>
  42#include <drm/drm_probe_helper.h>
  43#include <drm/i915_drm.h>
  44
  45#include "i915_debugfs.h"
  46#include "i915_drv.h"
  47#include "i915_trace.h"
  48#include "intel_atomic.h"
  49#include "intel_audio.h"
  50#include "intel_connector.h"
  51#include "intel_ddi.h"
  52#include "intel_display_types.h"
  53#include "intel_dp.h"
  54#include "intel_dp_link_training.h"
  55#include "intel_dp_mst.h"
  56#include "intel_dpio_phy.h"
  57#include "intel_fifo_underrun.h"
  58#include "intel_hdcp.h"
  59#include "intel_hdmi.h"
  60#include "intel_hotplug.h"
  61#include "intel_lspcon.h"
  62#include "intel_lvds.h"
  63#include "intel_panel.h"
  64#include "intel_psr.h"
  65#include "intel_sideband.h"
  66#include "intel_tc.h"
  67#include "intel_vdsc.h"
  68
  69#define DP_DPRX_ESI_LEN 14
  70
  71/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
  72#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER      61440
  73#define DP_DSC_MIN_SUPPORTED_BPC                8
  74#define DP_DSC_MAX_SUPPORTED_BPC                10
  75
  76/* DP DSC throughput values used for slice count calculations KPixels/s */
  77#define DP_DSC_PEAK_PIXEL_RATE                  2720000
  78#define DP_DSC_MAX_ENC_THROUGHPUT_0             340000
  79#define DP_DSC_MAX_ENC_THROUGHPUT_1             400000
  80
  81/* DP DSC FEC Overhead factor = 1/(0.972261) */
  82#define DP_DSC_FEC_OVERHEAD_FACTOR              972261
  83
  84/* Compliance test status bits  */
  85#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
  86#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  87#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  88#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  89
/* Pairs a DP link rate with the PLL divider values needed to produce it. */
struct dp_link_dpll {
        int clock;              /* link rate in kHz this entry applies to */
        struct dpll dpll;       /* divider settings (p1/p2/n/m1/m2) for that rate */
};
  94
/* g4x DPLL dividers for the two fixed DP link rates (RBR 1.62G, HBR 2.7G). */
static const struct dp_link_dpll g4x_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
 101
/* PCH DPLL dividers for the two fixed DP link rates. */
static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
 108
/* VLV DPLL dividers for the two fixed DP link rates. */
static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
 115
/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires to program fractional division for m2.
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        /* NOTE(review): intermediate eDP 1.4 rates use runtime-computed dividers
         * elsewhere — only the fixed rates are tabulated here. */
};
 131
/* Constants for DP DSC configurations */
/* Compressed bpp values the DSC code will snap down to (integer part only). */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
 134
/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
 139
 140/**
 141 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 142 * @intel_dp: DP struct
 143 *
 144 * If a CPU or PCH DP output is attached to an eDP panel, this function
 145 * will return true, and false otherwise.
 146 */
 147bool intel_dp_is_edp(struct intel_dp *intel_dp)
 148{
 149        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 150
 151        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 152}
 153
/* Resolve a connector to the intel_dp of its attached encoder. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
 158
 159static void intel_dp_link_down(struct intel_encoder *encoder,
 160                               const struct intel_crtc_state *old_crtc_state);
 161static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
 162static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 163static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
 164                                           const struct intel_crtc_state *crtc_state);
 165static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
 166                                      enum pipe pipe);
 167static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 168
 169/* update sink rates from dpcd */
 170static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
 171{
 172        static const int dp_rates[] = {
 173                162000, 270000, 540000, 810000
 174        };
 175        int i, max_rate;
 176
 177        max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
 178
 179        for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
 180                if (dp_rates[i] > max_rate)
 181                        break;
 182                intel_dp->sink_rates[i] = dp_rates[i];
 183        }
 184
 185        intel_dp->num_sink_rates = i;
 186}
 187
 188/* Get length of rates array potentially limited by max_rate. */
 189static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
 190{
 191        int i;
 192
 193        /* Limit results by potentially reduced max rate */
 194        for (i = 0; i < len; i++) {
 195                if (rates[len - i - 1] <= max_rate)
 196                        return len - i;
 197        }
 198
 199        return 0;
 200}
 201
 202/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
                                          int max_rate)
{
        /* Thin wrapper applying the limit to the precomputed common rates. */
        return intel_dp_rate_limit_len(intel_dp->common_rates,
                                       intel_dp->num_common_rates, max_rate);
}
 209
 210/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
        /* common_rates[] is sorted ascending, so the last entry is the max. */
        return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
 215
 216/* Theoretical max between source and sink */
 217static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
 218{
 219        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 220        int source_max = intel_dig_port->max_lanes;
 221        int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
 222        int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
 223
 224        return min3(source_max, sink_max, fia_max);
 225}
 226
/*
 * Current usable max lane count — may have been reduced from the common max
 * by link-training fallback (see intel_dp_get_link_train_fallback_values()).
 */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        return intel_dp->max_link_lane_count;
}
 231
/*
 * Bandwidth needed for a mode: @pixel_clock (kHz) times @bpp, converted from
 * bits to bytes, rounded up.  Units match intel_dp_max_data_rate().
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
        /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
        return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
 238
/*
 * Max payload the link can carry: one byte per lane per link symbol clock.
 */
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
         * link rate that is generally expressed in Gbps. Since, 8 bits of data
         * is transmitted every LS_Clk per lane, there is no need to account for
         * the channel encoding that is done in the PHY layer here.
         */

        return max_link_clock * max_lanes;
}
 250
 251static int
 252intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
 253{
 254        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 255        struct intel_encoder *encoder = &intel_dig_port->base;
 256        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 257        int max_dotclk = dev_priv->max_dotclk_freq;
 258        int ds_max_dotclk;
 259
 260        int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
 261
 262        if (type != DP_DS_PORT_TYPE_VGA)
 263                return max_dotclk;
 264
 265        ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
 266                                                    intel_dp->downstream_ports);
 267
 268        if (ds_max_dotclk != 0)
 269                max_dotclk = min(max_dotclk, ds_max_dotclk);
 270
 271        return max_dotclk;
 272}
 273
 274static int cnl_max_source_rate(struct intel_dp *intel_dp)
 275{
 276        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 277        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 278        enum port port = dig_port->base.port;
 279
 280        u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 281
 282        /* Low voltage SKUs are limited to max of 5.4G */
 283        if (voltage == VOLTAGE_INFO_0_85V)
 284                return 540000;
 285
 286        /* For this SKU 8.1G is supported in all ports */
 287        if (IS_CNL_WITH_PORT_F(dev_priv))
 288                return 810000;
 289
 290        /* For other SKUs, max rate on ports A and D is 5.4G */
 291        if (port == PORT_A || port == PORT_D)
 292                return 540000;
 293
 294        return 810000;
 295}
 296
 297static int icl_max_source_rate(struct intel_dp *intel_dp)
 298{
 299        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 300        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 301        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
 302
 303        if (intel_phy_is_combo(dev_priv, phy) &&
 304            !IS_ELKHARTLAKE(dev_priv) &&
 305            !intel_dp_is_edp(intel_dp))
 306                return 540000;
 307
 308        return 810000;
 309}
 310
/*
 * One-time setup of intel_dp->source_rates/num_source_rates: pick the
 * platform's rate table, then trim it to the SKU/port max and any VBT limit.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
        /* The values must be in increasing order */
        static const int cnl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
        };
        static const int bxt_rates[] = {
                162000, 216000, 243000, 270000, 324000, 432000, 540000
        };
        static const int skl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000
        };
        static const int hsw_rates[] = {
                162000, 270000, 540000
        };
        static const int g4x_rates[] = {
                162000, 270000
        };
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        const struct ddi_vbt_port_info *info =
                &dev_priv->vbt.ddi_port_info[dig_port->base.port];
        const int *source_rates;
        int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

        /* This should only be done once */
        WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

        /* Pick the platform table; gen10+ additionally has a per-SKU max. */
        if (INTEL_GEN(dev_priv) >= 10) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
                if (IS_GEN(dev_priv, 10))
                        max_rate = cnl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
        } else if (IS_GEN9_LP(dev_priv)) {
                source_rates = bxt_rates;
                size = ARRAY_SIZE(bxt_rates);
        } else if (IS_GEN9_BC(dev_priv)) {
                source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
        } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
                   IS_BROADWELL(dev_priv)) {
                source_rates = hsw_rates;
                size = ARRAY_SIZE(hsw_rates);
        } else {
                source_rates = g4x_rates;
                size = ARRAY_SIZE(g4x_rates);
        }

        /* A VBT limit of 0 means "no limit"; combine with the hardware max. */
        if (max_rate && vbt_max_rate)
                max_rate = min(max_rate, vbt_max_rate);
        else if (vbt_max_rate)
                max_rate = vbt_max_rate;

        if (max_rate)
                size = intel_dp_rate_limit_len(source_rates, size, max_rate);

        intel_dp->source_rates = source_rates;
        intel_dp->num_source_rates = size;
}
 373
 374static int intersect_rates(const int *source_rates, int source_len,
 375                           const int *sink_rates, int sink_len,
 376                           int *common_rates)
 377{
 378        int i = 0, j = 0, k = 0;
 379
 380        while (i < source_len && j < sink_len) {
 381                if (source_rates[i] == sink_rates[j]) {
 382                        if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
 383                                return k;
 384                        common_rates[k] = source_rates[i];
 385                        ++k;
 386                        ++i;
 387                        ++j;
 388                } else if (source_rates[i] < sink_rates[j]) {
 389                        ++i;
 390                } else {
 391                        ++j;
 392                }
 393        }
 394        return k;
 395}
 396
 397/* return index of rate in rates array, or -1 if not found */
 398static int intel_dp_rate_index(const int *rates, int len, int rate)
 399{
 400        int i;
 401
 402        for (i = 0; i < len; i++)
 403                if (rate == rates[i])
 404                        return i;
 405
 406        return -1;
 407}
 408
/*
 * Recompute intel_dp->common_rates as the intersection of source and sink
 * rates.  Both inputs must have been populated first.
 */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
        WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

        intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
                                                     intel_dp->num_source_rates,
                                                     intel_dp->sink_rates,
                                                     intel_dp->num_sink_rates,
                                                     intel_dp->common_rates);

        /* Paranoia, there should always be something in common. */
        if (WARN_ON(intel_dp->num_common_rates == 0)) {
                /* Fall back to RBR, which every DP device must support. */
                intel_dp->common_rates[0] = 162000;
                intel_dp->num_common_rates = 1;
        }
}
 425
 426static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
 427                                       u8 lane_count)
 428{
 429        /*
 430         * FIXME: we need to synchronize the current link parameters with
 431         * hardware readout. Currently fast link training doesn't work on
 432         * boot-up.
 433         */
 434        if (link_rate == 0 ||
 435            link_rate > intel_dp->max_link_rate)
 436                return false;
 437
 438        if (lane_count == 0 ||
 439            lane_count > intel_dp_max_lane_count(intel_dp))
 440                return false;
 441
 442        return true;
 443}
 444
 445static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
 446                                                     int link_rate,
 447                                                     u8 lane_count)
 448{
 449        const struct drm_display_mode *fixed_mode =
 450                intel_dp->attached_connector->panel.fixed_mode;
 451        int mode_rate, max_rate;
 452
 453        mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
 454        max_rate = intel_dp_max_data_rate(link_rate, lane_count);
 455        if (mode_rate > max_rate)
 456                return false;
 457
 458        return true;
 459}
 460
/*
 * After a link-training failure at (link_rate, lane_count), reduce the link
 * parameters for the next attempt: first step down the rate, then halve the
 * lane count.  For eDP, only fall back if the panel's fixed mode still fits
 * (otherwise retry with the same parameters).
 *
 * Returns 0 on success (caller retries) or -1 when no fallback remains.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
                                            int link_rate, u8 lane_count)
{
        int index;

        index = intel_dp_rate_index(intel_dp->common_rates,
                                    intel_dp->num_common_rates,
                                    link_rate);
        if (index > 0) {
                /* Try the next lower common rate at the same lane count. */
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp->common_rates[index - 1],
                                                              lane_count)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
                intel_dp->max_link_lane_count = lane_count;
        } else if (lane_count > 1) {
                /* Lowest rate reached: halve the lanes and restart at max rate. */
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp_max_common_rate(intel_dp),
                                                              lane_count >> 1)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
                intel_dp->max_link_lane_count = lane_count >> 1;
        } else {
                DRM_ERROR("Link Training Unsuccessful\n");
                return -1;
        }

        return 0;
}
 496
/*
 * Scale @mode_clock up by the FEC overhead factor (1/0.972261, see
 * DP_DSC_FEC_OVERHEAD_FACTOR) so bandwidth checks account for Forward
 * Error Correction symbols on the link.
 */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
        return div_u64(mul_u32_u32(mode_clock, 1000000U),
                       DP_DSC_FEC_OVERHEAD_FACTOR);
}
 502
/*
 * Compute the largest supported DSC output bpp for the given link and mode,
 * bounded by both link bandwidth and small-joiner RAM, snapped down to a
 * valid VESA bpp.  Returns the bpp in U6.4 fixed point (integer << 4), or 0
 * if even the smallest valid bpp cannot be carried.
 */
static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
                                       u32 mode_clock, u32 mode_hdisplay)
{
        u32 bits_per_pixel, max_bpp_small_joiner_ram;
        int i;

        /*
         * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
         * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
         * for SST -> TimeSlotsPerMTP is 1,
         * for MST -> TimeSlotsPerMTP has to be calculated
         */
        bits_per_pixel = (link_clock * lane_count * 8) /
                         intel_dp_mode_to_fec_clock(mode_clock);
        DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);

        /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
        max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
        DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);

        /*
         * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
         * check, output bpp from small joiner RAM check)
         */
        bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

        /* Error out if the max bpp is less than smallest allowed valid bpp */
        if (bits_per_pixel < valid_dsc_bpp[0]) {
                DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
                              bits_per_pixel, valid_dsc_bpp[0]);
                return 0;
        }

        /* Find the nearest match in the array of known BPPs from VESA */
        for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
                if (bits_per_pixel < valid_dsc_bpp[i + 1])
                        break;
        }
        /* Snap DOWN to the highest valid bpp not exceeding the computed max. */
        bits_per_pixel = valid_dsc_bpp[i];

        /*
         * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
         * fractional part is 0
         */
        return bits_per_pixel << 4;
}
 549
/*
 * Pick the smallest valid DSC slice count that satisfies both the encoder
 * throughput limits and the sink's max slice width/count.  Returns 0 if no
 * supported slice count works.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
                                       int mode_clock, int mode_hdisplay)
{
        u8 min_slice_count, i;
        int max_slice_width;

        /* Higher pixel rates need more slices to stay within per-slice
         * encoder throughput. */
        if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
                min_slice_count = DIV_ROUND_UP(mode_clock,
                                               DP_DSC_MAX_ENC_THROUGHPUT_0);
        else
                min_slice_count = DIV_ROUND_UP(mode_clock,
                                               DP_DSC_MAX_ENC_THROUGHPUT_1);

        max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
        if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
                DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
                              max_slice_width);
                return 0;
        }
        /* Also take into account max slice width */
        min_slice_count = min_t(u8, min_slice_count,
                                DIV_ROUND_UP(mode_hdisplay,
                                             max_slice_width));

        /* Find the closest match to the valid slice count values */
        for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
                if (valid_dsc_slicecount[i] >
                    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
                                                    false))
                        break;
                if (min_slice_count  <= valid_dsc_slicecount[i])
                        return valid_dsc_slicecount[i];
        }

        DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
        return 0;
}
 587
/*
 * drm_connector_helper mode_valid hook for DP connectors: reject modes that
 * exceed the panel's fixed mode (eDP), the link bandwidth (unless DSC can
 * compress them), or the downstream/source dotclock limits.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
        u16 dsc_max_output_bpp = 0;
        u8 dsc_slice_count = 0;

        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;

        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

        /* eDP panels can't scan out anything larger than the fixed mode. */
        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        /* Bandwidth check is done at 18 bpp (6 bpc), the minimum we'd use. */
        mode_rate = intel_dp_link_required(target_clock, 18);

        /*
         * Output bpp is stored in 6.4 format so right shift by 4 to get the
         * integer value since we support only integer values of bpp.
         */
        if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
            drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
                if (intel_dp_is_edp(intel_dp)) {
                        dsc_max_output_bpp =
                                drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
                        dsc_slice_count =
                                drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
                                                                true);
                } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
                        /* External DP requires FEC alongside DSC. */
                        dsc_max_output_bpp =
                                intel_dp_dsc_get_output_bpp(max_link_clock,
                                                            max_lanes,
                                                            target_clock,
                                                            mode->hdisplay) >> 4;
                        dsc_slice_count =
                                intel_dp_dsc_get_slice_count(intel_dp,
                                                             target_clock,
                                                             mode->hdisplay);
                }
        }

        /* Over-bandwidth modes are still OK if DSC produced a usable config. */
        if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
            target_clock > max_dotclk)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}
 660
 661u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
 662{
 663        int i;
 664        u32 v = 0;
 665
 666        if (src_bytes > 4)
 667                src_bytes = 4;
 668        for (i = 0; i < src_bytes; i++)
 669                v |= ((u32)src[i]) << ((3 - i) * 8);
 670        return v;
 671}
 672
 673static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
 674{
 675        int i;
 676        if (dst_bytes > 4)
 677                dst_bytes = 4;
 678        for (i = 0; i < dst_bytes; i++)
 679                dst[i] = src >> ((3-i) * 8);
 680}
 681
 682static void
 683intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
 684static void
 685intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 686                                              bool force_disable_vdd);
 687static void
 688intel_dp_pps_init(struct intel_dp *intel_dp);
 689
/*
 * Acquire the panel power sequencer lock: take an AUX power domain
 * reference first, then pps_mutex.  Returns the wakeref that must be
 * handed back to pps_unlock().
 */
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        intel_wakeref_t wakeref;

        /*
         * See intel_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        wakeref = intel_display_power_get(dev_priv,
                                          intel_aux_power_domain(dp_to_dig_port(intel_dp)));

        mutex_lock(&dev_priv->pps_mutex);

        return wakeref;
}
 707
/*
 * Drop pps_mutex and the AUX power reference taken by pps_lock().
 * Always returns 0 so the with_pps_lock() for-loop terminates after
 * one iteration.
 */
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        mutex_unlock(&dev_priv->pps_mutex);
        intel_display_power_put(dev_priv,
                                intel_aux_power_domain(dp_to_dig_port(intel_dp)),
                                wakeref);
        return 0;
}
 719
 720#define with_pps_lock(dp, wf) \
 721        for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
 722
/*
 * Briefly enable and disable the DP port so the selected pipe's power
 * sequencer locks onto it.  Requires the pipe's DPLL, which is force-enabled
 * temporarily if needed.  Must not be called while the port is enabled.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        u32 DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power sequencer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->base.port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->base.port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        /* Minimal valid port config: lowest swing, 1 lane, training pattern 1. */
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev_priv))
                DP |= DP_PIPE_SEL_CHV(pipe);
        else
                DP |= DP_PIPE_SEL(pipe);

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable temporarily it if it's not already enabled.
         */
        if (!pll_enabled) {
                /* On CHV the CL also needs power-gating override for the kick. */
                release_cl_override = IS_CHERRYVIEW(dev_priv) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);

                if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
                                     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
                        DRM_ERROR("Failed to force on pll for pipe %c!\n",
                                  pipe_name(pipe));
                        return;
                }
        }

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        /* Undo the temporary PLL/power-gating changes, in reverse order. */
        if (!pll_enabled) {
                vlv_force_pll_off(dev_priv, pipe);

                if (release_cl_override)
                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
        }
}
 795
 796static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
 797{
 798        struct intel_encoder *encoder;
 799        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
 800
 801        /*
 802         * We don't have power sequencer currently.
 803         * Pick one that's not used by other ports.
 804         */
 805        for_each_intel_dp(&dev_priv->drm, encoder) {
 806                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 807
 808                if (encoder->type == INTEL_OUTPUT_EDP) {
 809                        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
 810                                intel_dp->active_pipe != intel_dp->pps_pipe);
 811
 812                        if (intel_dp->pps_pipe != INVALID_PIPE)
 813                                pipes &= ~(1 << intel_dp->pps_pipe);
 814                } else {
 815                        WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
 816
 817                        if (intel_dp->active_pipe != INVALID_PIPE)
 818                                pipes &= ~(1 << intel_dp->active_pipe);
 819                }
 820        }
 821
 822        if (pipes == 0)
 823                return INVALID_PIPE;
 824
 825        return ffs(pipes) - 1;
 826}
 827
/*
 * Return the pipe whose power sequencer this eDP port should use,
 * assigning (and kicking) a free one if none is bound yet.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	/* An active port must be driven by its own sequencer. */
	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	/* Already have a sequencer? keep using it. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
 874
 875static int
 876bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 877{
 878        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 879        int backlight_controller = dev_priv->vbt.backlight.controller;
 880
 881        lockdep_assert_held(&dev_priv->pps_mutex);
 882
 883        /* We should never land here with regular DP ports */
 884        WARN_ON(!intel_dp_is_edp(intel_dp));
 885
 886        if (!intel_dp->pps_reset)
 887                return backlight_controller;
 888
 889        intel_dp->pps_reset = false;
 890
 891        /*
 892         * Only the HW needs to be reprogrammed, the SW state is fixed and
 893         * has been setup during connector init.
 894         */
 895        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
 896
 897        return backlight_controller;
 898}
 899
/* Predicate used when scanning for a BIOS-initialized power sequencer. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Does this pipe's PPS report panel power on? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

/* Does this pipe's PPS have VDD forced on? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accept any pipe; used as the last-resort check. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
 920
 921static enum pipe
 922vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 923                     enum port port,
 924                     vlv_pipe_check pipe_check)
 925{
 926        enum pipe pipe;
 927
 928        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
 929                u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
 930                        PANEL_PORT_SELECT_MASK;
 931
 932                if (port_sel != PANEL_PORT_SELECT_VLV(port))
 933                        continue;
 934
 935                if (!pipe_check(dev_priv, pipe))
 936                        continue;
 937
 938                return pipe;
 939        }
 940
 941        return INVALID_PIPE;
 942}
 943
 944static void
 945vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 946{
 947        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 948        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 949        enum port port = intel_dig_port->base.port;
 950
 951        lockdep_assert_held(&dev_priv->pps_mutex);
 952
 953        /* try to find a pipe with this port selected */
 954        /* first pick one where the panel is on */
 955        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 956                                                  vlv_pipe_has_pp_on);
 957        /* didn't find one? pick one where vdd is on */
 958        if (intel_dp->pps_pipe == INVALID_PIPE)
 959                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 960                                                          vlv_pipe_has_vdd_on);
 961        /* didn't find one? pick one with just the correct port */
 962        if (intel_dp->pps_pipe == INVALID_PIPE)
 963                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 964                                                          vlv_pipe_any);
 965
 966        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
 967        if (intel_dp->pps_pipe == INVALID_PIPE) {
 968                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
 969                              port_name(port));
 970                return;
 971        }
 972
 973        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
 974                      port_name(port), pipe_name(intel_dp->pps_pipe));
 975
 976        intel_dp_init_panel_power_sequencer(intel_dp);
 977        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
 978}
 979
/*
 * Invalidate the cached power sequencer state so each eDP port
 * re-initializes (BXT) or re-picks (VLV/CHV) its PPS on next use.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	/* Only these platforms track PPS state this way. */
	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		/* No port should be active at this point. */
		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		/* BXT keeps its fixed index but must reprogram the registers. */
		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
1012
/* The panel power sequencer register set for one PPS instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* panel power control */
	i915_reg_t pp_stat;	/* panel power status */
	i915_reg_t pp_on;	/* power on delays */
	i915_reg_t pp_off;	/* power off delays */
	i915_reg_t pp_div;	/* divisor/cycle delay; invalid where it moved into pp_ctrl */
};
1020
/*
 * Fill in @regs with the PPS registers for this port, selecting the
 * proper sequencer instance on BXT (per controller) and VLV/CHV
 * (per pipe); other platforms use instance 0.
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}
1045
/* Convenience wrapper: the PP_CONTROL register for this port's PPS. */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}
1055
/* Convenience wrapper: the PP_STATUS register for this port's PPS. */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}
1065
/*
 * Reboot notifier handler to shut down panel power, guaranteeing the
 * T12 power-cycle timing across a restart. Only applicable when the
 * panel PM state is not otherwise tracked (VLV/CHV).
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/* Only eDP panels care, and only on a restart. */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			/* wait out the full power cycle before rebooting */
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}
1099
/* Is panel power currently on, according to this port's PPS? */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No PPS assigned on VLV/CHV means there is nothing to read. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
1112
/* Is VDD currently forced on by this port's PPS? */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No PPS assigned on VLV/CHV means there is nothing to read. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
1125
/*
 * Sanity check before AUX traffic on eDP: warn if neither panel power
 * nor VDD force is on, since the transfer is then going to fail.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (edp_have_panel_power(intel_dp) || edp_have_panel_vdd(intel_dp))
		return;

	WARN(1, "eDP powered off while attempting aux channel communication.\n");
	DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
		      I915_READ(_pp_stat_reg(intel_dp)),
		      I915_READ(_pp_ctrl_reg(intel_dp)));
}
1141
/*
 * Wait (on gmbus_wait_queue, up to 10ms) for the AUX channel to clear
 * its SEND_BUSY bit. Returns the final value of the AUX ctl register.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

	/* C: transfer is finished once SEND_BUSY deasserts */
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}
1163
1164static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1165{
1166        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1167
1168        if (index)
1169                return 0;
1170
1171        /*
1172         * The clock divider is based off the hrawclk, and would like to run at
1173         * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
1174         */
1175        return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
1176}
1177
1178static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1179{
1180        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1181        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1182
1183        if (index)
1184                return 0;
1185
1186        /*
1187         * The clock divider is based off the cdclk or PCH rawclk, and would
1188         * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
1189         * divide by 2000 and use that
1190         */
1191        if (dig_port->aux_ch == AUX_CH_A)
1192                return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
1193        else
1194                return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
1195}
1196
1197static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1198{
1199        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1200        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1201
1202        if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1203                /* Workaround for non-ULT HSW */
1204                switch (index) {
1205                case 0: return 63;
1206                case 1: return 72;
1207                default: return 0;
1208                }
1209        }
1210
1211        return ilk_get_aux_clock_divider(intel_dp, index);
1212}
1213
1214static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1215{
1216        /*
1217         * SKL doesn't need us to program the AUX clock divider (Hardware will
1218         * derive the clock from CDCLK automatically). We still implement the
1219         * get_aux_clock_divider vfunc to plug-in into the existing code.
1220         */
1221        return index ? 0 : 1;
1222}
1223
1224static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1225                                int send_bytes,
1226                                u32 aux_clock_divider)
1227{
1228        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1229        struct drm_i915_private *dev_priv =
1230                        to_i915(intel_dig_port->base.base.dev);
1231        u32 precharge, timeout;
1232
1233        if (IS_GEN(dev_priv, 6))
1234                precharge = 3;
1235        else
1236                precharge = 5;
1237
1238        if (IS_BROADWELL(dev_priv))
1239                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1240        else
1241                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1242
1243        return DP_AUX_CH_CTL_SEND_BUSY |
1244               DP_AUX_CH_CTL_DONE |
1245               DP_AUX_CH_CTL_INTERRUPT |
1246               DP_AUX_CH_CTL_TIME_OUT_ERROR |
1247               timeout |
1248               DP_AUX_CH_CTL_RECEIVE_ERROR |
1249               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1250               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1251               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1252}
1253
1254static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1255                                int send_bytes,
1256                                u32 unused)
1257{
1258        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1259        struct drm_i915_private *i915 =
1260                        to_i915(intel_dig_port->base.base.dev);
1261        enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
1262        u32 ret;
1263
1264        ret = DP_AUX_CH_CTL_SEND_BUSY |
1265              DP_AUX_CH_CTL_DONE |
1266              DP_AUX_CH_CTL_INTERRUPT |
1267              DP_AUX_CH_CTL_TIME_OUT_ERROR |
1268              DP_AUX_CH_CTL_TIME_OUT_MAX |
1269              DP_AUX_CH_CTL_RECEIVE_ERROR |
1270              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1271              DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1272              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1273
1274        if (intel_phy_is_tc(i915, phy) &&
1275            intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
1276                ret |= DP_AUX_CH_CTL_TBT_IO;
1277
1278        return ret;
1279}
1280
/*
 * Perform one raw AUX channel transfer: send @send_bytes from @send,
 * receive up to @recv_size bytes into @recv. Returns the number of
 * bytes received, or a negative error code.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(intel_dig_port);
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(intel_dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		/* Rate-limit the WARN to one per distinct stuck status. */
		static u32 last_status = -1;
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Walk the platform's divider list, retrying with each in turn. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Release everything in the reverse order it was acquired. */
	pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(intel_dig_port);

	return ret;
}
1468
/* An AUX request header is 3 address/command bytes plus one length byte. */
#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)

/* Pack the AUX request, address and length into the first 4 tx bytes. */
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;	/* AUX encodes the length as size - 1 */
}
1481
/*
 * drm_dp_aux .transfer() hook: translate a drm_dp_aux_msg into a raw
 * AUX transfer and decode the reply. Returns the number of payload
 * bytes transferred or a negative error code.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Address-only messages (size 0) omit the length byte. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		/* A buffer without a size (or vice versa) is inconsistent. */
		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			/* The reply code lives in the high nibble of byte 0. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1552
1553
1554static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1555{
1556        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1557        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1558        enum aux_ch aux_ch = dig_port->aux_ch;
1559
1560        switch (aux_ch) {
1561        case AUX_CH_B:
1562        case AUX_CH_C:
1563        case AUX_CH_D:
1564                return DP_AUX_CH_CTL(aux_ch);
1565        default:
1566                MISSING_CASE(aux_ch);
1567                return DP_AUX_CH_CTL(AUX_CH_B);
1568        }
1569}
1570
1571static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1572{
1573        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1574        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1575        enum aux_ch aux_ch = dig_port->aux_ch;
1576
1577        switch (aux_ch) {
1578        case AUX_CH_B:
1579        case AUX_CH_C:
1580        case AUX_CH_D:
1581                return DP_AUX_CH_DATA(aux_ch, index);
1582        default:
1583                MISSING_CASE(aux_ch);
1584                return DP_AUX_CH_DATA(AUX_CH_B, index);
1585        }
1586}
1587
1588static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1589{
1590        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1591        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1592        enum aux_ch aux_ch = dig_port->aux_ch;
1593
1594        switch (aux_ch) {
1595        case AUX_CH_A:
1596                return DP_AUX_CH_CTL(aux_ch);
1597        case AUX_CH_B:
1598        case AUX_CH_C:
1599        case AUX_CH_D:
1600                return PCH_DP_AUX_CH_CTL(aux_ch);
1601        default:
1602                MISSING_CASE(aux_ch);
1603                return DP_AUX_CH_CTL(AUX_CH_A);
1604        }
1605}
1606
1607static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1608{
1609        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1610        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1611        enum aux_ch aux_ch = dig_port->aux_ch;
1612
1613        switch (aux_ch) {
1614        case AUX_CH_A:
1615                return DP_AUX_CH_DATA(aux_ch, index);
1616        case AUX_CH_B:
1617        case AUX_CH_C:
1618        case AUX_CH_D:
1619                return PCH_DP_AUX_CH_DATA(aux_ch, index);
1620        default:
1621                MISSING_CASE(aux_ch);
1622                return DP_AUX_CH_DATA(AUX_CH_A, index);
1623        }
1624}
1625
1626static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1627{
1628        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1629        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1630        enum aux_ch aux_ch = dig_port->aux_ch;
1631
1632        switch (aux_ch) {
1633        case AUX_CH_A:
1634        case AUX_CH_B:
1635        case AUX_CH_C:
1636        case AUX_CH_D:
1637        case AUX_CH_E:
1638        case AUX_CH_F:
1639                return DP_AUX_CH_CTL(aux_ch);
1640        default:
1641                MISSING_CASE(aux_ch);
1642                return DP_AUX_CH_CTL(AUX_CH_A);
1643        }
1644}
1645
1646static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1647{
1648        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1649        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1650        enum aux_ch aux_ch = dig_port->aux_ch;
1651
1652        switch (aux_ch) {
1653        case AUX_CH_A:
1654        case AUX_CH_B:
1655        case AUX_CH_C:
1656        case AUX_CH_D:
1657        case AUX_CH_E:
1658        case AUX_CH_F:
1659                return DP_AUX_CH_DATA(aux_ch, index);
1660        default:
1661                MISSING_CASE(aux_ch);
1662                return DP_AUX_CH_DATA(AUX_CH_A, index);
1663        }
1664}
1665
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	/*
	 * Free the AUX channel name allocated by intel_dp_aux_init();
	 * kfree(NULL) is a no-op, so a failed/absent allocation is fine.
	 */
	kfree(intel_dp->aux.name);
}
1671
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	/* Select the AUX CTL/DATA register lookup hooks for this platform. */
	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	/*
	 * AUX clock divider derivation: gen9+, HSW/BDW and PCH-split each
	 * have their own hook; everything else uses the g4x one.
	 */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	/* gen9+ has its own AUX send-ctl hook; all others share g4x's. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}
1711
1712bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1713{
1714        int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1715
1716        return max_rate >= 540000;
1717}
1718
1719bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1720{
1721        int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1722
1723        return max_rate >= 810000;
1724}
1725
1726static void
1727intel_dp_set_clock(struct intel_encoder *encoder,
1728                   struct intel_crtc_state *pipe_config)
1729{
1730        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1731        const struct dp_link_dpll *divisor = NULL;
1732        int i, count = 0;
1733
1734        if (IS_G4X(dev_priv)) {
1735                divisor = g4x_dpll;
1736                count = ARRAY_SIZE(g4x_dpll);
1737        } else if (HAS_PCH_SPLIT(dev_priv)) {
1738                divisor = pch_dpll;
1739                count = ARRAY_SIZE(pch_dpll);
1740        } else if (IS_CHERRYVIEW(dev_priv)) {
1741                divisor = chv_dpll;
1742                count = ARRAY_SIZE(chv_dpll);
1743        } else if (IS_VALLEYVIEW(dev_priv)) {
1744                divisor = vlv_dpll;
1745                count = ARRAY_SIZE(vlv_dpll);
1746        }
1747
1748        if (divisor && count) {
1749                for (i = 0; i < count; i++) {
1750                        if (pipe_config->port_clock == divisor[i].clock) {
1751                                pipe_config->dpll = divisor[i].dpll;
1752                                pipe_config->clock_set = true;
1753                                break;
1754                        }
1755                }
1756        }
1757}
1758
/*
 * Format up to @nelem ints from @array into @str as a comma-separated
 * list, never writing more than @len bytes. Output is silently truncated
 * (but stays NUL-terminated) when the buffer runs out.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int written = snprintf(str, len,
				       i ? ", %d" : "%d", array[i]);

		/* snprintf reports the would-be length; stop on truncation. */
		if (written >= len)
			break;

		str += written;
		len -= written;
	}
}
1774
/* Dump the source, sink and common link-rate lists to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	/* Skip the formatting work entirely when KMS debugging is off. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1794
1795int
1796intel_dp_max_link_rate(struct intel_dp *intel_dp)
1797{
1798        int len;
1799
1800        len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1801        if (WARN_ON(len <= 0))
1802                return 162000;
1803
1804        return intel_dp->common_rates[len - 1];
1805}
1806
1807int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1808{
1809        int i = intel_dp_rate_index(intel_dp->sink_rates,
1810                                    intel_dp->num_sink_rates, rate);
1811
1812        if (WARN_ON(i < 0))
1813                i = 0;
1814
1815        return i;
1816}
1817
1818void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1819                           u8 *link_bw, u8 *rate_select)
1820{
1821        /* eDP 1.4 rate select method. */
1822        if (intel_dp->use_rate_select) {
1823                *link_bw = 0;
1824                *rate_select =
1825                        intel_dp_rate_select(intel_dp, port_clock);
1826        } else {
1827                *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1828                *rate_select = 0;
1829        }
1830}
1831
1832static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1833                                         const struct intel_crtc_state *pipe_config)
1834{
1835        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1836
1837        return INTEL_GEN(dev_priv) >= 11 &&
1838                pipe_config->cpu_transcoder != TRANSCODER_A;
1839}
1840
1841static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1842                                  const struct intel_crtc_state *pipe_config)
1843{
1844        return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1845                drm_dp_sink_supports_fec(intel_dp->fec_capable);
1846}
1847
1848static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
1849                                         const struct intel_crtc_state *pipe_config)
1850{
1851        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1852
1853        return INTEL_GEN(dev_priv) >= 10 &&
1854                pipe_config->cpu_transcoder != TRANSCODER_A;
1855}
1856
1857static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1858                                  const struct intel_crtc_state *pipe_config)
1859{
1860        if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
1861                return false;
1862
1863        return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
1864                drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1865}
1866
1867static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1868                                struct intel_crtc_state *pipe_config)
1869{
1870        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1871        struct intel_connector *intel_connector = intel_dp->attached_connector;
1872        int bpp, bpc;
1873
1874        bpp = pipe_config->pipe_bpp;
1875        bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1876
1877        if (bpc > 0)
1878                bpp = min(bpp, 3*bpc);
1879
1880        if (intel_dp_is_edp(intel_dp)) {
1881                /* Get bpp from vbt only for panels that dont have bpp in edid */
1882                if (intel_connector->base.display_info.bpc == 0 &&
1883                    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1884                        DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1885                                      dev_priv->vbt.edp.bpp);
1886                        bpp = dev_priv->vbt.edp.bpp;
1887                }
1888        }
1889
1890        return bpp;
1891}
1892
1893/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin bpp to the requested value by collapsing the range. */
		limits->min_bpp = limits->max_bpp = bpp;
		/* Dithering is forced off when the test requests 6 bpc. */
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			/* Pin the clock index and lane count to the test's. */
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
1928
1929static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
1930{
1931        /*
1932         * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
1933         * format of the number of bytes per pixel will be half the number
1934         * of bytes of RGB pixel.
1935         */
1936        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1937                bpp /= 2;
1938
1939        return bpp;
1940}
1941
1942/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	/* Walk bpp downwards in steps of 2 bits per component (2 * 3). */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		/* Within a bpp, prefer the lowest clock, then fewest lanes. */
		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					/* First fit: max bpp, min clock, min lanes. */
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	/* No combination within the limits can carry the mode. */
	return -EINVAL;
}
1979
1980static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1981{
1982        int i, num_bpc;
1983        u8 dsc_bpc[3] = {0};
1984
1985        num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1986                                                       dsc_bpc);
1987        for (i = 0; i < num_bpc; i++) {
1988                if (dsc_max_bpc >= dsc_bpc[i])
1989                        return dsc_bpc[i] * 3;
1990        }
1991
1992        return 0;
1993}
1994
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/*
	 * FEC is enabled on DP (not eDP) when supported;
	 * intel_dp_supports_dsc() below requires it for non-eDP.
	 */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Cap the DSC input bpc by what the connector property requests. */
	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
			    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* eDP: compressed bpp comes straight from the sink's DPCD. */
		pipe_config->dsc_params.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* DP: derive compressed bpp and slice count from the mode. */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc_params.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc_params.slice_count > 1) {
			pipe_config->dsc_params.dsc_split = true;
		} else {
			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
			      "Compressed BPP = %d\n",
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);
		return ret;
	}

	pipe_config->dsc_params.compression_enable = true;
	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
		      "Compressed Bpp = %d Slice Count = %d\n",
		      pipe_config->pipe_bpp,
		      pipe_config->dsc_params.compressed_bpp,
		      pipe_config->dsc_params.slice_count);

	return 0;
}
2092
2093int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
2094{
2095        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
2096                return 6 * 3;
2097        else
2098                return 8 * 3;
2099}
2100
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	/* min/max_clock are indices into intel_dp->common_rates[]. */
	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config);
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	/* Compliance test requests may pin bpp/clock/lane count. */
	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max rate %d max bpp %d pixel clock %iKHz\n",
		      limits.max_lane_count,
		      intel_dp->common_rates[limits.max_clock],
		      limits.max_bpp, adjusted_mode->crtc_clock);

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->dsc_params.compressed_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	} else {
		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->pipe_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	}
	return 0;
}
2186
static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
			 struct drm_connector *connector,
			 struct intel_crtc_state *crtc_state)
{
	const struct drm_display_info *info = &connector->display_info;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	int ret;

	/*
	 * Switch to YCbCr 4:2:0 output only when the mode is 4:2:0-only,
	 * the sink reports colorimetry support and the connector allows
	 * 4:2:0; otherwise keep the current output format.
	 */
	if (!drm_mode_is_420_only(info, adjusted_mode) ||
	    !intel_dp_get_colorimetry_status(intel_dp) ||
	    !connector->ycbcr_420_allowed)
		return 0;

	crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	/* YCBCR 420 output conversion needs a scaler */
	ret = skl_update_scaler_crtc(crtc_state);
	if (ret) {
		DRM_DEBUG_KMS("Scaler allocation for output failed\n");
		return ret;
	}

	intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);

	return 0;
}
2216
2217bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2218                                  const struct drm_connector_state *conn_state)
2219{
2220        const struct intel_digital_connector_state *intel_conn_state =
2221                to_intel_digital_connector_state(conn_state);
2222        const struct drm_display_mode *adjusted_mode =
2223                &crtc_state->base.adjusted_mode;
2224
2225        if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2226                /*
2227                 * See:
2228                 * CEA-861-E - 5.1 Default Encoding Parameters
2229                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2230                 */
2231                return crtc_state->pipe_bpp != 18 &&
2232                        drm_default_rgb_quant_range(adjusted_mode) ==
2233                        HDMI_QUANTIZATION_RANGE_LIMITED;
2234        } else {
2235                return intel_conn_state->broadcast_rgb ==
2236                        INTEL_BROADCAST_RGB_LIMITED;
2237        }
2238}
2239
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	/* Pre-DDI PCH-split platforms drive all ports but A via the PCH. */
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	/* Start from RGB; YCbCr 4:2:0 config may override it below. */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
	else
		ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
					       pipe_config);

	if (ret)
		return ret;

	pipe_config->has_drrs = false;
	/* No audio on g4x or on port A; otherwise honour the property. */
	if (IS_G4X(dev_priv) || port == PORT_A)
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		/* eDP: replace the requested mode with the panel's fixed mode. */
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_GEN(dev_priv) >= 9) {
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH(dev_priv))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 conn_state->scaling_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						conn_state->scaling_mode);
	}

	/* Reject mode flags the hardware cannot drive over DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* The M/N values are based on the compressed bpp when DSC is on. */
	if (pipe_config->dsc_params.compression_enable)
		output_bpp = pipe_config->dsc_params.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	/* Second set of M/N values for the DRRS downclocked mode. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(output_bpp,
					       pipe_config->lane_count,
					       intel_connector->panel.downclock_mode->clock,
					       pipe_config->port_clock,
					       &pipe_config->dp_m2_n2,
					       constant_n, pipe_config->fec_enable);
	}

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);

	return 0;
}
2344
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, u8 lane_count,
			      bool link_mst)
{
	/* New parameters invalidate any previously trained link state. */
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
	intel_dp->link_mst = link_mst;
}
2354
/*
 * Build up the DP port register value for the upcoming modeset and
 * stash it in intel_dp->DP. The only hardware touched here is
 * TRANS_DP_CTL on CPT (enhanced framing bit); the DP value itself is
 * presumably written out later by the enable path — not visible in
 * this chunk.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	/* IVB CPU port A: sync polarity and pipe select live in the DP reg */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT PCH ports: enhanced framing lives in TRANS_DP_CTL */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU (and VLV/CHV) layout */
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}
2438
/*
 * PP_STATUS mask/value pairs used by wait_panel_*() below to poll the
 * panel power sequencer into the on, off and power-cycle-idle states.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);
2449
/*
 * Poll the panel power sequencer status register until
 * (PP_STATUS & mask) == value, giving up with an error after 5s.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* Timeout is not fatal; just log the registers and carry on. */
	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
2477
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
2483
/* Wait for the power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
2489
/*
 * Before (re)enabling panel power, wait out whatever remains of the
 * panel power-cycle delay measured from the last power off, then wait
 * for the sequencer to reach the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
2510
/* Honour the backlight-on delay measured from the last panel power on. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
2516
/* Honour the backlight-off delay measured from the last backlight off. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
2522
2523/* Read the current pp_control value, unlocking the register if it
2524 * is locked
2525 */
2526
2527static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2528{
2529        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2530        u32 control;
2531
2532        lockdep_assert_held(&dev_priv->pps_mutex);
2533
2534        control = I915_READ(_pp_ctrl_reg(intel_dp));
2535        if (WARN_ON(!HAS_DDI(dev_priv) &&
2536                    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2537                control &= ~PANEL_UNLOCK_MASK;
2538                control |= PANEL_UNLOCK_REGS;
2539        }
2540        return control;
2541}
2542
2543/*
2544 * Must be paired with edp_panel_vdd_off().
2545 * Must hold pps_mutex around the whole on/off sequence.
2546 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2547 */
2548static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2549{
2550        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2551        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2552        u32 pp;
2553        i915_reg_t pp_stat_reg, pp_ctrl_reg;
2554        bool need_to_disable = !intel_dp->want_panel_vdd;
2555
2556        lockdep_assert_held(&dev_priv->pps_mutex);
2557
2558        if (!intel_dp_is_edp(intel_dp))
2559                return false;
2560
2561        cancel_delayed_work(&intel_dp->panel_vdd_work);
2562        intel_dp->want_panel_vdd = true;
2563
2564        if (edp_have_panel_vdd(intel_dp))
2565                return need_to_disable;
2566
2567        intel_display_power_get(dev_priv,
2568                                intel_aux_power_domain(intel_dig_port));
2569
2570        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2571                      port_name(intel_dig_port->base.port));
2572
2573        if (!edp_have_panel_power(intel_dp))
2574                wait_panel_power_cycle(intel_dp);
2575
2576        pp = ironlake_get_pp_control(intel_dp);
2577        pp |= EDP_FORCE_VDD;
2578
2579        pp_stat_reg = _pp_stat_reg(intel_dp);
2580        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2581
2582        I915_WRITE(pp_ctrl_reg, pp);
2583        POSTING_READ(pp_ctrl_reg);
2584        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2585                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2586        /*
2587         * If the panel wasn't on, delay before accessing aux channel
2588         */
2589        if (!edp_have_panel_power(intel_dp)) {
2590                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2591                              port_name(intel_dig_port->base.port));
2592                msleep(intel_dp->panel_power_up_delay);
2593        }
2594
2595        return need_to_disable;
2596}
2597
2598/*
2599 * Must be paired with intel_edp_panel_vdd_off() or
2600 * intel_edp_panel_off().
2601 * Nested calls to these functions are not allowed since
2602 * we drop the lock. Caller must use some higher level
2603 * locking to prevent nested calls from other threads.
2604 */
2605void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2606{
2607        intel_wakeref_t wakeref;
2608        bool vdd;
2609
2610        if (!intel_dp_is_edp(intel_dp))
2611                return;
2612
2613        vdd = false;
2614        with_pps_lock(intel_dp, wakeref)
2615                vdd = edp_panel_vdd_on(intel_dp);
2616        I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2617             port_name(dp_to_dig_port(intel_dp)->base.port));
2618}
2619
/*
 * Actually drop the VDD force override. Must only run once nobody
 * wants VDD any more (WARNs if want_panel_vdd is still set), and
 * releases the AUX power domain reference edp_panel_vdd_on() took.
 * Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	/* Nothing to do if the hardware override is already clear. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Panel power was already off: start the power-cycle clock now. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(intel_dig_port));
}
2657
2658static void edp_panel_vdd_work(struct work_struct *__work)
2659{
2660        struct intel_dp *intel_dp =
2661                container_of(to_delayed_work(__work),
2662                             struct intel_dp, panel_vdd_work);
2663        intel_wakeref_t wakeref;
2664
2665        with_pps_lock(intel_dp, wakeref) {
2666                if (!intel_dp->want_panel_vdd)
2667                        edp_panel_vdd_off_sync(intel_dp);
2668        }
2669}
2670
2671static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2672{
2673        unsigned long delay;
2674
2675        /*
2676         * Queue the timer to fire a long time from now (relative to the power
2677         * down delay) to keep the panel power up across a sequence of
2678         * operations.
2679         */
2680        delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2681        schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2682}
2683
2684/*
2685 * Must be paired with edp_panel_vdd_on().
2686 * Must hold pps_mutex around the whole on/off sequence.
2687 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2688 */
2689static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2690{
2691        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2692
2693        lockdep_assert_held(&dev_priv->pps_mutex);
2694
2695        if (!intel_dp_is_edp(intel_dp))
2696                return;
2697
2698        I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2699             port_name(dp_to_dig_port(intel_dp)->base.port));
2700
2701        intel_dp->want_panel_vdd = false;
2702
2703        if (sync)
2704                edp_panel_vdd_off_sync(intel_dp);
2705        else
2706                edp_panel_vdd_schedule_off(intel_dp);
2707}
2708
/*
 * Turn the eDP panel power on and wait for it to come up. Caller must
 * hold pps_mutex. WARNs (and bails) if power is already on. On gen5
 * the panel-reset bit is cleared around the power-up sequence as a
 * workaround and restored once the panel reports on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->base.port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->base.port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Record power-on time so backlight-on can honour its delay. */
	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2755
2756void intel_edp_panel_on(struct intel_dp *intel_dp)
2757{
2758        intel_wakeref_t wakeref;
2759
2760        if (!intel_dp_is_edp(intel_dp))
2761                return;
2762
2763        with_pps_lock(intel_dp, wakeref)
2764                edp_panel_on(intel_dp);
2765}
2766
2767
/*
 * Turn the eDP panel power off and wait for it. Clears the VDD force
 * bit in the same write (some panels misbehave otherwise), records the
 * power-off timestamp for the next power-cycle wait, and drops the AUX
 * power reference taken when VDD was enabled. Caller must hold
 * pps_mutex and must currently have VDD requested (WARNs if not).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dig_port->base.port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dig_port->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_off(intel_dp);
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
2805
2806void intel_edp_panel_off(struct intel_dp *intel_dp)
2807{
2808        intel_wakeref_t wakeref;
2809
2810        if (!intel_dp_is_edp(intel_dp))
2811                return;
2812
2813        with_pps_lock(intel_dp, wakeref)
2814                edp_panel_off(intel_dp);
2815}
2816
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	/* Set the backlight-enable bit in PP_CONTROL under the pps lock. */
	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ironlake_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2842
2843/* Enable backlight PWM and backlight PP control. */
2844void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2845                            const struct drm_connector_state *conn_state)
2846{
2847        struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2848
2849        if (!intel_dp_is_edp(intel_dp))
2850                return;
2851
2852        DRM_DEBUG_KMS("\n");
2853
2854        intel_panel_enable_backlight(crtc_state, conn_state);
2855        _intel_edp_backlight_on(intel_dp);
2856}
2857
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/* Clear the backlight-enable bit in PP_CONTROL under the pps lock. */
	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	/* Record the off time, then honour the backlight-off delay now. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2881
2882/* Disable backlight PP control and backlight PWM. */
2883void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2884{
2885        struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2886
2887        if (!intel_dp_is_edp(intel_dp))
2888                return;
2889
2890        DRM_DEBUG_KMS("\n");
2891
2892        _intel_edp_backlight_off(intel_dp);
2893        intel_panel_disable_backlight(old_conn_state);
2894}
2895
2896/*
2897 * Hook for controlling the panel power control backlight through the bl_power
2898 * sysfs attribute. Take care to handle multiple calls.
2899 */
2900static void intel_edp_backlight_power(struct intel_connector *connector,
2901                                      bool enable)
2902{
2903        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2904        intel_wakeref_t wakeref;
2905        bool is_enabled;
2906
2907        is_enabled = false;
2908        with_pps_lock(intel_dp, wakeref)
2909                is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2910        if (is_enabled == enable)
2911                return;
2912
2913        DRM_DEBUG_KMS("panel power control backlight %s\n",
2914                      enable ? "enable" : "disable");
2915
2916        if (enable)
2917                _intel_edp_backlight_on(intel_dp);
2918        else
2919                _intel_edp_backlight_off(intel_dp);
2920}
2921
2922static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2923{
2924        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2925        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2926        bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2927
2928        I915_STATE_WARN(cur_state != state,
2929                        "DP port %c state assertion failure (expected %s, current %s)\n",
2930                        port_name(dig_port->base.port),
2931                        onoff(state), onoff(cur_state));
2932}
2933#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2934
2935static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2936{
2937        bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2938
2939        I915_STATE_WARN(cur_state != state,
2940                        "eDP PLL state assertion failure (expected %s, current %s)\n",
2941                        onoff(state), onoff(cur_state));
2942}
2943#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2944#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2945
/*
 * Enable the CPU eDP PLL (DP_A). The pipe, the DP port and the PLL
 * must all be disabled on entry (asserted). Frequency select is
 * programmed first, then the PLL is enabled after the required delays.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	/* Program the frequency select before touching the enable bit. */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2985
/*
 * Disable the CPU eDP PLL (DP_A). The pipe and the DP port must
 * already be disabled, and the PLL must currently be on (asserted).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
3004
3005static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3006{
3007        /*
3008         * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3009         * be capable of signalling downstream hpd with a long pulse.
3010         * Whether or not that means D3 is safe to use is not clear,
3011         * but let's assume so until proven otherwise.
3012         *
3013         * FIXME should really check all downstream ports...
3014         */
3015        return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3016                intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
3017                intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3018}
3019
3020void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
3021                                           const struct intel_crtc_state *crtc_state,
3022                                           bool enable)
3023{
3024        int ret;
3025
3026        if (!crtc_state->dsc_params.compression_enable)
3027                return;
3028
3029        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
3030                                 enable ? DP_DECOMPRESSION_EN : 0);
3031        if (ret < 0)
3032                DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
3033                              enable ? "enable" : "disable");
3034}
3035
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Don't put the sink in D3 if we'd lose downstream HPD. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		/* With an active LSPCON, wait for it to settle in PCON mode. */
		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* ret == 1 means one byte was written successfully. */
	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
3074
3075static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3076                                 enum port port, enum pipe *pipe)
3077{
3078        enum pipe p;
3079
3080        for_each_pipe(dev_priv, p) {
3081                u32 val = I915_READ(TRANS_DP_CTL(p));
3082
3083                if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3084                        *pipe = p;
3085                        return true;
3086                }
3087        }
3088
3089        DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
3090
3091        /* must initialize pipe to something for the asserts */
3092        *pipe = PIPE_A;
3093
3094        return false;
3095}
3096
/*
 * Report whether the DP port behind dp_reg is enabled. *pipe is always
 * filled in (the state asserts want a pipe even for a disabled port);
 * where it comes from depends on the platform register layout.
 */
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
			   i915_reg_t dp_reg, enum port port,
			   enum pipe *pipe)
{
	bool ret;
	u32 val;

	val = I915_READ(dp_reg);

	ret = val & DP_PORT_EN;

	/* asserts want to know the pipe even if the port is disabled */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		/* CPT keeps the selection in TRANS_DP_CTL, not the port reg */
		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
	else
		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;

	return ret;
}
3120
3121static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3122                                  enum pipe *pipe)
3123{
3124        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3125        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3126        intel_wakeref_t wakeref;
3127        bool ret;
3128
3129        wakeref = intel_display_power_get_if_enabled(dev_priv,
3130                                                     encoder->power_domain);
3131        if (!wakeref)
3132                return false;
3133
3134        ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3135                                    encoder->port, pipe);
3136
3137        intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3138
3139        return ret;
3140}
3141
/*
 * Read out the current DP port hardware state into @pipe_config.
 *
 * Used for state readout/verification: fills in output type, audio
 * enable, sync polarity flags, color range, lane count, M/N values and
 * port/dot clocks from the port (and, on CPT, transcoder) registers.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = I915_READ(intel_dp->output_reg);

	/* Port A (CPU eDP) has no audio support. */
	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/*
	 * On CPT the sync polarity lives in the transcoder's
	 * TRANS_DP_CTL register rather than in the port register.
	 */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	/* Lane count is stored as (lanes - 1) in the port register. */
	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the eDP PLL frequency select bits. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
3225
/*
 * Common DP disable: shut off audio, then take the sink and panel down
 * in the required order (backlight -> sink DPMS off -> panel off),
 * holding vdd across the sequence.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Any subsequent enable must retrain the link from scratch. */
	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
}
3245
/* g4x ->disable() hook: only the common DP disable sequence is needed. */
static void g4x_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3252
/* VLV/CHV ->disable() hook: only the common DP disable sequence is needed. */
static void vlv_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3259
/*
 * g4x ->post_disable() hook: take the link down after the pipe is off,
 * and shut down the eDP PLL for port A.
 */
static void g4x_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
3279
/* VLV ->post_disable() hook: just take the link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}
3286
/*
 * CHV ->post_disable() hook: take the link down, then reset the PHY
 * data lanes under the DPIO lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}
3302
/*
 * Program the source-side link training pattern selected by
 * @dp_train_pat into the appropriate register for this platform.
 *
 * Three register layouts exist:
 *  - DDI platforms: DP_TP_CTL (written here directly);
 *  - IVB port A / CPT PCH ports: the _CPT train bits in *@DP;
 *  - everything else: the legacy train bits in *@DP.
 * For the latter two cases only *@DP is updated; the caller is
 * responsible for writing it to the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 u32 *DP,
			 u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	/* Mask of pattern bits valid for this sink's DPCD revision. */
	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & train_pat_mask);

	if (HAS_DDI(dev_priv)) {
		u32 temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & train_pat_mask) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		case DP_TRAINING_PATTERN_4:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* These platforms cannot generate TPS3. */
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* These platforms cannot generate TPS3. */
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}
}
3386
/*
 * Turn the DP port on with training pattern 1 selected, preserving the
 * VLV/CHV two-write requirement described below.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3409
/*
 * Common DP enable: bring up the port and panel (under the pps lock),
 * wait for the PHY on VLV/CHV, wake the sink, train the link, and
 * finally enable audio if the new state has it.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	u32 dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	/* The port must not already be enabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		/* Panel power-up under vdd, releasing vdd afterwards. */
		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		/* CHV only waits for the lanes actually in use. */
		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
3455
/* g4x ->enable() hook: common enable, then turn the backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
3463
/*
 * VLV/CHV ->enable() hook: only the backlight is handled here; the
 * port/link bring-up happens in the pre_enable hooks on these platforms.
 */
static void vlv_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}
3470
/*
 * g4x ->pre_enable() hook: program the port register shadow and, for
 * port A, turn on the eDP PLL before the pipe is enabled.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp, pipe_config);
}
3484
/*
 * Disconnect @intel_dp from the power sequencer it currently owns:
 * sync off any pending vdd, clear the PPS port select, and mark the
 * pps_pipe as invalid. Called while the port is inactive.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	/* Only pipes A and B have power sequencers on VLV/CHV. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
3515
/*
 * Detach @pipe's power sequencer from whichever DP encoder currently
 * owns it, so the caller can claim it. Must be called with pps_mutex
 * held; warns if the sequencer is stolen from a still-active port.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
		enum port port = encoder->port;

		WARN(intel_dp->active_pipe == pipe,
		     "stealing pipe %c power sequencer from active (e)DP port %c\n",
		     pipe_name(pipe), port_name(port));

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
3541
/*
 * Bind the power sequencer of the crtc's pipe to this encoder before
 * enabling it: release any previously-used sequencer, steal the target
 * pipe's sequencer from other ports, and (for eDP only) initialize the
 * panel power timing registers. Must be called with pps_mutex held.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* Non-eDP ports don't need the panel power sequencer itself. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(encoder->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3584
/* VLV ->pre_enable() hook: PHY setup first, then the common DP enable. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);
}
3593
/*
 * VLV ->pre_pll_enable() hook: program the port register shadow and
 * prepare the PHY before the PLL is turned on.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3602
/*
 * CHV ->pre_enable() hook: PHY setup, common DP enable, then drop the
 * temporary common-lane override.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
3614
/*
 * CHV ->pre_pll_enable() hook: program the port register shadow and
 * prepare the PHY before the PLL is turned on.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}
3623
/* CHV ->post_pll_disable() hook: PHY teardown after the PLL is off. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
3630
3631/*
3632 * Fetch AUX CH registers 0x202 - 0x207 which contain
3633 * link status information
3634 */
3635bool
3636intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3637{
3638        return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3639                                DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3640}
3641
/* These are source-specific values. */
/*
 * Return the maximum DPCD voltage swing level this source can drive
 * for @intel_dp's port, per platform.
 */
u8
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv))
		return intel_ddi_dp_voltage_max(encoder);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3661
/*
 * Return the maximum DPCD pre-emphasis level this source supports at
 * the given @voltage_swing, per platform. Higher swing levels allow
 * less pre-emphasis headroom.
 */
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv)) {
		return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3707
/*
 * Translate the first lane's DPCD train_set into VLV PHY register
 * values (de-emphasis, pre-emphasis and unique transition scale) and
 * program them. Unsupported swing/pre-emphasis combinations are
 * silently ignored (return 0 without touching the PHY).
 *
 * The register values are opaque tuning constants; presumably from the
 * platform PHY tuning tables -- do not derive meaning from them.
 */
static u32 vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3793
/*
 * Translate the first lane's DPCD train_set into CHV PHY de-emphasis
 * and margin values and program them. The unique transition scale is
 * only enabled for the maximum-swing/zero-pre-emphasis combination.
 * Unsupported combinations are silently ignored (return 0 without
 * touching the PHY). The numeric values are opaque PHY tuning
 * constants.
 */
static u32 chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3876
3877static u32
3878g4x_signal_levels(u8 train_set)
3879{
3880        u32 signal_levels = 0;
3881
3882        switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3883        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3884        default:
3885                signal_levels |= DP_VOLTAGE_0_4;
3886                break;
3887        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3888                signal_levels |= DP_VOLTAGE_0_6;
3889                break;
3890        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3891                signal_levels |= DP_VOLTAGE_0_8;
3892                break;
3893        case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3894                signal_levels |= DP_VOLTAGE_1_2;
3895                break;
3896        }
3897        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3898        case DP_TRAIN_PRE_EMPH_LEVEL_0:
3899        default:
3900                signal_levels |= DP_PRE_EMPHASIS_0;
3901                break;
3902        case DP_TRAIN_PRE_EMPH_LEVEL_1:
3903                signal_levels |= DP_PRE_EMPHASIS_3_5;
3904                break;
3905        case DP_TRAIN_PRE_EMPH_LEVEL_2:
3906                signal_levels |= DP_PRE_EMPHASIS_6;
3907                break;
3908        case DP_TRAIN_PRE_EMPH_LEVEL_3:
3909                signal_levels |= DP_PRE_EMPHASIS_9_5;
3910                break;
3911        }
3912        return signal_levels;
3913}
3914
3915/* SNB CPU eDP voltage swing and pre-emphasis control */
3916static u32
3917snb_cpu_edp_signal_levels(u8 train_set)
3918{
3919        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3920                                         DP_TRAIN_PRE_EMPHASIS_MASK);
3921        switch (signal_levels) {
3922        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3923        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3924                return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3925        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3926                return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3927        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3928        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3929                return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3930        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3931        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3932                return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3933        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3934        case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3935                return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3936        default:
3937                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3938                              "0x%x\n", signal_levels);
3939                return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3940        }
3941}
3942
3943/* IVB CPU eDP voltage swing and pre-emphasis control */
3944static u32
3945ivb_cpu_edp_signal_levels(u8 train_set)
3946{
3947        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3948                                         DP_TRAIN_PRE_EMPHASIS_MASK);
3949        switch (signal_levels) {
3950        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3951                return EDP_LINK_TRAIN_400MV_0DB_IVB;
3952        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3953                return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3954        case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3955                return EDP_LINK_TRAIN_400MV_6DB_IVB;
3956
3957        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3958                return EDP_LINK_TRAIN_600MV_0DB_IVB;
3959        case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3960                return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3961
3962        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3963                return EDP_LINK_TRAIN_800MV_0DB_IVB;
3964        case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3965                return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3966
3967        default:
3968                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3969                              "0x%x\n", signal_levels);
3970                return EDP_LINK_TRAIN_500MV_0DB_IVB;
3971        }
3972}
3973
/*
 * Program the voltage swing / pre-emphasis drive levels requested in
 * train_set[0] into the hardware, using the platform-specific encoding.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	u32 signal_levels, mask = 0;
	u8 train_set = intel_dp->train_set[0];

	/*
	 * Pick the platform-specific encoding. mask stays 0 on platforms
	 * whose helper programs the PHY itself, so no bits of intel_dp->DP
	 * are replaced below.
	 */
	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		signal_levels = bxt_signal_levels(intel_dp);
	} else if (HAS_DDI(dev_priv)) {
		signal_levels = ddi_signal_levels(intel_dp);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		signal_levels = ivb_cpu_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
		signal_levels = snb_cpu_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = g4x_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Raw register bits are only meaningful when merged into DP below. */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	/* vswing occupies the low bits of train_set, hence no shift. */
	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	/* Flush the register write before continuing. */
	POSTING_READ(intel_dp->output_reg);
}
4017
/*
 * Write the requested link training pattern, together with the drive
 * levels already latched in intel_dp->DP, out to the port register.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       u8 dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	/* Updates the pattern bits in intel_dp->DP (passed by reference). */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	/* Flush the register write. */
	POSTING_READ(intel_dp->output_reg);
}
4031
/*
 * Switch a DDI port's DP transport to idle-pattern transmission and,
 * except on PORT_A, wait for the hardware to report idle done.
 * No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	u32 val;

	if (!HAS_DDI(dev_priv))
		return;

	/* Replace the current training pattern with the idle pattern. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
				  DP_TP_STATUS_IDLE_DONE, 1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
4061
/*
 * Bring the DP link down: switch the port to the idle training pattern,
 * disable the port (and audio), and apply the IBX transcoder-A hand-back
 * workaround where needed. Bails out if the port is already disabled.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* The idle-pattern encoding differs between CPT-style and g4x ports. */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Now actually turn the port (and audio output) off. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}
4132
4133static void
4134intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4135{
4136        u8 dpcd_ext[6];
4137
4138        /*
4139         * Prior to DP1.3 the bit represented by
4140         * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4141         * if it is set DP_DPCD_REV at 0000h could be at a value less than
4142         * the true capability of the panel. The only way to check is to
4143         * then compare 0000h and 2200h.
4144         */
4145        if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4146              DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4147                return;
4148
4149        if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4150                             &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4151                DRM_ERROR("DPCD failed read at extended capabilities\n");
4152                return;
4153        }
4154
4155        if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4156                DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4157                return;
4158        }
4159
4160        if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4161                return;
4162
4163        DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4164                      (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4165
4166        memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4167}
4168
4169bool
4170intel_dp_read_dpcd(struct intel_dp *intel_dp)
4171{
4172        if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4173                             sizeof(intel_dp->dpcd)) < 0)
4174                return false; /* aux transfer failed */
4175
4176        intel_dp_extended_receiver_capabilities(intel_dp);
4177
4178        DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4179
4180        return intel_dp->dpcd[DP_DPCD_REV] != 0;
4181}
4182
4183bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4184{
4185        u8 dprx = 0;
4186
4187        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4188                              &dprx) != 1)
4189                return false;
4190        return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4191}
4192
/*
 * (Re)read the sink's DSC and FEC capability registers into the cached
 * copies in intel_dp, after first clearing them so sinks without DSC
 * support never leave stale data behind.
 */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			DRM_ERROR("Failed to read DPCD register 0x%x\n",
				  DP_DSC_SUPPORT);

		DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
			      (int)sizeof(intel_dp->dsc_dpcd),
			      intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			DRM_ERROR("Failed to read FEC DPCD register\n");

		DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
	}
}
4226
/*
 * One-time DPCD initialization for eDP panels: read the base DPCD, the
 * eDP display control registers, PSR capabilities, the eDP 1.4+ sink
 * link rates, and (on GLK+) the DSC capabilities.
 *
 * Returns false if the base DPCD could not be read or reports rev 0.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated (when not full). */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}
4305
4306
/*
 * Refresh cached DPCD state on (re)detection. Returns false when the
 * sink looks gone or unusable: aux transfer failure, a dongle with
 * zero sink count, or a failed downstream port info read.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	/*
	 * Some eDP panels do not set a valid value for the sink count,
	 * which is why we don't bother reading it here (nor in
	 * intel_edp_init_dpcd()).
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
		u8 count;
		ssize_t r;

		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
		if (r < 1)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = DP_GET_SINK_COUNT(count);

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4369
4370static bool
4371intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4372{
4373        u8 mstm_cap;
4374
4375        if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4376                return false;
4377
4378        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4379                return false;
4380
4381        return mstm_cap & DP_MST_CAP;
4382}
4383
4384static bool
4385intel_dp_can_mst(struct intel_dp *intel_dp)
4386{
4387        return i915_modparams.enable_dp_mst &&
4388                intel_dp->can_mst &&
4389                intel_dp_sink_can_mst(intel_dp);
4390}
4391
4392static void
4393intel_dp_configure_mst(struct intel_dp *intel_dp)
4394{
4395        struct intel_encoder *encoder =
4396                &dp_to_dig_port(intel_dp)->base;
4397        bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4398
4399        DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4400                      port_name(encoder->port), yesno(intel_dp->can_mst),
4401                      yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4402
4403        if (!intel_dp->can_mst)
4404                return;
4405
4406        intel_dp->is_mst = sink_can_mst &&
4407                i915_modparams.enable_dp_mst;
4408
4409        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4410                                        intel_dp->is_mst);
4411}
4412
4413static bool
4414intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4415{
4416        return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4417                                sink_irq_vector, DP_DPRX_ESI_LEN) ==
4418                DP_DPRX_ESI_LEN;
4419}
4420
/*
 * Build and send a VSC SDP infoframe advertising YCbCr 4:2:0 output
 * (BT.709, CTA range) with the bit depth taken from the crtc state.
 */
static void
intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct dp_sdp vsc_sdp = {};

	/* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
	vsc_sdp.sdp_header.HB0 = 0;
	vsc_sdp.sdp_header.HB1 = 0x7;

	/*
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc_sdp.sdp_header.HB2 = 0x5;

	/*
	 * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/
	 * Colorimetry Format indication (HB2 = 05h).
	 */
	vsc_sdp.sdp_header.HB3 = 0x13;

	/*
	 * YCbCr 420 = 3h DB16[7:4] ITU-R BT.601 = 0h, ITU-R BT.709 = 1h
	 * DB16[3:0] DP 1.4a spec, Table 2-120
	 */
	vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4 , YCbCr 420*/
	/* RGB->YCBCR color conversion uses the BT.709 color space. */
	vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */

	/*
	 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
	 * the following Component Bit Depth values are defined:
	 * 001b = 8bpc.
	 * 010b = 10bpc.
	 * 011b = 12bpc.
	 * 100b = 16bpc.
	 */
	switch (crtc_state->pipe_bpp) {
	case 24: /* 8bpc */
		vsc_sdp.db[17] = 0x1;
		break;
	case 30: /* 10bpc */
		vsc_sdp.db[17] = 0x2;
		break;
	case 36: /* 12bpc */
		vsc_sdp.db[17] = 0x3;
		break;
	case 48: /* 16bpc */
		vsc_sdp.db[17] = 0x4;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	/*
	 * Dynamic Range (Bit 7)
	 * 0 = VESA range, 1 = CTA range.
	 * all YCbCr are always limited range
	 */
	vsc_sdp.db[17] |= 0x80;

	/*
	 * Content Type (Bits 2:0)
	 * 000b = Not defined.
	 * 001b = Graphics.
	 * 010b = Photo.
	 * 011b = Video.
	 * 100b = Game
	 * All other values are RESERVED.
	 * Note: See CTA-861-G for the definition and expected
	 * processing by a stream sink for the above content types.
	 */
	vsc_sdp.db[18] = 0;

	intel_dig_port->write_infoframe(&intel_dig_port->base,
			crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}
4501
4502void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4503                               const struct intel_crtc_state *crtc_state)
4504{
4505        if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4506                return;
4507
4508        intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
4509}
4510
/*
 * Handle a LINK_TRAINING automated-test request: read and validate the
 * requested lane count and link rate, cache them in the compliance
 * state, and ACK or NAK the request.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	/* Convert the BW code (e.g. 0x0a) into a link rate in kHz. */
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4547
/*
 * Handle a TEST_PATTERN automated-test request (DP CTS 3.1.5): only the
 * color-ramp pattern in RGB/VESA range at 6 or 8 bpc is supported; the
 * accepted parameters are cached in the compliance state.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	/* Width/height are big-endian 16-bit DPCD fields. */
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return DP_TEST_ACK;
}
4608
/*
 * Handle an EDID automated-test request: if the cached EDID read was
 * clean, write back its checksum and ask for the preferred resolution;
 * otherwise fall back to failsafe mode (DP CTS 1.2 Core r1.1 4.2.2.x).
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}
4652
4653static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4654{
4655        u8 test_result = DP_TEST_NAK;
4656        return test_result;
4657}
4658
4659static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4660{
4661        u8 response = DP_TEST_NAK;
4662        u8 request = 0;
4663        int status;
4664
4665        status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4666        if (status <= 0) {
4667                DRM_DEBUG_KMS("Could not read test request from sink\n");
4668                goto update_status;
4669        }
4670
4671        switch (request) {
4672        case DP_TEST_LINK_TRAINING:
4673                DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4674                response = intel_dp_autotest_link_training(intel_dp);
4675                break;
4676        case DP_TEST_LINK_VIDEO_PATTERN:
4677                DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4678                response = intel_dp_autotest_video_pattern(intel_dp);
4679                break;
4680        case DP_TEST_LINK_EDID_READ:
4681                DRM_DEBUG_KMS("EDID test requested\n");
4682                response = intel_dp_autotest_edid(intel_dp);
4683                break;
4684        case DP_TEST_LINK_PHY_TEST_PATTERN:
4685                DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4686                response = intel_dp_autotest_phy_pattern(intel_dp);
4687                break;
4688        default:
4689                DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4690                break;
4691        }
4692
4693        if (response & DP_TEST_ACK)
4694                intel_dp->compliance.test_type = request;
4695
4696update_status:
4697        status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4698        if (status <= 0)
4699                DRM_DEBUG_KMS("Could not write test response to sink\n");
4700}
4701
/*
 * Service an MST sink interrupt: read the Event Status Indicator (ESI)
 * registers, retrain the link if channel EQ has been lost, and hand the
 * ESI to the MST topology manager. Loops while the sink keeps raising
 * events, acking each batch by writing the ESI bytes back.
 *
 * Returns the MST manager's result (>0 means a hotplug event should be
 * sent), 0 if the event was not handled, or -EINVAL if the device is not
 * in MST mode or the ESI read failed (MST is then torn down).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		WARN_ON_ONCE(intel_dp->active_mst_links < 0);
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links > 0 &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/*
				 * Ack the event by writing the ESI bytes back;
				 * retry a few times in case the AUX write is
				 * short or fails transiently.
				 */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/*
				 * Re-read the ESI: more events may have been
				 * raised while we were processing this one.
				 */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			/* ESI read failed: assume the sink is gone and disable MST. */
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}
	}
	return -EINVAL;
}
4758
4759static bool
4760intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4761{
4762        u8 link_status[DP_LINK_STATUS_SIZE];
4763
4764        if (!intel_dp->link_trained)
4765                return false;
4766
4767        /*
4768         * While PSR source HW is enabled, it will control main-link sending
4769         * frames, enabling and disabling it so trying to do a retrain will fail
4770         * as the link would or not be on or it could mix training patterns
4771         * and frame data at the same time causing retrain to fail.
4772         * Also when exiting PSR, HW will retrain the link anyways fixing
4773         * any link status error.
4774         */
4775        if (intel_psr_enabled(intel_dp))
4776                return false;
4777
4778        if (!intel_dp_get_link_status(intel_dp, link_status))
4779                return false;
4780
4781        /*
4782         * Validate the cached values of intel_dp->link_rate and
4783         * intel_dp->lane_count before attempting to retrain.
4784         */
4785        if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4786                                        intel_dp->lane_count))
4787                return false;
4788
4789        /* Retrain if Channel EQ or CR not ok */
4790        return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4791}
4792
/*
 * Retrain the DP link for @encoder if intel_dp_needs_link_retrain() says
 * it is required. Takes the connection mutex and the crtc mutex through
 * @ctx; may return -EDEADLK, in which case the caller must back off and
 * retry (locks are released by the caller's drm_modeset_drop_locks()).
 * Underrun reporting is suppressed across the retrain to avoid spurious
 * error messages. Returns 0 on success or when no retrain is needed.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_connector_state *conn_state;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int ret;

	/* FIXME handle the MST connectors as well */

	if (!connector || connector->base.status != connector_status_connected)
		return 0;

	/* Lock order: connection mutex first, then the crtc's mutex below. */
	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	conn_state = connector->base.state;

	crtc = to_intel_crtc(conn_state->crtc);
	if (!crtc)
		return 0;

	ret = drm_modeset_lock(&crtc->base.mutex, ctx);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));

	if (!crtc_state->base.active)
		return 0;

	/* Don't race a commit that is still programming the hardware. */
	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);

	return 0;
}
4857
4858/*
4859 * If display is now connected check links status,
4860 * there has been known issues of link loss triggering
4861 * long pulse.
4862 *
4863 * Some sinks (eg. ASUS PB287Q) seem to perform some
4864 * weird HPD ping pong during modesets. So we can apparently
4865 * end up with HPD going low during a modeset, and then
4866 * going back up soon after. And once that happens we must
4867 * retrain the link to get a picture. That's in case no
4868 * userspace component reacted to intermittent HPD dip.
4869 */
4870static enum intel_hotplug_state
4871intel_dp_hotplug(struct intel_encoder *encoder,
4872                 struct intel_connector *connector,
4873                 bool irq_received)
4874{
4875        struct drm_modeset_acquire_ctx ctx;
4876        enum intel_hotplug_state state;
4877        int ret;
4878
4879        state = intel_encoder_hotplug(encoder, connector, irq_received);
4880
4881        drm_modeset_acquire_init(&ctx, 0);
4882
4883        for (;;) {
4884                ret = intel_dp_retrain_link(encoder, &ctx);
4885
4886                if (ret == -EDEADLK) {
4887                        drm_modeset_backoff(&ctx);
4888                        continue;
4889                }
4890
4891                break;
4892        }
4893
4894        drm_modeset_drop_locks(&ctx);
4895        drm_modeset_acquire_fini(&ctx);
4896        WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4897
4898        /*
4899         * Keeping it consistent with intel_ddi_hotplug() and
4900         * intel_hdmi_hotplug().
4901         */
4902        if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
4903                state = INTEL_HOTPLUG_RETRY;
4904
4905        return state;
4906}
4907
4908static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4909{
4910        u8 val;
4911
4912        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4913                return;
4914
4915        if (drm_dp_dpcd_readb(&intel_dp->aux,
4916                              DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4917                return;
4918
4919        drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4920
4921        if (val & DP_AUTOMATED_TEST_REQUEST)
4922                intel_dp_handle_test_request(intel_dp);
4923
4924        if (val & DP_CP_IRQ)
4925                intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4926
4927        if (val & DP_SINK_SPECIFIC_IRQ)
4928                DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4929}
4930
4931/*
4932 * According to DP spec
4933 * 5.1.2:
4934 *  1. Read DPCD
4935 *  2. Configure link according to Receiver Capabilities
4936 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4937 *  4. Check link status on receipt of hot-plug interrupt
4938 *
4939 * intel_dp_short_pulse -  handles short pulse interrupts
4940 * when full detection is not required.
4941 * Returns %true if short pulse is handled and full detection
4942 * is NOT required and %false otherwise.
4943 */
4944static bool
4945intel_dp_short_pulse(struct intel_dp *intel_dp)
4946{
4947        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4948        u8 old_sink_count = intel_dp->sink_count;
4949        bool ret;
4950
4951        /*
4952         * Clearing compliance test variables to allow capturing
4953         * of values for next automated test request.
4954         */
4955        memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4956
4957        /*
4958         * Now read the DPCD to see if it's actually running
4959         * If the current value of sink count doesn't match with
4960         * the value that was stored earlier or dpcd read failed
4961         * we need to do full detection
4962         */
4963        ret = intel_dp_get_dpcd(intel_dp);
4964
4965        if ((old_sink_count != intel_dp->sink_count) || !ret) {
4966                /* No need to proceed if we are going to do full detect */
4967                return false;
4968        }
4969
4970        intel_dp_check_service_irq(intel_dp);
4971
4972        /* Handle CEC interrupts, if any */
4973        drm_dp_cec_irq(&intel_dp->aux);
4974
4975        /* defer to the hotplug work for link retraining if needed */
4976        if (intel_dp_needs_link_retrain(intel_dp))
4977                return false;
4978
4979        intel_psr_short_pulse(intel_dp);
4980
4981        if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4982                DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4983                /* Send a Hotplug Uevent to userspace to start modeset */
4984                drm_kms_helper_hotplug_event(&dev_priv->drm);
4985        }
4986
4987        return true;
4988}
4989
4990/* XXX this is probably wrong for multiple downstream ports */
4991static enum drm_connector_status
4992intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4993{
4994        struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4995        u8 *dpcd = intel_dp->dpcd;
4996        u8 type;
4997
4998        if (WARN_ON(intel_dp_is_edp(intel_dp)))
4999                return connector_status_connected;
5000
5001        if (lspcon->active)
5002                lspcon_resume(lspcon);
5003
5004        if (!intel_dp_get_dpcd(intel_dp))
5005                return connector_status_disconnected;
5006
5007        /* if there's no downstream port, we're done */
5008        if (!drm_dp_is_branch(dpcd))
5009                return connector_status_connected;
5010
5011        /* If we're HPD-aware, SINK_COUNT changes dynamically */
5012        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
5013            intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5014
5015                return intel_dp->sink_count ?
5016                connector_status_connected : connector_status_disconnected;
5017        }
5018
5019        if (