/* linux/drivers/gpu/drm/i915/intel_display.c */
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Eric Anholt <eric@anholt.net>
 */
  26
  27#include <linux/dmi.h>
  28#include <linux/module.h>
  29#include <linux/input.h>
  30#include <linux/i2c.h>
  31#include <linux/kernel.h>
  32#include <linux/slab.h>
  33#include <linux/vgaarb.h>
  34#include <drm/drm_edid.h>
  35#include <drm/drmP.h>
  36#include "intel_drv.h"
  37#include <drm/i915_drm.h>
  38#include "i915_drv.h"
  39#include "i915_trace.h"
  40#include <drm/drm_atomic.h>
  41#include <drm/drm_atomic_helper.h>
  42#include <drm/drm_dp_helper.h>
  43#include <drm/drm_crtc_helper.h>
  44#include <drm/drm_plane_helper.h>
  45#include <drm/drm_rect.h>
  46#include <linux/dma_remapping.h>
  47#include <linux/reservation.h>
  48#include <linux/dma-buf.h>
  49
/* Primary plane formats for gen <= 3 (DRM fourcc codes supported by the plane) */
static const uint32_t i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_XRGB8888,
};
  57
/* Primary plane formats for gen >= 4; adds BGR ordering and 10bpc variants */
static const uint32_t i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};
  67
/* Primary plane formats for Skylake+; adds per-pixel alpha and packed YUV */
static const uint32_t skl_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
};
  82
/* Cursor formats: the cursor plane only takes 32bpp ARGB */
static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};
  87
  88static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  89                                struct intel_crtc_state *pipe_config);
  90static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  91                                   struct intel_crtc_state *pipe_config);
  92
  93static int intel_framebuffer_init(struct drm_device *dev,
  94                                  struct intel_framebuffer *ifb,
  95                                  struct drm_mode_fb_cmd2 *mode_cmd,
  96                                  struct drm_i915_gem_object *obj);
  97static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
  98static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
  99static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 100                                         struct intel_link_m_n *m_n,
 101                                         struct intel_link_m_n *m2_n2);
 102static void ironlake_set_pipeconf(struct drm_crtc *crtc);
 103static void haswell_set_pipeconf(struct drm_crtc *crtc);
 104static void intel_set_pipe_csc(struct drm_crtc *crtc);
 105static void vlv_prepare_pll(struct intel_crtc *crtc,
 106                            const struct intel_crtc_state *pipe_config);
 107static void chv_prepare_pll(struct intel_crtc *crtc,
 108                            const struct intel_crtc_state *pipe_config);
 109static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 110static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 111static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
 112        struct intel_crtc_state *crtc_state);
 113static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
 114                           int num_connectors);
 115static void skylake_pfit_enable(struct intel_crtc *crtc);
 116static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 117static void ironlake_pfit_enable(struct intel_crtc *crtc);
 118static void intel_modeset_setup_hw_state(struct drm_device *dev);
 119static void intel_pre_disable_primary(struct drm_crtc *crtc);
 120
/* Inclusive [min, max] range for one PLL divider/clock parameter. */
typedef struct {
        int     min, max;
} intel_range_t;

/*
 * Post (p2) divider selection: p2_slow is used for dot clocks below
 * dot_limit, p2_fast at or above it (see i9xx_select_p2_div()).
 */
typedef struct {
        int     dot_limit;
        int     p2_slow, p2_fast;
} intel_p2_t;

/* Per-platform/per-output set of PLL parameter limits. */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
        intel_range_t   dot, vco, n, m, m1, m2, p, p1;
        intel_p2_t          p2;
};
 135
 136/* returns HPLL frequency in kHz */
 137static int valleyview_get_vco(struct drm_i915_private *dev_priv)
 138{
 139        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
 140
 141        /* Obtain SKU information */
 142        mutex_lock(&dev_priv->sb_lock);
 143        hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
 144                CCK_FUSE_HPLL_FREQ_MASK;
 145        mutex_unlock(&dev_priv->sb_lock);
 146
 147        return vco_freq[hpll_freq] * 1000;
 148}
 149
 150static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
 151                                  const char *name, u32 reg)
 152{
 153        u32 val;
 154        int divider;
 155
 156        if (dev_priv->hpll_freq == 0)
 157                dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
 158
 159        mutex_lock(&dev_priv->sb_lock);
 160        val = vlv_cck_read(dev_priv, reg);
 161        mutex_unlock(&dev_priv->sb_lock);
 162
 163        divider = val & CCK_FREQUENCY_VALUES;
 164
 165        WARN((val & CCK_FREQUENCY_STATUS) !=
 166             (divider << CCK_FREQUENCY_STATUS_SHIFT),
 167             "%s change in progress\n", name);
 168
 169        return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
 170}
 171
 172int
 173intel_pch_rawclk(struct drm_device *dev)
 174{
 175        struct drm_i915_private *dev_priv = dev->dev_private;
 176
 177        WARN_ON(!HAS_PCH_SPLIT(dev));
 178
 179        return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
 180}
 181
 182/* hrawclock is 1/4 the FSB frequency */
 183int intel_hrawclk(struct drm_device *dev)
 184{
 185        struct drm_i915_private *dev_priv = dev->dev_private;
 186        uint32_t clkcfg;
 187
 188        /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
 189        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
 190                return 200;
 191
 192        clkcfg = I915_READ(CLKCFG);
 193        switch (clkcfg & CLKCFG_FSB_MASK) {
 194        case CLKCFG_FSB_400:
 195                return 100;
 196        case CLKCFG_FSB_533:
 197                return 133;
 198        case CLKCFG_FSB_667:
 199                return 166;
 200        case CLKCFG_FSB_800:
 201                return 200;
 202        case CLKCFG_FSB_1067:
 203                return 266;
 204        case CLKCFG_FSB_1333:
 205                return 333;
 206        /* these two are just a guess; one of them might be right */
 207        case CLKCFG_FSB_1600:
 208        case CLKCFG_FSB_1600_ALT:
 209                return 400;
 210        default:
 211                return 133;
 212        }
 213}
 214
 215static void intel_update_czclk(struct drm_i915_private *dev_priv)
 216{
 217        if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
 218                return;
 219
 220        dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
 221                                                      CCK_CZ_CLOCK_CONTROL);
 222
 223        DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
 224}
 225
 226static inline u32 /* units of 100MHz */
 227intel_fdi_link_freq(struct drm_device *dev)
 228{
 229        if (IS_GEN5(dev)) {
 230                struct drm_i915_private *dev_priv = dev->dev_private;
 231                return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
 232        } else
 233                return 27;
 234}
 235
/* gen2 (i8xx) PLL limits: VGA DAC output */
static const intel_limit_t intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};

/* gen2 (i8xx) PLL limits: DVO output (only p2_fast differs from the DAC) */
static const intel_limit_t intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};

/* gen2 (i8xx) PLL limits: LVDS output */
static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};
 274
/* gen3/gen4 (i9xx) PLL limits: SDVO and default outputs */
static const intel_limit_t intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* gen3/gen4 (i9xx) PLL limits: LVDS output */
static const intel_limit_t intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};
 300
 301
/* G4x PLL limits: SDVO output */
static const intel_limit_t intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};

/* G4x PLL limits: HDMI (also used for analog) output */
static const intel_limit_t intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* G4x PLL limits: single-channel LVDS (dot_limit 0 => p2 is fixed) */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};

/* G4x PLL limits: dual-channel LVDS (dot_limit 0 => p2 is fixed) */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};
 357
/* Pineview PLL limits: SDVO output */
static const intel_limit_t intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* Pineview PLL limits: LVDS output (same m/n quirks as SDVO above) */
static const intel_limit_t intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};
 385
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* Ironlake/SNB PLL limits: single-channel LVDS, 120MHz refclk */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake/SNB PLL limits: dual-channel LVDS, 120MHz refclk */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* LVDS 100MHz refclk limits: dual-channel */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};
 456
static const intel_limit_t intel_limits_vlv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have. The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5},
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* m2 is stored in 22.10 fixed point on CHV, hence the << 22 */
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};
 500
 501static bool
 502needs_modeset(struct drm_crtc_state *state)
 503{
 504        return drm_atomic_crtc_needs_modeset(state);
 505}
 506
 507/**
 508 * Returns whether any output on the specified pipe is of the specified type
 509 */
 510bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
 511{
 512        struct drm_device *dev = crtc->base.dev;
 513        struct intel_encoder *encoder;
 514
 515        for_each_encoder_on_crtc(dev, &crtc->base, encoder)
 516                if (encoder->type == type)
 517                        return true;
 518
 519        return false;
 520}
 521
 522/**
 523 * Returns whether any output on the specified pipe will have the specified
 524 * type after a staged modeset is complete, i.e., the same as
 525 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 526 * encoder->crtc.
 527 */
 528static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
 529                                      int type)
 530{
 531        struct drm_atomic_state *state = crtc_state->base.state;
 532        struct drm_connector *connector;
 533        struct drm_connector_state *connector_state;
 534        struct intel_encoder *encoder;
 535        int i, num_connectors = 0;
 536
 537        for_each_connector_in_state(state, connector, connector_state, i) {
 538                if (connector_state->crtc != crtc_state->base.crtc)
 539                        continue;
 540
 541                num_connectors++;
 542
 543                encoder = to_intel_encoder(connector_state->best_encoder);
 544                if (encoder->type == type)
 545                        return true;
 546        }
 547
 548        WARN_ON(num_connectors == 0);
 549
 550        return false;
 551}
 552
 553static const intel_limit_t *
 554intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
 555{
 556        struct drm_device *dev = crtc_state->base.crtc->dev;
 557        const intel_limit_t *limit;
 558
 559        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 560                if (intel_is_dual_link_lvds(dev)) {
 561                        if (refclk == 100000)
 562                                limit = &intel_limits_ironlake_dual_lvds_100m;
 563                        else
 564                                limit = &intel_limits_ironlake_dual_lvds;
 565                } else {
 566                        if (refclk == 100000)
 567                                limit = &intel_limits_ironlake_single_lvds_100m;
 568                        else
 569                                limit = &intel_limits_ironlake_single_lvds;
 570                }
 571        } else
 572                limit = &intel_limits_ironlake_dac;
 573
 574        return limit;
 575}
 576
 577static const intel_limit_t *
 578intel_g4x_limit(struct intel_crtc_state *crtc_state)
 579{
 580        struct drm_device *dev = crtc_state->base.crtc->dev;
 581        const intel_limit_t *limit;
 582
 583        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 584                if (intel_is_dual_link_lvds(dev))
 585                        limit = &intel_limits_g4x_dual_channel_lvds;
 586                else
 587                        limit = &intel_limits_g4x_single_channel_lvds;
 588        } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
 589                   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
 590                limit = &intel_limits_g4x_hdmi;
 591        } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
 592                limit = &intel_limits_g4x_sdvo;
 593        } else /* The option is for other outputs */
 594                limit = &intel_limits_i9xx_sdvo;
 595
 596        return limit;
 597}
 598
 599static const intel_limit_t *
 600intel_limit(struct intel_crtc_state *crtc_state, int refclk)
 601{
 602        struct drm_device *dev = crtc_state->base.crtc->dev;
 603        const intel_limit_t *limit;
 604
 605        if (IS_BROXTON(dev))
 606                limit = &intel_limits_bxt;
 607        else if (HAS_PCH_SPLIT(dev))
 608                limit = intel_ironlake_limit(crtc_state, refclk);
 609        else if (IS_G4X(dev)) {
 610                limit = intel_g4x_limit(crtc_state);
 611        } else if (IS_PINEVIEW(dev)) {
 612                if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 613                        limit = &intel_limits_pineview_lvds;
 614                else
 615                        limit = &intel_limits_pineview_sdvo;
 616        } else if (IS_CHERRYVIEW(dev)) {
 617                limit = &intel_limits_chv;
 618        } else if (IS_VALLEYVIEW(dev)) {
 619                limit = &intel_limits_vlv;
 620        } else if (!IS_GEN2(dev)) {
 621                if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 622                        limit = &intel_limits_i9xx_lvds;
 623                else
 624                        limit = &intel_limits_i9xx_sdvo;
 625        } else {
 626                if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 627                        limit = &intel_limits_i8xx_lvds;
 628                else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
 629                        limit = &intel_limits_i8xx_dvo;
 630                else
 631                        limit = &intel_limits_i8xx_dac;
 632        }
 633        return limit;
 634}
 635
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
        /* Guard against division by zero; 0 signals an invalid clock. */
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}
 656
/* Effective feedback divider for i9xx-style DPLLs: m = 5*(m1+2) + (m2+2). */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
 661
 662static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
 663{
 664        clock->m = i9xx_dpll_compute_m(clock);
 665        clock->p = clock->p1 * clock->p2;
 666        if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
 667                return 0;
 668        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
 669        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 670
 671        return clock->dot;
 672}
 673
 674static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
 675{
 676        clock->m = clock->m1 * clock->m2;
 677        clock->p = clock->p1 * clock->p2;
 678        if (WARN_ON(clock->n == 0 || clock->p == 0))
 679                return 0;
 680        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 681        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 682
 683        return clock->dot / 5;
 684}
 685
 686int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
 687{
 688        clock->m = clock->m1 * clock->m2;
 689        clock->p = clock->p1 * clock->p2;
 690        if (WARN_ON(clock->n == 0 || clock->p == 0))
 691                return 0;
 692        clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
 693                        clock->n << 22);
 694        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 695
 696        return clock->dot / 5;
 697}
 698
/*
 * NOTE: this macro hides a "return false" from the enclosing function;
 * it is only intended for use inside intel_PLL_is_valid() below.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
                               const intel_limit_t *limit,
                               const intel_clock_t *clock)
{
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        /* m1 > m2 required except on platforms with a single m divider */
        if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
            !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        /* combined m/p are only range-limited where the tables define them */
        if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}
 740
 741static int
 742i9xx_select_p2_div(const intel_limit_t *limit,
 743                   const struct intel_crtc_state *crtc_state,
 744                   int target)
 745{
 746        struct drm_device *dev = crtc_state->base.crtc->dev;
 747
 748        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 749                /*
 750                 * For LVDS just rely on its current settings for dual-channel.
 751                 * We haven't figured out how to reliably set up different
 752                 * single/dual channel state, if we even can.
 753                 */
 754                if (intel_is_dual_link_lvds(dev))
 755                        return limit->p2.p2_fast;
 756                else
 757                        return limit->p2.p2_slow;
 758        } else {
 759                if (target < limit->p2.dot_limit)
 760                        return limit->p2.p2_slow;
 761                else
 762                        return limit->p2.p2_fast;
 763        }
 764}
 765
/*
 * Exhaustively search the i9xx divider ranges for the combination whose
 * resulting dot clock is closest to @target.
 *
 * Returns true if any valid combination was found; the best one is stored
 * in @best_clock. When @match_clock is non-NULL, only candidates with the
 * same post divider (p) are considered.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, intel_clock_t *match_clock,
                    intel_clock_t *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        intel_clock_t clock;
        int err = target; /* best error so far; start at the worst case */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* m1 > m2 is required (see intel_PLL_is_valid()) */
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        /* err only shrinks below target when some candidate was stored */
        return (err != target);
}
 812
/*
 * pnv_find_best_dpll - DPLL divisor search for Pineview
 *
 * Same exhaustive search as i9xx_find_best_dpll() but uses the Pineview
 * clock equations (pnv_calc_dpll_params()) and does not restrict m2 < m1.
 * Writes the closest valid candidate to @best_clock and returns true iff
 * one was found.
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	/* worst acceptable error; a candidate must beat this (strict '<') */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only shrinks when a valid candidate was recorded */
	return (err != target);
}
 857
/*
 * g4x_find_best_dpll - DPLL divisor search for G4x
 *
 * Unlike the i9xx variant this accepts any candidate within ~0.585% of
 * @target (err_most) rather than requiring the global minimum, and it
 * iterates n upward / m1,m2,p1 downward per the stated hardware
 * preferences.  Once a candidate is accepted, max_n is clamped to its n
 * so later iterations can only improve with an equal-or-smaller n.
 * @match_clock is ignored here.  Returns true iff a candidate was found.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* don't revisit larger n values */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
 905
 906/*
 907 * Check if the calculated PLL configuration is more optimal compared to the
 908 * best configuration and error found so far. Return the calculated error.
 909 */
 910static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
 911                               const intel_clock_t *calculated_clock,
 912                               const intel_clock_t *best_clock,
 913                               unsigned int best_error_ppm,
 914                               unsigned int *error_ppm)
 915{
 916        /*
 917         * For CHV ignore the error and consider only the P value.
 918         * Prefer a bigger P value based on HW requirements.
 919         */
 920        if (IS_CHERRYVIEW(dev)) {
 921                *error_ppm = 0;
 922
 923                return calculated_clock->p > best_clock->p;
 924        }
 925
 926        if (WARN_ON_ONCE(!target_freq))
 927                return false;
 928
 929        *error_ppm = div_u64(1000000ULL *
 930                                abs(target_freq - calculated_clock->dot),
 931                             target_freq);
 932        /*
 933         * Prefer a better P value over a better (smaller) error if the error
 934         * is small. Ensure this preference for future configurations too by
 935         * setting the error to 0.
 936         */
 937        if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
 938                *error_ppm = 0;
 939
 940                return true;
 941        }
 942
 943        return *error_ppm + 10 < best_error_ppm;
 944}
 945
/*
 * vlv_find_best_dpll - DPLL divisor search for Valleyview
 *
 * Searches n/p1/p2/m1 in the hardware-preferred order and derives m2
 * directly from the target for each combination; candidates are compared
 * with vlv_PLL_is_optimal() on their ppm error.  Note the PLL runs at 5x
 * the requested clock ("fast clock").  @match_clock is ignored.  Writes
 * the winner to @best_clock and returns true iff one was found.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve for m2 given the other divisors */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
 999
1000static bool
1001chv_find_best_dpll(const intel_limit_t *limit,
1002                   struct intel_crtc_state *crtc_state,
1003                   int target, int refclk, intel_clock_t *match_clock,
1004                   intel_clock_t *best_clock)
1005{
1006        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1007        struct drm_device *dev = crtc->base.dev;
1008        unsigned int best_error_ppm;
1009        intel_clock_t clock;
1010        uint64_t m2;
1011        int found = false;
1012
1013        memset(best_clock, 0, sizeof(*best_clock));
1014        best_error_ppm = 1000000;
1015
1016        /*
1017         * Based on hardware doc, the n always set to 1, and m1 always
1018         * set to 2.  If requires to support 200Mhz refclk, we need to
1019         * revisit this because n may not 1 anymore.
1020         */
1021        clock.n = 1, clock.m1 = 2;
1022        target *= 5;    /* fast clock */
1023
1024        for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1025                for (clock.p2 = limit->p2.p2_fast;
1026                                clock.p2 >= limit->p2.p2_slow;
1027                                clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1028                        unsigned int error_ppm;
1029
1030                        clock.p = clock.p1 * clock.p2;
1031
1032                        m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1033                                        clock.n) << 22, refclk * clock.m1);
1034
1035                        if (m2 > INT_MAX/clock.m1)
1036                                continue;
1037
1038                        clock.m2 = m2;
1039
1040                        chv_calc_dpll_params(refclk, &clock);
1041
1042                        if (!intel_PLL_is_valid(dev, limit, &clock))
1043                                continue;
1044
1045                        if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1046                                                best_error_ppm, &error_ppm))
1047                                continue;
1048
1049                        *best_clock = clock;
1050                        best_error_ppm = error_ppm;
1051                        found = true;
1052                }
1053        }
1054
1055        return found;
1056}
1057
1058bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1059                        intel_clock_t *best_clock)
1060{
1061        int refclk = i9xx_get_refclk(crtc_state, 0);
1062
1063        return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1064                                  target_clock, refclk, NULL, best_clock);
1065}
1066
1067bool intel_crtc_active(struct drm_crtc *crtc)
1068{
1069        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1070
1071        /* Be paranoid as we can arrive here with only partial
1072         * state retrieved from the hardware during setup.
1073         *
1074         * We can ditch the adjusted_mode.crtc_clock check as soon
1075         * as Haswell has gained clock readout/fastboot support.
1076         *
1077         * We can ditch the crtc->primary->fb check as soon as we can
1078         * properly reconstruct framebuffers.
1079         *
1080         * FIXME: The intel_crtc->active here should be switched to
1081         * crtc->state->active once we have proper CRTC states wired up
1082         * for atomic.
1083         */
1084        return intel_crtc->active && crtc->primary->state->fb &&
1085                intel_crtc->config->base.adjusted_mode.crtc_clock;
1086}
1087
1088enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1089                                             enum pipe pipe)
1090{
1091        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1092        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1093
1094        return intel_crtc->config->cpu_transcoder;
1095}
1096
1097static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1098{
1099        struct drm_i915_private *dev_priv = dev->dev_private;
1100        i915_reg_t reg = PIPEDSL(pipe);
1101        u32 line1, line2;
1102        u32 line_mask;
1103
1104        if (IS_GEN2(dev))
1105                line_mask = DSL_LINEMASK_GEN2;
1106        else
1107                line_mask = DSL_LINEMASK_GEN3;
1108
1109        line1 = I915_READ(reg) & line_mask;
1110        msleep(5);
1111        line2 = I915_READ(reg) & line_mask;
1112
1113        return line1 == line2;
1114}
1115
1116/*
1117 * intel_wait_for_pipe_off - wait for pipe to turn off
1118 * @crtc: crtc whose pipe to wait for
1119 *
1120 * After disabling a pipe, we can't wait for vblank in the usual way,
1121 * spinning on the vblank interrupt status bit, since we won't actually
1122 * see an interrupt when the pipe is disabled.
1123 *
1124 * On Gen4 and above:
1125 *   wait for the pipe register state bit to turn off
1126 *
1127 * Otherwise:
1128 *   wait for the display line value to settle (it usually
1129 *   ends up stopping at the start of the next frame).
1130 *
1131 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off (100 ms timeout) */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/*
		 * Pre-gen4 has no pipe state bit: wait for the display
		 * line counter to settle instead (100 ms timeout).
		 */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}
1152
1153/* Only for pre-ILK configs */
1154void assert_pll(struct drm_i915_private *dev_priv,
1155                enum pipe pipe, bool state)
1156{
1157        u32 val;
1158        bool cur_state;
1159
1160        val = I915_READ(DPLL(pipe));
1161        cur_state = !!(val & DPLL_VCO_ENABLE);
1162        I915_STATE_WARN(cur_state != state,
1163             "PLL state assertion failure (expected %s, current %s)\n",
1164                        onoff(state), onoff(cur_state));
1165}
1166
1167/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* sideband (CCK) reads require the sb_lock */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1184
1185struct intel_shared_dpll *
1186intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1187{
1188        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1189
1190        if (crtc->config->shared_dpll < 0)
1191                return NULL;
1192
1193        return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1194}
1195
1196/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* asserting on a NULL pll is itself a bug worth warning about */
	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	/* query the real hardware state through the pll's own hook */
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->name, onoff(state), onoff(cur_state));
}
1212
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1234
1235static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1236                          enum pipe pipe, bool state)
1237{
1238        u32 val;
1239        bool cur_state;
1240
1241        val = I915_READ(FDI_RX_CTL(pipe));
1242        cur_state = !!(val & FDI_RX_ENABLE);
1243        I915_STATE_WARN(cur_state != state,
1244             "FDI RX state assertion failure (expected %s, current %s)\n",
1245                        onoff(state), onoff(cur_state));
1246}
1247#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1248#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1249
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	/* elsewhere the FDI TX PLL enable bit must be set */
	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1266
1267void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1268                       enum pipe pipe, bool state)
1269{
1270        u32 val;
1271        bool cur_state;
1272
1273        val = I915_READ(FDI_RX_CTL(pipe));
1274        cur_state = !!(val & FDI_RX_PLL_ENABLE);
1275        I915_STATE_WARN(cur_state != state,
1276             "FDI RX PLL assertion failure (expected %s, current %s)\n",
1277                        onoff(state), onoff(cur_state));
1278}
1279
/*
 * Warn if the panel power sequencer registers for @pipe are write-locked
 * while the panel is powered on.  The PP control register location and the
 * pipe the panel is attached to are platform dependent.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* DDI platforms are not handled by this helper */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	/* panel is "unlocked" if it's off, or the unlock pattern is set */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1321
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	/* 845/865 use a different cursor register layout, fixed to pipe A */
	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1339
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	/*
	 * Only read PIPECONF while the transcoder's power domain is up;
	 * a powered-down transcoder is reported as disabled.
	 */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1367
1368static void assert_plane(struct drm_i915_private *dev_priv,
1369                         enum plane plane, bool state)
1370{
1371        u32 val;
1372        bool cur_state;
1373
1374        val = I915_READ(DSPCNTR(plane));
1375        cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1376        I915_STATE_WARN(cur_state != state,
1377             "plane %c assertion failure (expected %s, current %s)\n",
1378                        plane_name(plane), onoff(state), onoff(cur_state));
1379}
1380
1381#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1382#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1383
/* Warn if any primary plane that could scan out on @pipe is still enabled. */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		/* pre-gen4 planes select their pipe in DSPCNTR */
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1409
/*
 * Warn if any sprite plane on @pipe is still enabled.  The sprite register
 * layout differs per generation: universal planes on gen9+, per-pipe sprite
 * arrays on VLV/CHV, single SPRCTL on gen7/8, DVSCNTR on gen5/6.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1442
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	/*
	 * drm_crtc_vblank_get() returning 0 means vblanks are (still)
	 * enabled, which is the failure case here; drop the reference we
	 * just took so the count is left unchanged.
	 */
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1448
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	/* this check only applies to IBX/CPT PCH platforms */
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	/* any of the refclk source select bits being set counts as enabled */
	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
1461
1462static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1463                                           enum pipe pipe)
1464{
1465        u32 val;
1466        bool enabled;
1467
1468        val = I915_READ(PCH_TRANSCONF(pipe));
1469        enabled = !!(val & TRANS_ENABLE);
1470        I915_STATE_WARN(enabled,
1471             "transcoder assertion failed, should be off on pipe %c but is still active\n",
1472             pipe_name(pipe));
1473}
1474
1475static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1476                            enum pipe pipe, u32 port_sel, u32 val)
1477{
1478        if ((val & DP_PORT_EN) == 0)
1479                return false;
1480
1481        if (HAS_PCH_CPT(dev_priv->dev)) {
1482                u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1483                if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1484                        return false;
1485        } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1486                if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1487                        return false;
1488        } else {
1489                if ((val & DP_PIPE_MASK) != (pipe << 30))
1490                        return false;
1491        }
1492        return true;
1493}
1494
1495static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1496                              enum pipe pipe, u32 val)
1497{
1498        if ((val & SDVO_ENABLE) == 0)
1499                return false;
1500
1501        if (HAS_PCH_CPT(dev_priv->dev)) {
1502                if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1503                        return false;
1504        } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1505                if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1506                        return false;
1507        } else {
1508                if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1509                        return false;
1510        }
1511        return true;
1512}
1513
1514static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1515                              enum pipe pipe, u32 val)
1516{
1517        if ((val & LVDS_PORT_EN) == 0)
1518                return false;
1519
1520        if (HAS_PCH_CPT(dev_priv->dev)) {
1521                if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1522                        return false;
1523        } else {
1524                if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1525                        return false;
1526        }
1527        return true;
1528}
1529
1530static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1531                              enum pipe pipe, u32 val)
1532{
1533        if ((val & ADPA_DAC_ENABLE) == 0)
1534                return false;
1535        if (HAS_PCH_CPT(dev_priv->dev)) {
1536                if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1537                        return false;
1538        } else {
1539                if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1540                        return false;
1541        }
1542        return true;
1543}
1544
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	/* on IBX even a disabled port must not keep transcoder B selected */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1558
/*
 * Assert that the PCH HDMI/SDVO port at @reg is not enabled on transcoder
 * @pipe.  Additionally warns when an IBX port is disabled but still has
 * transcoder B selected (an IBX-specific restriction per the warning
 * below).
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, i915_reg_t reg)
{
        u32 val = I915_READ(reg);
        I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
             "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             i915_mmio_reg_offset(reg), pipe_name(pipe));

        /* IBX: a disabled HDMI port must not be left selecting transcoder B */
        I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
             && (val & SDVO_PIPE_B_SELECT),
             "IBX PCH hdmi port still using transcoder B\n");
}
1571
/*
 * Assert that no PCH port (DP B/C/D, VGA/ADPA, LVDS, HDMI B/C/D) is still
 * enabled and routed to transcoder @pipe.  Called before disabling the
 * PCH transcoder, since ports must be shut down first.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

        val = I915_READ(PCH_ADPA);
        I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
             "PCH VGA enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));

        val = I915_READ(PCH_LVDS);
        I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
             "PCH LVDS enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));

        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
1595
/*
 * Enable the DPLL for a VLV pipe from the precomputed @pipe_config state
 * and wait for it to report lock.  Finishes with the traditional
 * triple-write warmup sequence (NOTE(review): inherited ritual, exact HW
 * requirement not documented here).
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = pipe_config->dpll_hw_state.dpll;

        /* The pipe must be off before its PLL is touched. */
        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev_priv->dev))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150);

        if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
                DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

        I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(crtc->pipe));

        /* We do this three times for luck */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
}
1631
/*
 * Enable the DPLL for a CHV pipe: first ungate the 10bit ("dclkp") clock
 * to the display controller over the DPIO sideband, wait the required
 * >100ns, then set the PLL enable bit and wait for lock.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        /* The pipe must be off before its PLL is touched. */
        assert_pipe_disabled(dev_priv, crtc->pipe);

        mutex_lock(&dev_priv->sb_lock);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);

        /* not sure when this should be written */
        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1668
1669static int intel_num_dvo_pipes(struct drm_device *dev)
1670{
1671        struct intel_crtc *crtc;
1672        int count = 0;
1673
1674        for_each_intel_crtc(dev, crtc)
1675                count += crtc->base.state->active &&
1676                        intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1677
1678        return count;
1679}
1680
/*
 * Enable the DPLL for @crtc's pipe on pre-ILK hardware (the BUG_ON below
 * forbids gen >= 5), including the I830 DVO 2x clock handling and the
 * gen < 4 pixel multiplier double-write.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc->config->dpll_hw_state.dpll;

        /* The pipe must be off before its PLL is touched. */
        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* No really, not for ILK+ */
        BUG_ON(INTEL_INFO(dev)->gen >= 5);

        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev) && !IS_I830(dev))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /* Enable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
                /*
                 * It appears to be important that we don't enable this
                 * for the current pipe before otherwise configuring the
                 * PLL. No idea how this should be handled if multiple
                 * DVO outputs are enabled simultaneosly.
                 */
                dpll |= DPLL_DVO_2X_MODE;
                I915_WRITE(DPLL(!crtc->pipe),
                           I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
        }

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, 0);

        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_INFO(dev)->gen >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc->config->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
}
1746
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose pipe PLL should be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = crtc->pipe;

        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev) &&
            intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
                I915_WRITE(DPLL(PIPE_A),
                           I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
        }

        /* Don't disable pipe or pipe PLLs if needed */
        if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
1783
1784static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1785{
1786        u32 val;
1787
1788        /* Make sure the pipe isn't still relying on us */
1789        assert_pipe_disabled(dev_priv, pipe);
1790
1791        /*
1792         * Leave integrated clock source and reference clock enabled for pipe B.
1793         * The latter is needed for VGA hotplug / manual detection.
1794         */
1795        val = DPLL_VGA_MODE_DIS;
1796        if (pipe == PIPE_B)
1797                val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
1798        I915_WRITE(DPLL(pipe), val);
1799        POSTING_READ(DPLL(pipe));
1800
1801}
1802
/*
 * Disable the DPLL for a CHV @pipe: clear the PLL enable bit while
 * keeping the reference clock (and, for pipes other than A, the
 * integrated CRI clock) running, then gate the 10bit clock to the
 * display controller via the DPIO sideband.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        /* Set PLL en = 0 */
        val = DPLL_SSC_REF_CLK_CHV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;
        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));

        mutex_lock(&dev_priv->sb_lock);

        /* Disable 10bit clock to display controller */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

        mutex_unlock(&dev_priv->sb_lock);
}
1828
/*
 * Wait (up to 1s) for the PHY to report @dport as ready.
 *
 * @expected_mask is given in port B bit positions; for port C it is
 * shifted into place here.  Port B/C status is read from DPLL(0),
 * port D status from DPIO_PHY_STATUS.  Warns instead of failing on
 * timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport,
                         unsigned int expected_mask)
{
        u32 port_mask;
        i915_reg_t dpll_reg;

        switch (dport->port) {
        case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
                dpll_reg = DPLL(0);
                break;
        case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
                dpll_reg = DPLL(0);
                expected_mask <<= 4;
                break;
        case PORT_D:
                port_mask = DPLL_PORTD_READY_MASK;
                dpll_reg = DPIO_PHY_STATUS;
                break;
        default:
                BUG();
        }

        if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
                WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
                     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1858
/*
 * First-use setup for @crtc's shared DPLL: when no pipe is actively
 * using it yet (pll->active == 0), run the PLL's mode_set hook while the
 * PLL is verified to still be off.
 */
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

        if (WARN_ON(pll == NULL))
                return;

        /* The crtc should already hold a config reference on this PLL. */
        WARN_ON(!pll->config.crtc_mask);
        if (pll->active == 0) {
                DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
                WARN_ON(pll->on);
                assert_shared_dpll_disabled(dev_priv, pll);

                pll->mode_set(dev_priv, pll);
        }
}
1877
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc: crtc whose shared DPLL should be enabled
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.  Takes an active reference; the PLL is
 * only physically enabled for the first user.
 */
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

        if (WARN_ON(pll == NULL))
                return;

        if (WARN_ON(pll->config.crtc_mask == 0))
                return;

        DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
                      pll->name, pll->active, pll->on,
                      crtc->base.base.id);

        /* Already enabled by another crtc: just take a reference. */
        if (pll->active++) {
                WARN_ON(!pll->on);
                assert_shared_dpll_enabled(dev_priv, pll);
                return;
        }
        WARN_ON(pll->on);

        intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

        DRM_DEBUG_KMS("enabling %s\n", pll->name);
        pll->enable(dev_priv, pll);
        pll->on = true;
}
1915
/*
 * Drop @crtc's active reference on its shared DPLL and physically
 * disable the PLL (and release the PLLS power domain) once the last
 * user is gone.  No-op before ILK (no shared PLLs) or when the crtc
 * has no PLL assigned.
 */
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

        /* PCH only available on ILK+ */
        if (INTEL_INFO(dev)->gen < 5)
                return;

        if (pll == NULL)
                return;

        if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
                return;

        DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
                      pll->name, pll->active, pll->on,
                      crtc->base.base.id);

        /* Refcount underflow would mean an enable/disable imbalance. */
        if (WARN_ON(pll->active == 0)) {
                assert_shared_dpll_disabled(dev_priv, pll);
                return;
        }

        assert_shared_dpll_enabled(dev_priv, pll);
        WARN_ON(!pll->on);
        if (--pll->active)
                return;

        DRM_DEBUG_KMS("disabling %s\n", pll->name);
        pll->disable(dev_priv, pll);
        pll->on = false;

        intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
1952
1953static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1954                                           enum pipe pipe)
1955{
1956        struct drm_device *dev = dev_priv->dev;
1957        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1958        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1959        i915_reg_t reg;
1960        uint32_t val, pipeconf_val;
1961
1962        /* PCH only available on ILK+ */
1963        BUG_ON(!HAS_PCH_SPLIT(dev));
1964
1965        /* Make sure PCH DPLL is enabled */
1966        assert_shared_dpll_enabled(dev_priv,
1967                                   intel_crtc_to_shared_dpll(intel_crtc));
1968
1969        /* FDI must be feeding us bits for PCH ports */
1970        assert_fdi_tx_enabled(dev_priv, pipe);
1971        assert_fdi_rx_enabled(dev_priv, pipe);
1972
1973        if (HAS_PCH_CPT(dev)) {
1974                /* Workaround: Set the timing override bit before enabling the
1975                 * pch transcoder. */
1976                reg = TRANS_CHICKEN2(pipe);
1977                val = I915_READ(reg);
1978                val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1979                I915_WRITE(reg, val);
1980        }
1981
1982        reg = PCH_TRANSCONF(pipe);
1983        val = I915_READ(reg);
1984        pipeconf_val = I915_READ(PIPECONF(pipe));
1985
1986        if (HAS_PCH_IBX(dev_priv->dev)) {
1987                /*
1988                 * Make the BPC in transcoder be consistent with
1989                 * that in pipeconf reg. For HDMI we must use 8bpc
1990                 * here for both 8bpc and 12bpc.
1991                 */
1992                val &= ~PIPECONF_BPC_MASK;
1993                if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1994                        val |= PIPECONF_8BPC;
1995                else
1996                        val |= pipeconf_val & PIPECONF_BPC_MASK;
1997        }
1998
1999        val &= ~TRANS_INTERLACE_MASK;
2000        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
2001                if (HAS_PCH_IBX(dev_priv->dev) &&
2002                    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
2003                        val |= TRANS_LEGACY_INTERLACED_ILK;
2004                else
2005                        val |= TRANS_INTERLACED;
2006        else
2007                val |= TRANS_PROGRESSIVE;
2008
2009        I915_WRITE(reg, val | TRANS_ENABLE);
2010        if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2011                DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2012}
2013
/*
 * Enable the (single) LPT PCH transcoder.  Requires FDI TX on
 * @cpu_transcoder and FDI RX on transcoder A to already be enabled.
 * Sets the timing override chicken bit as a workaround before enabling.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                      enum transcoder cpu_transcoder)
{
        u32 val, pipeconf_val;

        /* PCH only available on ILK+ */
        BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

        /* Workaround: set timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

        val = TRANS_ENABLE;
        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

        /* Propagate interlaced vs. progressive from the CPU pipe config. */
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
            PIPECONF_INTERLACED_ILK)
                val |= TRANS_INTERLACED;
        else
                val |= TRANS_PROGRESSIVE;

        I915_WRITE(LPT_TRANSCONF, val);
        if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
}
2044
/*
 * Disable the PCH transcoder for @pipe, after checking that FDI and all
 * PCH ports that could feed it are already off.  On CPT also clears the
 * timing override chicken bit that was set at enable time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
{
        struct drm_device *dev = dev_priv->dev;
        i915_reg_t reg;
        uint32_t val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

        if (HAS_PCH_CPT(dev)) {
                /* Workaround: Clear the timing override chicken bit again. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }
}
2075
/*
 * Disable the (single) LPT PCH transcoder and clear the timing override
 * chicken bit that was set when it was enabled.
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(LPT_TRANSCONF);
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
        if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
                DRM_ERROR("Failed to disable PCH transcoder\n");

        /* Workaround: clear timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
2092
2093/**
2094 * intel_enable_pipe - enable a pipe, asserting requirements
2095 * @crtc: crtc responsible for the pipe
2096 *
2097 * Enable @crtc's pipe, making sure that various hardware specific requirements
2098 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2099 */
2100static void intel_enable_pipe(struct intel_crtc *crtc)
2101{
2102        struct drm_device *dev = crtc->base.dev;
2103        struct drm_i915_private *dev_priv = dev->dev_private;
2104        enum pipe pipe = crtc->pipe;
2105        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2106        enum pipe pch_transcoder;
2107        i915_reg_t reg;
2108        u32 val;
2109
2110        DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2111
2112        assert_planes_disabled(dev_priv, pipe);
2113        assert_cursor_disabled(dev_priv, pipe);
2114        assert_sprites_disabled(dev_priv, pipe);
2115
2116        if (HAS_PCH_LPT(dev_priv->dev))
2117                pch_transcoder = TRANSCODER_A;
2118        else
2119                pch_transcoder = pipe;
2120
2121        /*
2122         * A pipe without a PLL won't actually be able to drive bits from
2123         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2124         * need the check.
2125         */
2126        if (HAS_GMCH_DISPLAY(dev_priv->dev))
2127                if (crtc->config->has_dsi_encoder)
2128                        assert_dsi_pll_enabled(dev_priv);
2129                else
2130                        assert_pll_enabled(dev_priv, pipe);
2131        else {
2132                if (crtc->config->has_pch_encoder) {
2133                        /* if driving the PCH, we need FDI enabled */
2134                        assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2135                        assert_fdi_tx_pll_enabled(dev_priv,
2136                                                  (enum pipe) cpu_transcoder);
2137                }
2138                /* FIXME: assert CPU port conditions for SNB+ */
2139        }
2140
2141        reg = PIPECONF(cpu_transcoder);
2142        val = I915_READ(reg);
2143        if (val & PIPECONF_ENABLE) {
2144                WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2145                          (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2146                return;
2147        }
2148
2149        I915_WRITE(reg, val | PIPECONF_ENABLE);
2150        POSTING_READ(reg);
2151
2152        /*
2153         * Until the pipe starts DSL will read as 0, which would cause
2154         * an apparent vblank timestamp jump, which messes up also the
2155         * frame count when it's derived from the timestamps. So let's
2156         * wait for the pipe to start properly before we call
2157         * drm_crtc_vblank_on()
2158         */
2159        if (dev->max_vblank_count == 0 &&
2160            wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
2161                DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
2162}
2163
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

        /*
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
        assert_planes_disabled(dev_priv, pipe);
        assert_cursor_disabled(dev_priv, pipe);
        assert_sprites_disabled(dev_priv, pipe);

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if ((val & PIPECONF_ENABLE) == 0)
                return;

        /*
         * Double wide has implications for planes
         * so best keep it disabled when not needed.
         */
        if (crtc->config->double_wide)
                val &= ~PIPECONF_DOUBLE_WIDE;

        /* Don't disable pipe or pipe PLLs if needed */
        if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
            !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                val &= ~PIPECONF_ENABLE;

        /*
         * The write is issued even when ENABLE must stay set due to the
         * quirks above, so that the double wide bit still gets cleared.
         */
        I915_WRITE(reg, val);
        if ((val & PIPECONF_ENABLE) == 0)
                intel_wait_for_pipe_off(crtc);
}
2213
2214static bool need_vtd_wa(struct drm_device *dev)
2215{
2216#ifdef CONFIG_INTEL_IOMMU
2217        if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2218                return true;
2219#endif
2220        return false;
2221}
2222
/* Size in bytes of one tile: 2k on gen2, 4k on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
        if (IS_GEN2(dev_priv))
                return 2048;

        return 4096;
}
2227
2228static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
2229                                     uint64_t fb_modifier, unsigned int cpp)
2230{
2231        switch (fb_modifier) {
2232        case DRM_FORMAT_MOD_NONE:
2233                return cpp;
2234        case I915_FORMAT_MOD_X_TILED:
2235                if (IS_GEN2(dev_priv))
2236                        return 128;
2237                else
2238                        return 512;
2239        case I915_FORMAT_MOD_Y_TILED:
2240                if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2241                        return 128;
2242                else
2243                        return 512;
2244        case I915_FORMAT_MOD_Yf_TILED:
2245                switch (cpp) {
2246                case 1:
2247                        return 64;
2248                case 2:
2249                case 4:
2250                        return 128;
2251                case 8:
2252                case 16:
2253                        return 256;
2254                default:
2255                        MISSING_CASE(cpp);
2256                        return cpp;
2257                }
2258                break;
2259        default:
2260                MISSING_CASE(fb_modifier);
2261                return cpp;
2262        }
2263}
2264
2265unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2266                               uint64_t fb_modifier, unsigned int cpp)
2267{
2268        if (fb_modifier == DRM_FORMAT_MOD_NONE)
2269                return 1;
2270        else
2271                return intel_tile_size(dev_priv) /
2272                        intel_tile_width(dev_priv, fb_modifier, cpp);
2273}
2274
2275unsigned int
2276intel_fb_align_height(struct drm_device *dev, unsigned int height,
2277                      uint32_t pixel_format, uint64_t fb_modifier)
2278{
2279        unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2280        unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2281
2282        return ALIGN(height, tile_height);
2283}
2284
/*
 * Fill @view for pinning @fb: the normal GGTT view unless the plane is
 * rotated 90/270 degrees, in which case a rotated view is described from
 * the fb's tiling parameters (plus a second set of parameters for the UV
 * plane of NV12).
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
                        const struct drm_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        struct intel_rotation_info *info = &view->params.rotated;
        unsigned int tile_size, tile_width, tile_height, cpp;

        /* Default to the normal (unrotated) view. */
        *view = i915_ggtt_view_normal;

        if (!plane_state)
                return;

        if (!intel_rotation_90_or_270(plane_state->rotation))
                return;

        *view = i915_ggtt_view_rotated;

        info->height = fb->height;
        info->pixel_format = fb->pixel_format;
        info->pitch = fb->pitches[0];
        info->uv_offset = fb->offsets[1];
        info->fb_modifier = fb->modifier[0];

        tile_size = intel_tile_size(dev_priv);

        cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
        tile_height = tile_size / tile_width;

        /* Main plane extent in tiles, rounded up, and its total size. */
        info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
        info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
        info->size = info->width_pages * info->height_pages * tile_size;

        if (info->pixel_format == DRM_FORMAT_NV12) {
                /* NV12 carries a separate half-height UV plane. */
                cpp = drm_format_plane_cpp(fb->pixel_format, 1);
                tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
                tile_height = tile_size / tile_width;

                info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
                info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
                info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
        }
}
2329
2330static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2331{
2332        if (INTEL_INFO(dev_priv)->gen >= 9)
2333                return 256 * 1024;
2334        else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2335                 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2336                return 128 * 1024;
2337        else if (INTEL_INFO(dev_priv)->gen >= 4)
2338                return 4 * 1024;
2339        else
2340                return 0;
2341}
2342
2343static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2344                                         uint64_t fb_modifier)
2345{
2346        switch (fb_modifier) {
2347        case DRM_FORMAT_MOD_NONE:
2348                return intel_linear_alignment(dev_priv);
2349        case I915_FORMAT_MOD_X_TILED:
2350                if (INTEL_INFO(dev_priv)->gen >= 9)
2351                        return 256 * 1024;
2352                return 0;
2353        case I915_FORMAT_MOD_Y_TILED:
2354        case I915_FORMAT_MOD_Yf_TILED:
2355                return 1 * 1024 * 1024;
2356        default:
2357                MISSING_CASE(fb_modifier);
2358                return 0;
2359        }
2360}
2361
/*
 * Pin @fb's backing object into the global GTT using the view implied by
 * @plane_state, and for the normal (unrotated) view also pin a fence
 * register for tiled scan-out.  Undone by intel_unpin_fb_obj().
 *
 * Caller must hold dev->struct_mutex.  Returns 0 on success or a negative
 * error code (with nothing left pinned on failure).
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Alignment requirement depends on the fb's tiling modifier. */
	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * and no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2435
/*
 * Undo intel_pin_and_fence_fb_obj(): drop the fence pin (only taken for the
 * normal view) and unpin the object from its display GGTT view.
 *
 * Caller must hold struct_mutex.
 */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	/* Recompute the same view that was used at pin time. */
	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}
2451
2452/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2453 * is assumed to be a power-of-two. */
2454u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
2455                              int *x, int *y,
2456                              uint64_t fb_modifier,
2457                              unsigned int cpp,
2458                              unsigned int pitch)
2459{
2460        if (fb_modifier != DRM_FORMAT_MOD_NONE) {
2461                unsigned int tile_size, tile_width, tile_height;
2462                unsigned int tile_rows, tiles;
2463
2464                tile_size = intel_tile_size(dev_priv);
2465                tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
2466                tile_height = tile_size / tile_width;
2467
2468                tile_rows = *y / tile_height;
2469                *y %= tile_height;
2470
2471                tiles = *x / (tile_width/cpp);
2472                *x %= tile_width/cpp;
2473
2474                return tile_rows * pitch * tile_height + tiles * tile_size;
2475        } else {
2476                unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
2477                unsigned int offset;
2478
2479                offset = *y * pitch + *x * cpp;
2480                *y = (offset & alignment) / pitch;
2481                *x = ((offset & alignment) - *y * pitch) / cpp;
2482                return offset & ~alignment;
2483        }
2484}
2485
2486static int i9xx_format_to_fourcc(int format)
2487{
2488        switch (format) {
2489        case DISPPLANE_8BPP:
2490                return DRM_FORMAT_C8;
2491        case DISPPLANE_BGRX555:
2492                return DRM_FORMAT_XRGB1555;
2493        case DISPPLANE_BGRX565:
2494                return DRM_FORMAT_RGB565;
2495        default:
2496        case DISPPLANE_BGRX888:
2497                return DRM_FORMAT_XRGB8888;
2498        case DISPPLANE_RGBX888:
2499                return DRM_FORMAT_XBGR8888;
2500        case DISPPLANE_BGRX101010:
2501                return DRM_FORMAT_XRGB2101010;
2502        case DISPPLANE_RGBX101010:
2503                return DRM_FORMAT_XBGR2101010;
2504        }
2505}
2506
2507static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2508{
2509        switch (format) {
2510        case PLANE_CTL_FORMAT_RGB_565:
2511                return DRM_FORMAT_RGB565;
2512        default:
2513        case PLANE_CTL_FORMAT_XRGB_8888:
2514                if (rgb_order) {
2515                        if (alpha)
2516                                return DRM_FORMAT_ABGR8888;
2517                        else
2518                                return DRM_FORMAT_XBGR8888;
2519                } else {
2520                        if (alpha)
2521                                return DRM_FORMAT_ARGB8888;
2522                        else
2523                                return DRM_FORMAT_XRGB8888;
2524                }
2525        case PLANE_CTL_FORMAT_XRGB_2101010:
2526                if (rgb_order)
2527                        return DRM_FORMAT_XBGR2101010;
2528                else
2529                        return DRM_FORMAT_XRGB2101010;
2530        }
2531}
2532
/*
 * Try to wrap the BIOS-programmed scanout buffer (described by
 * @plane_config, which points into stolen memory) in a GEM object and an
 * intel framebuffer, so the boot display survives the driver takeover.
 * Returns true on success, false if the buffer can't or shouldn't be used.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* Page-align the BIOS-provided base/size before carving out stolen. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	/* Stolen offset == GTT offset here: the BIOS mapped it 1:1. */
	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	/* Inherit the tiling the BIOS configured for the plane. */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2595
2596/* Update plane->state->fb to match plane->fb after driver-internal updates */
2597static void
2598update_state_fb(struct drm_plane *plane)
2599{
2600        if (plane->fb == plane->state->fb)
2601                return;
2602
2603        if (plane->state->fb)
2604                drm_framebuffer_unreference(plane->state->fb);
2605        plane->state->fb = plane->fb;
2606        if (plane->state->fb)
2607                drm_framebuffer_reference(plane->state->fb);
2608}
2609
/*
 * Hook up the BIOS framebuffer to the primary plane of @intel_crtc.  First
 * try to reconstruct it via intel_alloc_initial_plane_obj(); failing that,
 * look for another CRTC already scanning out the same GTT address and share
 * its fb.  If neither works, the primary plane is disabled so we don't carry
 * around an "enabled but fb-less" plane state.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* Reconstruction failed; the intel_framebuffer wrapper is unused. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Full-screen, unscaled, unrotated plane state for the boot fb. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;	/* src coords are 16.16 fixed */
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	/* Keep the cached intel_plane_state rects in sync. */
	intel_state->src.x1 = plane_state->src_x;
	intel_state->src.y1 = plane_state->src_y;
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
	intel_state->dst.x1 = plane_state->crtc_x;
	intel_state->dst.y1 = plane_state->crtc_y;
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

	obj = intel_fb_obj(fb);
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2705
/*
 * Program the pre-ILK (gen2-4, VLV/CHV) primary plane registers for the
 * given crtc/plane state: format, tiling, rotation, stride and surface
 * address.  Assumes the fb object is already pinned.
 */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coords are 16.16 fixed point */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Translate the fb's fourcc into the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Format was validated earlier; anything else is a driver bug. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from a tile-aligned surface address plus
		 * an (x, y) offset; split the linear offset accordingly. */
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(dev_priv, &x, &y,
						  fb->modifier[0], cpp,
						  fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		data and adding to linear_offset*/
		linear_offset +=
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
			(crtc_state->pipe_src_w - 1) * cpp;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2819
/*
 * Disable the pre-SKL primary plane: clear DSPCNTR, then zero the surface
 * (gen4+) or base address (gen2/3) register and flush with a posting read.
 */
static void i9xx_disable_primary_plane(struct drm_plane *primary,
				       struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;

	I915_WRITE(DSPCNTR(plane), 0);
	if (INTEL_INFO(dev_priv)->gen >= 4)
		I915_WRITE(DSPSURF(plane), 0);
	else
		I915_WRITE(DSPADDR(plane), 0);
	POSTING_READ(DSPCNTR(plane));
}
2835
/*
 * Program the ILK..BDW primary plane registers for the given crtc/plane
 * state: format, tiling, rotation, stride and surface address.  Assumes
 * the fb object is already pinned.
 */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coords are 16.16 fixed point */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the fb's fourcc into the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Format was validated earlier; anything else is a driver bug. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Split the linear offset into a tile-aligned surface offset plus
	 * an (x, y) remainder, as the hardware expects. */
	linear_offset = y * fb->pitches[0] + x * cpp;
	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(dev_priv, &x, &y,
					  fb->modifier[0], cpp,
					  fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW rotate around the plane origin in hardware, so
		 * only older platforms need the offset adjustment. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			data and adding to linear_offset*/
			linear_offset +=
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
				(crtc_state->pipe_src_w - 1) * cpp;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2925
2926u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2927                              uint64_t fb_modifier, uint32_t pixel_format)
2928{
2929        if (fb_modifier == DRM_FORMAT_MOD_NONE) {
2930                return 64;
2931        } else {
2932                int cpp = drm_format_plane_cpp(pixel_format, 0);
2933
2934                return intel_tile_width(dev_priv, fb_modifier, cpp);
2935        }
2936}
2937
/*
 * Return the GGTT offset of @obj for scan-out by @intel_plane, using the
 * view implied by the plane's current state.  For @plane == 1 (the UV
 * plane of a rotated NV12 fb) the offset is advanced to the UV start page.
 *
 * NOTE(review): on a missing vma this returns -1, which truncates to
 * 0xffffffff in the u32 return type — callers presumably treat that as
 * invalid; verify at call sites.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display engines addressed here only take 32-bit offsets. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2965
/* Disable (unbind) a single pipe scaler by zeroing its control,
 * window-position and window-size registers. */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}
2975
2976/*
2977 * This function detaches (aka. unbinds) unused scalers in hardware
2978 */
2979static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2980{
2981        struct intel_crtc_scaler_state *scaler_state;
2982        int i;
2983
2984        scaler_state = &intel_crtc->config->scaler_state;
2985
2986        /* loop through and disable scalers that aren't in use */
2987        for (i = 0; i < intel_crtc->num_scalers; i++) {
2988                if (!scaler_state->scalers[i].in_use)
2989                        skl_detach_scaler(intel_crtc, i);
2990        }
2991}
2992
/*
 * Translate a DRM fourcc into the SKL+ PLANE_CTL format/order/alpha bits.
 * Returns 0 (and logs a MISSING_CASE) for unsupported formats.
 */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3033
3034u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3035{
3036        switch (fb_modifier) {
3037        case DRM_FORMAT_MOD_NONE:
3038                break;
3039        case I915_FORMAT_MOD_X_TILED:
3040                return PLANE_CTL_TILED_X;
3041        case I915_FORMAT_MOD_Y_TILED:
3042                return PLANE_CTL_TILED_Y;
3043        case I915_FORMAT_MOD_Yf_TILED:
3044                return PLANE_CTL_TILED_YF;
3045        default:
3046                MISSING_CASE(fb_modifier);
3047        }
3048
3049        return 0;
3050}
3051
/*
 * Translate a DRM rotation bitmask into the SKL+ PLANE_CTL rotation bits.
 * Returns 0 for DRM_ROTATE_0 and for unknown values (after MISSING_CASE).
 */
u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case BIT(DRM_ROTATE_0):
		break;
	/*
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	case BIT(DRM_ROTATE_90):
		return PLANE_CTL_ROTATE_270;
	case BIT(DRM_ROTATE_180):
		return PLANE_CTL_ROTATE_180;
	case BIT(DRM_ROTATE_270):
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}
3073
/*
 * Program the SKL+ universal primary plane: PLANE_CTL format/tiling/
 * rotation bits, offsets, stride, optional pipe scaler binding, and
 * finally the surface address (which latches the update).
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation = plane_state->base.rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	int scaler_id = plane_state->scaler_id;
	/* src coords are 16.16 fixed point; dst coords are integer pixels */
	int src_x = plane_state->src.x1 >> 16;
	int src_y = plane_state->src.y1 >> 16;
	int src_w = drm_rect_width(&plane_state->src) >> 16;
	int src_h = drm_rect_height(&plane_state->src) >> 16;
	int dst_x = plane_state->dst.x1;
	int dst_y = plane_state->dst.y1;
	int dst_w = drm_rect_width(&plane_state->dst);
	int dst_h = drm_rect_height(&plane_state->dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	if (intel_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* 90/270 scan out the rotated GGTT view, so x/y swap and
		 * x counts from the far edge of the rotated surface. */
		x_offset = stride * tile_height - src_y - src_h;
		y_offset = src_x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = src_x;
		y_offset = src_y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		/* Route this plane through the assigned pipe scaler;
		 * the plane position is then controlled by the scaler
		 * window instead of PLANE_POS. */
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* Writing the surface address arms the update. */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3158
3159static void skylake_disable_primary_plane(struct drm_plane *primary,
3160                                          struct drm_crtc *crtc)
3161{
3162        struct drm_device *dev = crtc->dev;
3163        struct drm_i915_private *dev_priv = dev->dev_private;
3164        int pipe = to_intel_crtc(crtc)->pipe;
3165
3166        I915_WRITE(PLANE_CTL(pipe, 0), 0);
3167        I915_WRITE(PLANE_SURF(pipe, 0), 0);
3168        POSTING_READ(PLANE_SURF(pipe, 0));
3169}
3170
/* Assume fb object is pinned & idle & fenced and just update base pointers */
/*
 * Legacy atomic base update, historically used by the kgdboc panic
 * handler.  Deliberately stubbed out: it always fails with -ENODEV
 * until the panic path is reworked for the atomic world.
 */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                           int x, int y, enum mode_set_atomic state)
{
        /* Support for kgdboc is disabled, this needs a major rework. */
        DRM_ERROR("legacy panic handler not supported any more.\n");

        return -ENODEV;
}
3181
3182static void intel_complete_page_flips(struct drm_device *dev)
3183{
3184        struct drm_crtc *crtc;
3185
3186        for_each_crtc(dev, crtc) {
3187                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3188                enum plane plane = intel_crtc->plane;
3189
3190                intel_prepare_page_flip(dev, plane);
3191                intel_finish_page_flip_plane(dev, plane);
3192        }
3193}
3194
3195static void intel_update_primary_planes(struct drm_device *dev)
3196{
3197        struct drm_crtc *crtc;
3198
3199        for_each_crtc(dev, crtc) {
3200                struct intel_plane *plane = to_intel_plane(crtc->primary);
3201                struct intel_plane_state *plane_state;
3202
3203                drm_modeset_lock_crtc(crtc, &plane->base);
3204                plane_state = to_intel_plane_state(plane->base.state);
3205
3206                if (plane_state->visible)
3207                        plane->update_plane(&plane->base,
3208                                            to_intel_crtc_state(crtc->state),
3209                                            plane_state);
3210
3211                drm_modeset_unlock_crtc(crtc);
3212        }
3213}
3214
/*
 * Prepare the display for a GPU reset.  Only pre-gen5, non-G4X
 * hardware needs this (elsewhere the reset doesn't touch the
 * display).  When it does run, the modeset locks taken here are
 * released later by intel_finish_reset().
 */
void intel_prepare_reset(struct drm_device *dev)
{
        /* no reset support for gen2 */
        if (IS_GEN2(dev))
                return;

        /* reset doesn't touch the display */
        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
                return;

        drm_modeset_lock_all(dev);
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
        intel_display_suspend(dev);
}
3232
/*
 * Restore the display after a GPU reset.  Counterpart of
 * intel_prepare_reset(): the early returns below mirror its early
 * returns, so the modeset locks are only dropped on the path where
 * they were actually taken (pre-gen5, non-G4X).
 */
void intel_finish_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        /*
         * Flips in the rings will be nuked by the reset,
         * so complete all pending flips so that user space
         * will get its events and not get stuck.
         */
        intel_complete_page_flips(dev);

        /* no reset support for gen2 */
        if (IS_GEN2(dev))
                return;

        /* reset doesn't touch the display */
        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
                /*
                 * Flips in the rings have been nuked by the reset,
                 * so update the base address of all primary
                 * planes to the last fb to make sure we're
                 * showing the correct fb after a reset.
                 *
                 * FIXME: Atomic will make this obsolete since we won't schedule
                 * CS-based flips (which might get lost in gpu resets) any more.
                 */
                intel_update_primary_planes(dev);
                return;
        }

        /*
         * The display has been reset as well,
         * so need a full re-initialization.
         */
        intel_runtime_pm_disable_interrupts(dev_priv);
        intel_runtime_pm_enable_interrupts(dev_priv);

        intel_modeset_init_hw(dev);

        /* Re-arm hotplug interrupt generation under the IRQ lock. */
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irq(&dev_priv->irq_lock);

        intel_display_resume(dev);

        intel_hpd_init(dev_priv);

        /* Pairs with drm_modeset_lock_all() in intel_prepare_reset(). */
        drm_modeset_unlock_all(dev);
}
3283
3284static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3285{
3286        struct drm_device *dev = crtc->dev;
3287        struct drm_i915_private *dev_priv = dev->dev_private;
3288        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3289        bool pending;
3290
3291        if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3292            intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3293                return false;
3294
3295        spin_lock_irq(&dev->event_lock);
3296        pending = to_intel_crtc(crtc)->unpin_work != NULL;
3297        spin_unlock_irq(&dev->event_lock);
3298
3299        return pending;
3300}
3301
/*
 * Write the new pipe source size and panel-fitter state to the
 * hardware for a fastboot/flip-style update (no full modeset).
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *old_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->base.state);

        /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
        crtc->base.mode = crtc->base.state->mode;

        DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
                      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
                      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

        if (HAS_DDI(dev))
                intel_set_pipe_csc(&crtc->base);

        /*
         * Update pipe size and adjust fitter if needed: the reason for this is
         * that in compute_mode_changes we check the native mode (not the pfit
         * mode) to see if we can flip rather than do a full mode set. In the
         * fastboot case, we'll flip, but if we don't update the pipesrc and
         * pfit state, we'll end up with a big fb scanned out into the wrong
         * sized surface.
         */

        /* PIPESRC holds (width - 1) in the high half, (height - 1) low. */
        I915_WRITE(PIPESRC(crtc->pipe),
                   ((pipe_config->pipe_src_w - 1) << 16) |
                   (pipe_config->pipe_src_h - 1));

        /* on skylake this is done by detaching scalers */
        if (INTEL_INFO(dev)->gen >= 9) {
                skl_detach_scalers(crtc);

                if (pipe_config->pch_pfit.enabled)
                        skylake_pfit_enable(crtc);
        } else if (HAS_PCH_SPLIT(dev)) {
                if (pipe_config->pch_pfit.enabled)
                        ironlake_pfit_enable(crtc);
                else if (old_crtc_state->pch_pfit.enabled)
                        ironlake_pfit_disable(crtc, true);
        }
}
3346
/*
 * Switch the FDI link from a training pattern to the normal
 * (pixel data) pattern on both the CPU TX and PCH RX side, and
 * enable enhanced framing.  Called once link training succeeded.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;

        /* enable normal train */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        if (IS_IVYBRIDGE(dev)) {
                /* Net effect: TRAIN_NONE_IVB set plus enhanced framing. */
                temp &= ~FDI_LINK_TRAIN_NONE_IVB;
                temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
        }
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                /* CPT uses its own pattern field in the RX control reg. */
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_NORMAL_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_NONE;
        }
        I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

        /* wait one idle pattern time */
        POSTING_READ(reg);
        udelay(1000);

        /* IVB wants error correction enabled */
        if (IS_IVYBRIDGE(dev))
                I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
                           FDI_FE_ERRC_ENABLE);
}
3388
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-stage training: pattern 1 until the RX reports bit lock,
 * then pattern 2 until symbol lock.  Each stage polls FDI_RX_IIR
 * up to 5 times; failure is logged but not returned to the caller.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp, tries;

        /* FDI needs bits from pipe first */
        assert_pipe_enabled(dev_priv, pipe);

        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);
        I915_READ(reg);
        udelay(150);

        /* enable CPU FDI TX and PCH FDI RX */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_DP_PORT_WIDTH_MASK;
        temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp | FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(150);

        /* Ironlake workaround, enable clock pointer after FDI enable*/
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
                   FDI_RX_PHASE_SYNC_POINTER_EN);

        /* Poll for bit lock; the IIR bits are write-1-to-clear. */
        reg = FDI_RX_IIR(pipe);
        for (tries = 0; tries < 5; tries++) {
                temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                if ((temp & FDI_RX_BIT_LOCK)) {
                        DRM_DEBUG_KMS("FDI train 1 done.\n");
                        I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                        break;
                }
        }
        if (tries == 5)
                DRM_ERROR("FDI train 1 fail!\n");

        /* Train 2 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* Poll for symbol lock to complete stage 2. */
        reg = FDI_RX_IIR(pipe);
        for (tries = 0; tries < 5; tries++) {
                temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                if (temp & FDI_RX_SYMBOL_LOCK) {
                        I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                        DRM_DEBUG_KMS("FDI train 2 done.\n");
                        break;
                }
        }
        if (tries == 5)
                DRM_ERROR("FDI train 2 fail!\n");

        DRM_DEBUG_KMS("FDI train done\n");

}
3482
/*
 * Voltage swing / pre-emphasis combinations tried in order during
 * SNB-B FDI link training (also reused by the IVB manual trainer).
 */
static const int snb_b_fdi_train_param[] = {
        FDI_LINK_TRAIN_400MV_0DB_SNB_B,
        FDI_LINK_TRAIN_400MV_6DB_SNB_B,
        FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
        FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3489
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Like the ILK trainer but additionally sweeps the vswing/emphasis
 * table (snb_b_fdi_train_param), retrying each setting up to 5
 * times per stage before moving to the next.  Failure is only
 * logged, not propagated.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp, i, retry;

        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* enable CPU FDI TX and PCH FDI RX */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_DP_PORT_WIDTH_MASK;
        temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);

        I915_WRITE(FDI_RX_MISC(pipe),
                   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
        }
        I915_WRITE(reg, temp | FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(150);

        /* Stage 1: sweep vswing levels until bit lock is reported. */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                for (retry = 0; retry < 5; retry++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
                        if (temp & FDI_RX_BIT_LOCK) {
                                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                                DRM_DEBUG_KMS("FDI train 1 done.\n");
                                break;
                        }
                        udelay(50);
                }
                if (retry < 5)
                        break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 1 fail!\n");

        /* Train 2 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        if (IS_GEN6(dev)) {
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                /* SNB-B */
                temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        }
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_2;
        }
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* Stage 2: sweep vswing levels until symbol lock is reported. */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                for (retry = 0; retry < 5; retry++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
                        if (temp & FDI_RX_SYMBOL_LOCK) {
                                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                                DRM_DEBUG_KMS("FDI train 2 done.\n");
                                break;
                        }
                        udelay(50);
                }
                if (retry < 5)
                        break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 2 fail!\n");

        DRM_DEBUG_KMS("FDI train done.\n");
}
3622
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Tries each vswing/pre-emphasis setting twice (hence the *2 on the
 * table size).  For every attempt the link is fully disabled and
 * re-enabled before training patterns 1 and 2 are driven; success
 * jumps straight to train_done, failure moves on to the next level.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp, i, j;

        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
                      I915_READ(FDI_RX_IIR(pipe)));

        /* Try each vswing and preemphasis setting twice before moving on */
        for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
                /* disable first in case we need to retry */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
                temp &= ~FDI_TX_ENABLE;
                I915_WRITE(reg, temp);

                reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_AUTO;
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp &= ~FDI_RX_ENABLE;
                I915_WRITE(reg, temp);

                /* enable CPU FDI TX and PCH FDI RX */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_DP_PORT_WIDTH_MASK;
                temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
                temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                /* j/2 maps the double-length loop back onto the table. */
                temp |= snb_b_fdi_train_param[j/2];
                temp |= FDI_COMPOSITE_SYNC;
                I915_WRITE(reg, temp | FDI_TX_ENABLE);

                I915_WRITE(FDI_RX_MISC(pipe),
                           FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

                reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
                temp |= FDI_COMPOSITE_SYNC;
                I915_WRITE(reg, temp | FDI_RX_ENABLE);

                POSTING_READ(reg);
                udelay(1); /* should be 0.5us */

                /* Poll for bit lock; re-read once in case it just landed. */
                for (i = 0; i < 4; i++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                        if (temp & FDI_RX_BIT_LOCK ||
                            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
                                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                                DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
                                              i);
                                break;
                        }
                        udelay(1); /* should be 0.5us */
                }
                if (i == 4) {
                        DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
                        continue;
                }

                /* Train 2 */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_NONE_IVB;
                temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
                I915_WRITE(reg, temp);

                reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(2); /* should be 1.5us */

                /* Poll for symbol lock; success ends training entirely. */
                for (i = 0; i < 4; i++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                        if (temp & FDI_RX_SYMBOL_LOCK ||
                            (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
                                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                                DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
                                              i);
                                goto train_done;
                        }
                        udelay(2); /* should be 1.5us */
                }
                if (i == 4)
                        DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
        }

train_done:
        DRM_DEBUG_KMS("FDI train done.\n");
}
3742
/*
 * Power up the FDI PLLs for @intel_crtc: first the PCH-side RX PLL
 * (taking BPC from PIPECONF), then switch the RX to PCDclk, and
 * finally the CPU-side TX PLL if it is not already running.  The
 * udelay()s between steps are warm-up times required by hardware.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;

        /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
        temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
        /* Mirror the pipe's BPC setting into the FDI RX control. */
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

        POSTING_READ(reg);
        udelay(200);

        /* Switch from Rawclk to PCDclk */
        temp = I915_READ(reg);
        I915_WRITE(reg, temp | FDI_PCDCLK);

        POSTING_READ(reg);
        udelay(200);

        /* Enable CPU FDI TX PLL, always on for Ironlake */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        if ((temp & FDI_TX_PLL_ENABLE) == 0) {
                I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

                POSTING_READ(reg);
                udelay(100);
        }
}
3779
3780static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3781{
3782        struct drm_device *dev = intel_crtc->base.dev;
3783        struct drm_i915_private *dev_priv = dev->dev_private;
3784        int pipe = intel_crtc->pipe;
3785        i915_reg_t reg;
3786        u32 temp;
3787
3788        /* Switch from PCDclk to Rawclk */
3789        reg = FDI_RX_CTL(pipe);
3790        temp = I915_READ(reg);
3791        I915_WRITE(reg, temp & ~FDI_PCDCLK);
3792
3793        /* Disable CPU FDI TX PLL */
3794        reg = FDI_TX_CTL(pipe);
3795        temp = I915_READ(reg);
3796        I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3797
3798        POSTING_READ(reg);
3799        udelay(100);
3800
3801        reg = FDI_RX_CTL(pipe);
3802        temp = I915_READ(reg);
3803        I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3804
3805        /* Wait for the clocks to turn off. */
3806        POSTING_READ(reg);
3807        udelay(100);
3808}
3809
/*
 * Disable the FDI link for @crtc: turn off CPU TX and PCH RX, apply
 * the Ibexpeak clock-pointer workaround, and leave both sides parked
 * in training pattern 1 with BPC kept consistent with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;

        /* disable CPU FDI tx and PCH FDI rx */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
        POSTING_READ(reg);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(0x7 << 16);
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(100);

        /* Ironlake workaround, disable clock pointer after downing FDI */
        if (HAS_PCH_IBX(dev))
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

        /* still set train pattern 1 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
        }
        /* BPC in FDI rx is consistent with that in PIPECONF */
        temp &= ~(0x07 << 16);
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(100);
}
3862
/*
 * Return true if any CRTC still has framebuffer unpin work
 * outstanding.  For the first such CRTC found, wait a vblank if its
 * flip hasn't completed yet, then report true without checking the
 * remaining CRTCs.
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
        struct intel_crtc *crtc;

        /* Note that we don't need to be called with mode_config.lock here
         * as our list of CRTC objects is static for the lifetime of the
         * device and so cannot disappear as we iterate. Similarly, we can
         * happily treat the predicates as racy, atomic checks as userspace
         * cannot claim and pin a new fb without at least acquiring the
         * struct_mutex and so serialising with us.
         */
        for_each_intel_crtc(dev, crtc) {
                if (atomic_read(&crtc->unpin_work_count) == 0)
                        continue;

                if (crtc->unpin_work)
                        intel_wait_for_vblank(dev, crtc->pipe);

                return true;
        }

        return false;
}
3886
/*
 * Finish a completed page flip: send the userspace vblank event (if
 * one was requested), release the vblank reference, wake waiters on
 * pending_flip_queue and queue the unpin work.  Called with
 * dev->event_lock held (see intel_crtc_wait_for_pending_flips()).
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        struct intel_unpin_work *work = intel_crtc->unpin_work;

        /* ensure that the unpin work is consistent wrt ->pending. */
        smp_rmb();
        intel_crtc->unpin_work = NULL;

        if (work->event)
                drm_send_vblank_event(intel_crtc->base.dev,
                                      intel_crtc->pipe,
                                      work->event);

        drm_crtc_vblank_put(&intel_crtc->base);

        wake_up_all(&dev_priv->pending_flip_queue);
        queue_work(dev_priv->wq, &work->work);

        trace_i915_flip_complete(intel_crtc->plane,
                                 work->pending_flip_obj);
}
3909
/*
 * Wait (interruptibly, up to 60 seconds) for any pending page flip
 * on @crtc to complete.  If the wait times out, the flip is assumed
 * stuck and is force-completed with a one-time warning so userspace
 * doesn't hang.  Returns 0 on success or a negative error if the
 * wait was interrupted.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        long ret;

        /* Nobody should already be waiting when we start a new wait. */
        WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

        ret = wait_event_interruptible_timeout(
                                        dev_priv->pending_flip_queue,
                                        !intel_crtc_has_pending_flip(crtc),
                                        60*HZ);

        if (ret < 0)
                return ret;

        /* ret == 0 means the 60s timeout expired: recover the stuck flip. */
        if (ret == 0) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                spin_lock_irq(&dev->event_lock);
                if (intel_crtc->unpin_work) {
                        WARN_ONCE(1, "Removing stuck page flip\n");
                        page_flip_completed(intel_crtc);
                }
                spin_unlock_irq(&dev->event_lock);
        }

        return 0;
}
3939
3940static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3941{
3942        u32 temp;
3943
3944        I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3945
3946        mutex_lock(&dev_priv->sb_lock);
3947
3948        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3949        temp |= SBI_SSCCTL_DISABLE;
3950        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3951
3952        mutex_unlock(&dev_priv->sb_lock);
3953}
3954
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Gate the pixel clock and stop the modulator before reprogramming. */
	lpt_disable_iclkip(dev_priv);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		/* Split the total divisor into an integer part (divsel) and
		 * a phase-interpolator fraction (phaseinc). */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* All SSC registers are behind the sideband interface. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4034
/*
 * Copy the CPU transcoder's timing registers to the given PCH transcoder
 * so both sides of the FDI link agree on the mode timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Horizontal timings. */
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	/* Vertical timings, including the vsync shift. */
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4058
4059static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4060{
4061        struct drm_i915_private *dev_priv = dev->dev_private;
4062        uint32_t temp;
4063
4064        temp = I915_READ(SOUTH_CHICKEN1);
4065        if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4066                return;
4067
4068        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4069        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4070
4071        temp &= ~FDI_BC_BIFURCATION_SELECT;
4072        if (enable)
4073                temp |= FDI_BC_BIFURCATION_SELECT;
4074
4075        DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4076        I915_WRITE(SOUTH_CHICKEN1, temp);
4077        POSTING_READ(SOUTH_CHICKEN1);
4078}
4079
4080static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4081{
4082        struct drm_device *dev = intel_crtc->base.dev;
4083
4084        switch (intel_crtc->pipe) {
4085        case PIPE_A:
4086                break;
4087        case PIPE_B:
4088                if (intel_crtc->config->fdi_lanes > 2)
4089                        cpt_set_fdi_bc_bifurcation(dev, false);
4090                else
4091                        cpt_set_fdi_bc_bifurcation(dev, true);
4092
4093                break;
4094        case PIPE_C:
4095                cpt_set_fdi_bc_bifurcation(dev, true);
4096
4097                break;
4098        default:
4099                BUG();
4100        }
4101}
4102
4103/* Return which DP Port should be selected for Transcoder DP control */
4104static enum port
4105intel_trans_dp_port_sel(struct drm_crtc *crtc)
4106{
4107        struct drm_device *dev = crtc->dev;
4108        struct intel_encoder *encoder;
4109
4110        for_each_encoder_on_crtc(dev, crtc, encoder) {
4111                if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4112                    encoder->type == INTEL_OUTPUT_EDP)
4113                        return enc_to_dig_port(&encoder->base)->port;
4114        }
4115
4116        return -1;
4117}
4118
4119/*
4120 * Enable PCH resources required for PCH ports:
4121 *   - PCH PLLs
4122 *   - FDI training & RX/TX
4123 *   - update transcoder timings
4124 *   - DP transcoding bits
4125 *   - transcoder
4126 */
4127static void ironlake_pch_enable(struct drm_crtc *crtc)
4128{
4129        struct drm_device *dev = crtc->dev;
4130        struct drm_i915_private *dev_priv = dev->dev_private;
4131        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4132        int pipe = intel_crtc->pipe;
4133        u32 temp;
4134
4135        assert_pch_transcoder_disabled(dev_priv, pipe);
4136
4137        if (IS_IVYBRIDGE(dev))
4138                ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4139
4140        /* Write the TU size bits before fdi link training, so that error
4141         * detection works. */
4142        I915_WRITE(FDI_RX_TUSIZE1(pipe),
4143                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4144
4145        /*
4146         * Sometimes spurious CPU pipe underruns happen during FDI
4147         * training, at least with VGA+HDMI cloning. Suppress them.
4148         */
4149        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4150
4151        /* For PCH output, training FDI link */
4152        dev_priv->display.fdi_link_train(crtc);
4153
4154        /* We need to program the right clock selection before writing the pixel
4155         * mutliplier into the DPLL. */
4156        if (HAS_PCH_CPT(dev)) {
4157                u32 sel;
4158
4159                temp = I915_READ(PCH_DPLL_SEL);
4160                temp |= TRANS_DPLL_ENABLE(pipe);
4161                sel = TRANS_DPLLB_SEL(pipe);
4162                if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
4163                        temp |= sel;
4164                else
4165                        temp &= ~sel;
4166                I915_WRITE(PCH_DPLL_SEL, temp);
4167        }
4168
4169        /* XXX: pch pll's can be enabled any time before we enable the PCH
4170         * transcoder, and we actually should do this to not upset any PCH
4171         * transcoder that already use the clock when we share it.
4172         *
4173         * Note that enable_shared_dpll tries to do the right thing, but
4174         * get_shared_dpll unconditionally resets the pll - we need that to have
4175         * the right LVDS enable sequence. */
4176        intel_enable_shared_dpll(intel_crtc);
4177
4178        /* set transcoder timing, panel must allow it */
4179        assert_panel_unlocked(dev_priv, pipe);
4180        ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4181
4182        intel_fdi_normal_train(crtc);
4183
4184        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4185
4186        /* For PCH DP, enable TRANS_DP_CTL */
4187        if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4188                const struct drm_display_mode *adjusted_mode =
4189                        &intel_crtc->config->base.adjusted_mode;
4190                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4191                i915_reg_t reg = TRANS_DP_CTL(pipe);
4192                temp = I915_READ(reg);
4193                temp &= ~(TRANS_DP_PORT_SEL_MASK |
4194                          TRANS_DP_SYNC_MASK |
4195                          TRANS_DP_BPC_MASK);
4196                temp |= TRANS_DP_OUTPUT_ENABLE;
4197                temp |= bpc << 9; /* same format but at 11:9 */
4198
4199                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4200                        temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4201                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4202                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4203
4204                switch (intel_trans_dp_port_sel(crtc)) {
4205                case PORT_B:
4206                        temp |= TRANS_DP_PORT_SEL_B;
4207                        break;
4208                case PORT_C:
4209                        temp |= TRANS_DP_PORT_SEL_C;
4210                        break;
4211                case PORT_D:
4212                        temp |= TRANS_DP_PORT_SEL_D;
4213                        break;
4214                default:
4215                        BUG();
4216                }
4217
4218                I915_WRITE(reg, temp);
4219        }
4220
4221        ironlake_enable_pch_transcoder(dev_priv, pipe);
4222}
4223
/*
 * LPT variant of the PCH enable sequence: program the iCLKIP clock, copy
 * the timings, and start the (single) PCH transcoder. This path only
 * touches transcoder A / pipe A PCH registers.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	/* Generate the PCH pixel clock before enabling anything downstream. */
	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4240
4241struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4242                                                struct intel_crtc_state *crtc_state)
4243{
4244        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
4245        struct intel_shared_dpll *pll;
4246        struct intel_shared_dpll_config *shared_dpll;
4247        enum intel_dpll_id i;
4248        int max = dev_priv->num_shared_dpll;
4249
4250        shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4251
4252        if (HAS_PCH_IBX(dev_priv->dev)) {
4253                /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4254                i = (enum intel_dpll_id) crtc->pipe;
4255                pll = &dev_priv->shared_dplls[i];
4256
4257                DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4258                              crtc->base.base.id, pll->name);
4259
4260                WARN_ON(shared_dpll[i].crtc_mask);
4261
4262                goto found;
4263        }
4264
4265        if (IS_BROXTON(dev_priv->dev)) {
4266                /* PLL is attached to port in bxt */
4267                struct intel_encoder *encoder;
4268                struct intel_digital_port *intel_dig_port;
4269
4270                encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4271                if (WARN_ON(!encoder))
4272                        return NULL;
4273
4274                intel_dig_port = enc_to_dig_port(&encoder->base);
4275                /* 1:1 mapping between ports and PLLs */
4276                i = (enum intel_dpll_id)intel_dig_port->port;
4277                pll = &dev_priv->shared_dplls[i];
4278                DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4279                        crtc->base.base.id, pll->name);
4280                WARN_ON(shared_dpll[i].crtc_mask);
4281
4282                goto found;
4283        } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4284                /* Do not consider SPLL */
4285                max = 2;
4286
4287        for (i = 0; i < max; i++) {
4288                pll = &dev_priv->shared_dplls[i];
4289
4290                /* Only want to check enabled timings first */
4291                if (shared_dpll[i].crtc_mask == 0)
4292                        continue;
4293
4294                if (memcmp(&crtc_state->dpll_hw_state,
4295                           &shared_dpll[i].hw_state,
4296                           sizeof(crtc_state->dpll_hw_state)) == 0) {
4297                        DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4298                                      crtc->base.base.id, pll->name,
4299                                      shared_dpll[i].crtc_mask,
4300                                      pll->active);
4301                        goto found;
4302                }
4303        }
4304
4305        /* Ok no matching timings, maybe there's a free one? */
4306        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4307                pll = &dev_priv->shared_dplls[i];
4308                if (shared_dpll[i].crtc_mask == 0) {
4309                        DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4310                                      crtc->base.base.id, pll->name);
4311                        goto found;
4312                }
4313        }
4314
4315        return NULL;
4316
4317found:
4318        if (shared_dpll[i].crtc_mask == 0)
4319                shared_dpll[i].hw_state =
4320                        crtc_state->dpll_hw_state;
4321
4322        crtc_state->shared_dpll = i;
4323        DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4324                         pipe_name(crtc->pipe));
4325
4326        shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
4327
4328        return pll;
4329}
4330
4331static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4332{
4333        struct drm_i915_private *dev_priv = to_i915(state->dev);
4334        struct intel_shared_dpll_config *shared_dpll;
4335        struct intel_shared_dpll *pll;
4336        enum intel_dpll_id i;
4337
4338        if (!to_intel_atomic_state(state)->dpll_set)
4339                return;
4340
4341        shared_dpll = to_intel_atomic_state(state)->shared_dpll;
4342        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4343                pll = &dev_priv->shared_dplls[i];
4344                pll->config = shared_dpll[i];
4345        }
4346}
4347
/*
 * Sanity-check that the pipe started scanning out after a mode set by
 * waiting for the scanline counter (PIPEDSL) to move.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	/* Sample the counter, give the pipe time to advance, then wait
	 * (up to 5 ms) for the counter to differ from the sample. */
	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4361
4362static int
4363skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4364                  unsigned scaler_user, int *scaler_id, unsigned int rotation,
4365                  int src_w, int src_h, int dst_w, int dst_h)
4366{
4367        struct intel_crtc_scaler_state *scaler_state =
4368                &crtc_state->scaler_state;
4369        struct intel_crtc *intel_crtc =
4370                to_intel_crtc(crtc_state->base.crtc);
4371        int need_scaling;
4372
4373        need_scaling = intel_rotation_90_or_270(rotation) ?
4374                (src_h != dst_w || src_w != dst_h):
4375                (src_w != dst_w || src_h != dst_h);
4376
4377        /*
4378         * if plane is being disabled or scaler is no more required or force detach
4379         *  - free scaler binded to this plane/crtc
4380         *  - in order to do this, update crtc->scaler_usage
4381         *
4382         * Here scaler state in crtc_state is set free so that
4383         * scaler can be assigned to other user. Actual register
4384         * update to free the scaler is done in plane/panel-fit programming.
4385         * For this purpose crtc/plane_state->scaler_id isn't reset here.
4386         */
4387        if (force_detach || !need_scaling) {
4388                if (*scaler_id >= 0) {
4389                        scaler_state->scaler_users &= ~(1 << scaler_user);
4390                        scaler_state->scalers[*scaler_id].in_use = 0;
4391
4392                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
4393                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
4394                                intel_crtc->pipe, scaler_user, *scaler_id,
4395                                scaler_state->scaler_users);
4396                        *scaler_id = -1;
4397                }
4398                return 0;
4399        }
4400
4401        /* range checks */
4402        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4403                dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4404
4405                src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4406                dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4407                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4408                        "size is out of scaler range\n",
4409                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4410                return -EINVAL;
4411        }
4412
4413        /* mark this plane as a scaler user in crtc_state */
4414        scaler_state->scaler_users |= (1 << scaler_user);
4415        DRM_DEBUG_KMS("scaler_user index %u.%u: "
4416                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4417                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4418                scaler_state->scaler_users);
4419
4420        return 0;
4421}
4422
4423/**
4424 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4425 *
4426 * @state: crtc's scaler state
4427 *
4428 * Return
4429 *     0 - scaler_usage updated successfully
4430 *    error - requested scaling cannot be supported or other error condition
4431 */
4432int skl_update_scaler_crtc(struct intel_crtc_state *state)
4433{
4434        struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4435        const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4436
4437        DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4438                      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4439
4440        return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4441                &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4442                state->pipe_src_w, state->pipe_src_h,
4443                adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4444}
4445
4446/**
4447 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4448 *
4449 * @state: crtc's scaler state
4450 * @plane_state: atomic plane state to update
4451 *
4452 * Return
4453 *     0 - scaler_usage updated successfully
4454 *    error - requested scaling cannot be supported or other error condition
4455 */
4456static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4457                                   struct intel_plane_state *plane_state)
4458{
4459
4460        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4461        struct intel_plane *intel_plane =
4462                to_intel_plane(plane_state->base.plane);
4463        struct drm_framebuffer *fb = plane_state->base.fb;
4464        int ret;
4465
4466        bool force_detach = !fb || !plane_state->visible;
4467
4468        DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4469                      intel_plane->base.base.id, intel_crtc->pipe,
4470                      drm_plane_index(&intel_plane->base));
4471
4472        ret = skl_update_scaler(crtc_state, force_detach,
4473                                drm_plane_index(&intel_plane->base),
4474                                &plane_state->scaler_id,
4475                                plane_state->base.rotation,
4476                                drm_rect_width(&plane_state->src) >> 16,
4477                                drm_rect_height(&plane_state->src) >> 16,
4478                                drm_rect_width(&plane_state->dst),
4479                                drm_rect_height(&plane_state->dst));
4480
4481        if (ret || plane_state->scaler_id < 0)
4482                return ret;
4483
4484        /* check colorkey */
4485        if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4486                DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4487                              intel_plane->base.base.id);
4488                return -EINVAL;
4489        }
4490
4491        /* Check src format */
4492        switch (fb->pixel_format) {
4493        case DRM_FORMAT_RGB565:
4494        case DRM_FORMAT_XBGR8888:
4495        case DRM_FORMAT_XRGB8888:
4496        case DRM_FORMAT_ABGR8888:
4497        case DRM_FORMAT_ARGB8888:
4498        case DRM_FORMAT_XRGB2101010:
4499        case DRM_FORMAT_XBGR2101010:
4500        case DRM_FORMAT_YUYV:
4501        case DRM_FORMAT_YVYU:
4502        case DRM_FORMAT_UYVY:
4503        case DRM_FORMAT_VYUY:
4504                break;
4505        default:
4506                DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4507                        intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4508                return -EINVAL;
4509        }
4510
4511        return 0;
4512}
4513
4514static void skylake_scaler_disable(struct intel_crtc *crtc)
4515{
4516        int i;
4517
4518        for (i = 0; i < crtc->num_scalers; i++)
4519                skl_detach_scaler(crtc, i);
4520}
4521
/*
 * Program the panel fitter on SKL-class hardware, which implements it
 * with one of the pipe scalers.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* pch_pfit needs a scaler; one must already have been
		 * assigned (scaler_id >= 0) before we get here. */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		/* Enable the scaler, then program the fit window. */
		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4549
4550static void ironlake_pfit_enable(struct intel_crtc *crtc)
4551{
4552        struct drm_device *dev = crtc->base.dev;
4553        struct drm_i915_private *dev_priv = dev->dev_private;
4554        int pipe = crtc->pipe;
4555
4556        if (crtc->config->pch_pfit.enabled) {
4557                /* Force use of hard-coded filter coefficients
4558                 * as some pre-programmed values are broken,
4559                 * e.g. x201.
4560                 */
4561                if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4562                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4563                                                 PF_PIPE_SEL_IVB(pipe));
4564                else
4565                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4566                I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4567                I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4568        }
4569}
4570
/* Enable IPS for this crtc, if the crtc's config has it enabled. */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		/* On BDW, IPS is toggled through the pcode mailbox rather
		 * than by writing IPS_CTL directly. */
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4603
/* Disable IPS for this crtc, if the crtc's config has it enabled. */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		/* On BDW, IPS is toggled through the pcode mailbox. */
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4628
/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Write all 256 entries from the prepared r/g/b tables, packed as
	 * R[23:16] G[15:8] B[7:0] in a single register per entry. */
	for (i = 0; i < 256; i++) {
		i915_reg_t palreg;

		/* GMCH platforms use PALETTE, the rest LGC_PALETTE. */
		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	/* Restore IPS if the workaround above turned it off. */
	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
4677
4678static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4679{
4680        if (intel_crtc->overlay) {
4681                struct drm_device *dev = intel_crtc->base.dev;
4682                struct drm_i915_private *dev_priv = dev->dev_private;
4683
4684                mutex_lock(&dev->struct_mutex);
4685                dev_priv->mm.interruptible = false;
4686                (void) intel_overlay_switch_off(intel_crtc->overlay);
4687                dev_priv->mm.interruptible = true;
4688                mutex_unlock(&dev->struct_mutex);
4689        }
4690
4691        /* Let userspace switch the overlay on again. In most cases userspace
4692         * has to recompute where to put it anyway.
4693         */
4694}
4695
4696/**
4697 * intel_post_enable_primary - Perform operations after enabling primary plane
4698 * @crtc: the CRTC whose primary plane was just enabled
4699 *
4700 * Performs potentially sleeping operations that must be done after the primary
4701 * plane is enabled, such as updating FBC and IPS.  Note that this may be
4702 * called due to an explicit primary plane update, or due to an implicit
4703 * re-enable that is caused when a sprite plane is updated to no longer
4704 * completely hide the primary plane.
4705 */
4706static void
4707intel_post_enable_primary(struct drm_crtc *crtc)
4708{
4709        struct drm_device *dev = crtc->dev;
4710        struct drm_i915_private *dev_priv = dev->dev_private;
4711        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4712        int pipe = intel_crtc->pipe;
4713
4714        /*
4715         * FIXME IPS should be fine as long as one plane is
4716         * enabled, but in practice it seems to have problems
4717         * when going from primary only to sprite only and vice
4718         * versa.
4719         */
4720        hsw_enable_ips(intel_crtc);
4721
4722        /*
4723         * Gen2 reports pipe underruns whenever all planes are disabled.
4724         * So don't enable underrun reporting before at least some planes
4725         * are enabled.
4726         * FIXME: Need to fix the logic to work when we turn off all planes
4727         * but leave the pipe running.
4728         */
4729        if (IS_GEN2(dev))
4730                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4731
4732        /* Underruns don't always raise interrupts, so check manually. */
4733        intel_check_cpu_fifo_underruns(dev_priv);
4734        intel_check_pch_fifo_underruns(dev_priv);
4735}
4736
/**
 * intel_pre_disable_primary - Perform operations before disabling primary plane
 * @crtc: the CRTC whose primary plane is to be disabled
 *
 * Performs potentially sleeping operations that must be done before the
 * primary plane is disabled, such as updating FBC and IPS.  Note that this may
 * be called due to an explicit primary plane update, or due to an implicit
 * disable that is caused when a sprite plane completely hides the primary
 * plane.
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         * FIXME: Need to fix the logic to work when we turn off all planes
         * but leave the pipe running.
         */
        if (IS_GEN2(dev))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev)) {
                intel_set_memory_cxsr(dev_priv, false);
                dev_priv->wm.vlv.cxsr = false;
                intel_wait_for_vblank(dev, pipe);
        }

        /*
         * FIXME IPS should be fine as long as one plane is
         * enabled, but in practice it seems to have problems
         * when going from primary only to sprite only and vice
         * versa.
         */
        hsw_disable_ips(intel_crtc);
}
4787
/*
 * Post-commit counterpart of intel_pre_plane_update(): runs after the plane
 * update has been written out. Flushes frontbuffer tracking, re-allows
 * memory self-refresh, and performs the deferred watermark/FBC/primary
 * plane follow-up work recorded in crtc->atomic.
 */
static void intel_post_plane_update(struct intel_crtc *crtc)
{
        struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->base.state);
        struct drm_device *dev = crtc->base.dev;

        /* Tell frontbuffer tracking which planes just flipped. */
        intel_frontbuffer_flip(dev, atomic->fb_bits);

        /* Self-refresh (cxsr) may be enabled again from now on. */
        crtc->wm.cxsr_allowed = true;

        if (pipe_config->wm_changed && pipe_config->base.active)
                intel_update_watermarks(&crtc->base);

        if (atomic->update_fbc)
                intel_fbc_post_update(crtc);

        if (atomic->post_enable_primary)
                intel_post_enable_primary(&crtc->base);

        /* All one-shot flags in crtc->atomic have been consumed. */
        memset(atomic, 0, sizeof(*atomic));
}
4810
4811static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4812{
4813        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4814        struct drm_device *dev = crtc->base.dev;
4815        struct drm_i915_private *dev_priv = dev->dev_private;
4816        struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4817        struct intel_crtc_state *pipe_config =
4818                to_intel_crtc_state(crtc->base.state);
4819        struct drm_atomic_state *old_state = old_crtc_state->base.state;
4820        struct drm_plane *primary = crtc->base.primary;
4821        struct drm_plane_state *old_pri_state =
4822                drm_atomic_get_existing_plane_state(old_state, primary);
4823        bool modeset = needs_modeset(&pipe_config->base);
4824
4825        if (atomic->update_fbc)
4826                intel_fbc_pre_update(crtc);
4827
4828        if (old_pri_state) {
4829                struct intel_plane_state *primary_state =
4830                        to_intel_plane_state(primary->state);
4831                struct intel_plane_state *old_primary_state =
4832                        to_intel_plane_state(old_pri_state);
4833
4834                if (old_primary_state->visible &&
4835                    (modeset || !primary_state->visible))
4836                        intel_pre_disable_primary(&crtc->base);
4837        }
4838
4839        if (pipe_config->disable_cxsr) {
4840                crtc->wm.cxsr_allowed = false;
4841
4842                if (old_crtc_state->base.active)
4843                        intel_set_memory_cxsr(dev_priv, false);
4844        }
4845
4846        if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
4847                intel_update_watermarks(&crtc->base);
4848}
4849
/*
 * Disable the overlay and every plane selected by @plane_mask on @crtc,
 * then report the whole pipe's frontbuffer as flipped.
 */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *p;
        int pipe = intel_crtc->pipe;

        intel_crtc_dpms_overlay_disable(intel_crtc);

        drm_for_each_plane_mask(p, dev, plane_mask)
                to_intel_plane(p)->disable_plane(p, crtc);

        /*
         * FIXME: Once we grow proper nuclear flip support out of this we need
         * to compute the mask of flip planes precisely. For the time being
         * consider this a flip to a NULL plane.
         */
        intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
4869
/*
 * ILK-style (PCH display) modeset enable sequence: program pipe timings and
 * M/N values, enable the CPU pipe and — when a PCH encoder is present — the
 * FDI link and PCH transcoder, then enable the encoders. The ordering of
 * the steps below is hardware-mandated; do not reorder casually.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;

        if (WARN_ON(intel_crtc->active))
                return;

        /*
         * Suppress PCH FIFO underrun reporting during bring-up; it is
         * re-enabled at the bottom of this function.
         */
        if (intel_crtc->config->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (intel_crtc->config->has_pch_encoder)
                intel_prepare_shared_dpll(intel_crtc);

        if (intel_crtc->config->has_dp_encoder)
                intel_dp_set_m_n(intel_crtc, M1_N1);

        intel_set_pipe_timings(intel_crtc);

        if (intel_crtc->config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(intel_crtc,
                                     &intel_crtc->config->fdi_m_n, NULL);
        }

        ironlake_set_pipeconf(crtc);

        intel_crtc->active = true;

        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);

        if (intel_crtc->config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ironlake_fdi_pll_enable(intel_crtc);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ironlake_pfit_enable(intel_crtc);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_crtc_load_lut(crtc);

        intel_update_watermarks(crtc);
        intel_enable_pipe(intel_crtc);

        if (intel_crtc->config->has_pch_encoder)
                ironlake_pch_enable(crtc);

        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);

        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);

        if (HAS_PCH_CPT(dev))
                cpt_verify_modeset(dev, intel_crtc->pipe);

        /* Must wait for vblank to avoid spurious PCH FIFO underruns */
        if (intel_crtc->config->has_pch_encoder)
                intel_wait_for_vblank(dev, pipe);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4945
4946/* IPS only exists on ULT machines and is tied to pipe A. */
4947static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4948{
4949        return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4950}
4951
/*
 * HSW/BDW/SKL+ modeset enable sequence: program timings, pipeconf/CSC and
 * DDI/transcoder state, then bring up the pipe, the PCH path (if any) and
 * finally the encoders. Step ordering is hardware-mandated.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe, hsw_workaround_pipe;
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->state);

        if (WARN_ON(intel_crtc->active))
                return;

        /* Suppress PCH FIFO underrun reporting (transcoder A) during
         * bring-up; re-enabled below after the vblank waits. */
        if (intel_crtc->config->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      false);

        if (intel_crtc_to_shared_dpll(intel_crtc))
                intel_enable_shared_dpll(intel_crtc);

        if (intel_crtc->config->has_dp_encoder)
                intel_dp_set_m_n(intel_crtc, M1_N1);

        intel_set_pipe_timings(intel_crtc);

        /* Program the pipe's pixel multiplier; skipped for the eDP transcoder. */
        if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
                I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
                           intel_crtc->config->pixel_multiplier - 1);
        }

        if (intel_crtc->config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(intel_crtc,
                                     &intel_crtc->config->fdi_m_n, NULL);
        }

        haswell_set_pipeconf(crtc);

        intel_set_pipe_csc(crtc);

        intel_crtc->active = true;

        /* CPU FIFO underrun reporting stays off until the PCH path is up. */
        if (intel_crtc->config->has_pch_encoder)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        else
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        for_each_encoder_on_crtc(dev, crtc, encoder) {
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);
        }

        if (intel_crtc->config->has_pch_encoder)
                dev_priv->display.fdi_link_train(crtc);

        if (!intel_crtc->config->has_dsi_encoder)
                intel_ddi_enable_pipe_clock(intel_crtc);

        if (INTEL_INFO(dev)->gen >= 9)
                skylake_pfit_enable(intel_crtc);
        else
                ironlake_pfit_enable(intel_crtc);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_crtc_load_lut(crtc);

        intel_ddi_set_pipe_settings(crtc);
        if (!intel_crtc->config->has_dsi_encoder)
                intel_ddi_enable_transcoder_func(crtc);

        intel_update_watermarks(crtc);
        intel_enable_pipe(intel_crtc);

        if (intel_crtc->config->has_pch_encoder)
                lpt_pch_enable(crtc);

        if (intel_crtc->config->dp_encoder_is_mst)
                intel_ddi_set_vc_payload_alloc(crtc, true);

        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);

        for_each_encoder_on_crtc(dev, crtc, encoder) {
                encoder->enable(encoder);
                intel_opregion_notify_encoder(encoder, true);
        }

        /* Wait two vblanks before re-enabling underrun reporting —
         * NOTE(review): presumably to avoid spurious underruns right after
         * enabling, mirroring the single wait in ironlake_crtc_enable(). */
        if (intel_crtc->config->has_pch_encoder) {
                intel_wait_for_vblank(dev, pipe);
                intel_wait_for_vblank(dev, pipe);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      true);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
        if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev, hsw_workaround_pipe);
                intel_wait_for_vblank(dev, hsw_workaround_pipe);
        }
}
5057
5058static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5059{
5060        struct drm_device *dev = crtc->base.dev;
5061        struct drm_i915_private *dev_priv = dev->dev_private;
5062        int pipe = crtc->pipe;
5063
5064        /* To avoid upsetting the power well on haswell only disable the pfit if
5065         * it's in use. The hw state code will make sure we get this right. */
5066        if (force || crtc->config->pch_pfit.enabled) {
5067                I915_WRITE(PF_CTL(pipe), 0);
5068                I915_WRITE(PF_WIN_POS(pipe), 0);
5069                I915_WRITE(PF_WIN_SZ(pipe), 0);
5070        }
5071}
5072
/*
 * ILK-style modeset disable sequence: disable the encoders, the CPU pipe
 * and the panel fitter, then tear down FDI and the PCH transcoder where a
 * PCH encoder is present. Step ordering is hardware-mandated.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;

        /* Suppress PCH underrun reporting while tearing things down. */
        if (intel_crtc->config->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->disable(encoder);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        if (intel_crtc->config->has_pch_encoder)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_disable_pipe(intel_crtc);

        ironlake_pfit_disable(intel_crtc, false);

        if (intel_crtc->config->has_pch_encoder) {
                ironlake_fdi_disable(crtc);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        }

        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);

        if (intel_crtc->config->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);

                if (HAS_PCH_CPT(dev)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = I915_READ(reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        I915_WRITE(reg, temp);

                        /* disable DPLL_SEL */
                        temp = I915_READ(PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }

                ironlake_fdi_pll_disable(intel_crtc);
        }

        /* Everything is off; it is safe to report PCH underruns again. */
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5137
/*
 * HSW+ modeset disable sequence: notify/disable encoders, shut down the
 * pipe, scaler/pfit and DDI transcoder, then tear down the LPT PCH path
 * where present. Step ordering is hardware-mandated.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

        /* Suppress PCH underrun reporting (transcoder A) during teardown. */
        if (intel_crtc->config->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      false);

        for_each_encoder_on_crtc(dev, crtc, encoder) {
                intel_opregion_notify_encoder(encoder, false);
                encoder->disable(encoder);
        }

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(intel_crtc);

        if (intel_crtc->config->dp_encoder_is_mst)
                intel_ddi_set_vc_payload_alloc(crtc, false);

        if (!intel_crtc->config->has_dsi_encoder)
                intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

        /* Gen9+ uses per-pipe scalers instead of the ILK-style pfit. */
        if (INTEL_INFO(dev)->gen >= 9)
                skylake_scaler_disable(intel_crtc);
        else
                ironlake_pfit_disable(intel_crtc, false);

        if (!intel_crtc->config->has_dsi_encoder)
                intel_ddi_disable_pipe_clock(intel_crtc);

        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);

        if (intel_crtc->config->has_pch_encoder) {
                lpt_disable_pch_transcoder(dev_priv);
                lpt_disable_iclkip(dev_priv);
                intel_ddi_fdi_disable(crtc);

                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      true);
        }
}
5187
/*
 * Enable the GMCH panel fitter using the precomputed values in the pipe
 * config. A no-op when the config doesn't use the fitter. Must be called
 * while the pipe is still disabled (asserted below).
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc_state *pipe_config = crtc->config;

        if (!pipe_config->gmch_pfit.control)
                return;

        /*
         * The panel fitter should only be adjusted whilst the pipe is disabled,
         * according to register description and PRM.
         */
        WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
        assert_pipe_disabled(dev_priv, crtc->pipe);

        I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
        I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
5211
5212static enum intel_display_power_domain port_to_power_domain(enum port port)
5213{
5214        switch (port) {
5215        case PORT_A:
5216                return POWER_DOMAIN_PORT_DDI_A_LANES;
5217        case PORT_B:
5218                return POWER_DOMAIN_PORT_DDI_B_LANES;
5219        case PORT_C:
5220                return POWER_DOMAIN_PORT_DDI_C_LANES;
5221        case PORT_D:
5222                return POWER_DOMAIN_PORT_DDI_D_LANES;
5223        case PORT_E:
5224                return POWER_DOMAIN_PORT_DDI_E_LANES;
5225        default:
5226                MISSING_CASE(port);
5227                return POWER_DOMAIN_PORT_OTHER;
5228        }
5229}
5230
5231static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5232{
5233        switch (port) {
5234        case PORT_A:
5235                return POWER_DOMAIN_AUX_A;
5236        case PORT_B:
5237                return POWER_DOMAIN_AUX_B;
5238        case PORT_C:
5239                return POWER_DOMAIN_AUX_C;
5240        case PORT_D:
5241                return POWER_DOMAIN_AUX_D;
5242        case PORT_E:
5243                /* FIXME: Check VBT for actual wiring of PORT E */
5244                return POWER_DOMAIN_AUX_D;
5245        default:
5246                MISSING_CASE(port);
5247                return POWER_DOMAIN_AUX_A;
5248        }
5249}
5250
/**
 * intel_display_port_power_domain - power domain for an encoder's port
 * @intel_encoder: the encoder in question
 *
 * Map @intel_encoder to the display power domain required to drive the
 * port (lanes) it sits on.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
        struct drm_device *dev = intel_encoder->base.dev;
        struct intel_digital_port *intel_dig_port;

        switch (intel_encoder->type) {
        case INTEL_OUTPUT_UNKNOWN:
                /* Only DDI platforms should ever use this output type */
                WARN_ON_ONCE(!HAS_DDI(dev));
                /* fallthrough - handled like the DP/HDMI/eDP cases below */
        case INTEL_OUTPUT_DISPLAYPORT:
        case INTEL_OUTPUT_HDMI:
        case INTEL_OUTPUT_EDP:
                intel_dig_port = enc_to_dig_port(&intel_encoder->base);
                return port_to_power_domain(intel_dig_port->port);
        case INTEL_OUTPUT_DP_MST:
                intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
                return port_to_power_domain(intel_dig_port->port);
        case INTEL_OUTPUT_ANALOG:
                return POWER_DOMAIN_PORT_CRT;
        case INTEL_OUTPUT_DSI:
                return POWER_DOMAIN_PORT_DSI;
        default:
                return POWER_DOMAIN_PORT_OTHER;
        }
}
5277
/**
 * intel_display_port_aux_power_domain - AUX power domain for an encoder
 * @intel_encoder: the encoder in question
 *
 * Map @intel_encoder to the power domain required to use the AUX channel
 * of the port it sits on.
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
        struct drm_device *dev = intel_encoder->base.dev;
        struct intel_digital_port *intel_dig_port;

        switch (intel_encoder->type) {
        case INTEL_OUTPUT_UNKNOWN:
        case INTEL_OUTPUT_HDMI:
                /*
                 * Only DDI platforms should ever use these output types.
                 * We can get here after the HDMI detect code has already set
                 * the type of the shared encoder. Since we can't be sure
                 * what's the status of the given connectors, play safe and
                 * run the DP detection too.
                 */
                WARN_ON_ONCE(!HAS_DDI(dev));
                /* fallthrough - treated like the DP/eDP cases below */
        case INTEL_OUTPUT_DISPLAYPORT:
        case INTEL_OUTPUT_EDP:
                intel_dig_port = enc_to_dig_port(&intel_encoder->base);
                return port_to_aux_power_domain(intel_dig_port->port);
        case INTEL_OUTPUT_DP_MST:
                intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
                return port_to_aux_power_domain(intel_dig_port->port);
        default:
                MISSING_CASE(intel_encoder->type);
                return POWER_DOMAIN_AUX_A;
        }
}
5307
/*
 * Compute the mask of display power domains needed by @crtc_state: the
 * pipe itself, its transcoder, the panel fitter (when enabled or forced
 * through) and the port domain of every enabled encoder. Returns 0 when
 * the CRTC is inactive.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
                                            struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        unsigned long mask;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (!crtc_state->base.active)
                return 0;

        mask = BIT(POWER_DOMAIN_PIPE(pipe));
        mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
        if (crtc_state->pch_pfit.enabled ||
            crtc_state->pch_pfit.force_thru)
                mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

        drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                mask |= BIT(intel_display_port_power_domain(intel_encoder));
        }

        return mask;
}
5335
/*
 * Update the power domain references held for @crtc to match @crtc_state:
 * grab references on the domains that are newly needed, and return the
 * mask of previously held domains that are no longer needed. The caller
 * releases the returned domains via modeset_put_power_domains().
 */
static unsigned long
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
                               struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = crtc->dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum intel_display_power_domain domain;
        unsigned long domains, new_domains, old_domains;

        old_domains = intel_crtc->enabled_power_domains;
        intel_crtc->enabled_power_domains = new_domains =
                get_crtc_power_domains(crtc, crtc_state);

        /* Only grab references we don't already hold. */
        domains = new_domains & ~old_domains;

        for_each_power_domain(domain, domains)
                intel_display_power_get(dev_priv, domain);

        /* Previously held domains that are now unused. */
        return old_domains & ~new_domains;
}
5356
/* Release one power-domain reference for each domain set in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
                                      unsigned long domains)
{
        enum intel_display_power_domain domain;

        for_each_power_domain(domain, domains)
                intel_display_power_put(dev_priv, domain);
}
5365
5366static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5367{
5368        int max_cdclk_freq = dev_priv->max_cdclk_freq;
5369
5370        if (INTEL_INFO(dev_priv)->gen >= 9 ||
5371            IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5372                return max_cdclk_freq;
5373        else if (IS_CHERRYVIEW(dev_priv))
5374                return max_cdclk_freq*95/100;
5375        else if (INTEL_INFO(dev_priv)->gen < 4)
5376                return 2*max_cdclk_freq*90/100;
5377        else
5378                return max_cdclk_freq*90/100;
5379}
5380
/*
 * Determine the platform's maximum cdclk frequency — reading fuse/strap
 * registers where the limit is part-specific — and derive the maximum dot
 * clock from it.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
                /* On SKL/KBL the limit is fused into SKL_DFSM. */
                u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

                if (limit == SKL_DFSM_CDCLK_LIMIT_675)
                        dev_priv->max_cdclk_freq = 675000;
                else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
                        dev_priv->max_cdclk_freq = 540000;
                else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
                        dev_priv->max_cdclk_freq = 450000;
                else
                        dev_priv->max_cdclk_freq = 337500;
        } else if (IS_BROADWELL(dev))  {
                /*
                 * FIXME with extra cooling we can allow
                 * 540 MHz for ULX and 675 Mhz for ULT.
                 * How can we know if extra cooling is
                 * available? PCI ID, VTB, something else?
                 */
                if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
                        dev_priv->max_cdclk_freq = 450000;
                else if (IS_BDW_ULX(dev))
                        dev_priv->max_cdclk_freq = 450000;
                else if (IS_BDW_ULT(dev))
                        dev_priv->max_cdclk_freq = 540000;
                else
                        dev_priv->max_cdclk_freq = 675000;
        } else if (IS_CHERRYVIEW(dev)) {
                dev_priv->max_cdclk_freq = 320000;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->max_cdclk_freq = 400000;
        } else {
                /* otherwise assume cdclk is fixed */
                dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
        }

        dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

        DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
                         dev_priv->max_cdclk_freq);

        DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
                         dev_priv->max_dotclk_freq);
}
5428
/*
 * Read back the current cdclk frequency from the hardware, reprogram the
 * gmbus clock divider on VLV/CHV, and (once) determine the platform
 * maximum cdclk.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
        DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
                         dev_priv->cdclk_freq);

        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                /*
                 * Program the gmbus_freq based on the cdclk frequency.
                 * BSpec erroneously claims we should aim for 4MHz, but
                 * in fact 1MHz is the correct frequency.
                 */
                I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
        }

        /* The maximum cdclk only needs to be determined once. */
        if (dev_priv->max_cdclk_freq == 0)
                intel_update_max_cdclk(dev);
}
5454
5455static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5456{
5457        struct drm_i915_private *dev_priv = dev->dev_private;
5458        uint32_t divider;
5459        uint32_t ratio;
5460        uint32_t current_freq;
5461        int ret;
5462
5463        /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5464        switch (frequency) {
5465        case 144000:
5466                divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5467                ratio = BXT_DE_PLL_RATIO(60);
5468                break;
5469        case 288000:
5470                divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5471                ratio = BXT_DE_PLL_RATIO(60);
5472                break;
5473        case 384000:
5474                divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5475                ratio = BXT_DE_PLL_RATIO(60);
5476                break;
5477        case 576000:
5478                divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5479                ratio = BXT_DE_PLL_RATIO(60);
5480                break;
5481        case 624000:
5482                divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5483                ratio = BXT_DE_PLL_RATIO(65);
5484                break;
5485        case 19200:
5486                /*
5487                 * Bypass frequency with DE PLL disabled. Init ratio, divider
5488                 * to suppress GCC warning.
5489                 */
5490                ratio = 0;
5491                divider = 0;
5492                break;
5493        default:
5494                DRM_ERROR("unsupported CDCLK freq %d", frequency);
5495
5496                return;
5497        }
5498
5499        mutex_lock(&dev_priv->rps.hw_lock);
5500        /* Inform power controller of upcoming frequency change */
5501        ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5502                                      0x80000000);
5503        mutex_unlock(&dev_priv->rps.hw_lock);
5504
5505        if (ret) {
5506                DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5507                          ret, frequency);
5508                return;
5509        }
5510
5511        current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5512        /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5513        current_freq = current_freq * 500 + 1000;
5514
5515        /*
5516         * DE PLL has to be disabled when
5517         * - setting to 19.2MHz (bypass, PLL isn't used)
5518         * - before setting to 624MHz (PLL needs toggling)
5519         * - before setting to any frequency from 624MHz (PLL needs toggling)
5520         */
5521        if (frequency == 19200 || frequency == 624000 ||
5522            current_freq == 624000) {
5523                I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5524                /* Timeout 200us */
5525                if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5526                             1))
5527                        DRM_ERROR("timout waiting for DE PLL unlock\n");
5528        }
5529
5530        if (frequency != 19200) {
5531                uint32_t val;
5532
5533                val = I915_READ(BXT_DE_PLL_CTL);
5534                val &= ~BXT_DE_PLL_RATIO_MASK;
5535                val |= ratio;
5536                I915_WRITE(BXT_DE_PLL_CTL, val);
5537
5538                I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5539                /* Timeout 200us */
5540                if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5541                        DRM_ERROR("timeout waiting for DE PLL lock\n");
5542
5543                val = I915_READ(CDCLK_CTL);
5544                val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5545                val |= divider;
5546                /*
5547                 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5548                 * enable otherwise.
5549                 */
5550                val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5551                if (frequency >= 500000)
5552                        val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5553
5554                val &= ~CDCLK_FREQ_DECIMAL_MASK;
5555                /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5556                val |= (frequency - 1000) / 500;
5557                I915_WRITE(CDCLK_CTL, val);
5558        }
5559
5560        mutex_lock(&dev_priv->rps.hw_lock);
5561        ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5562                                      DIV_ROUND_UP(frequency, 25000));
5563        mutex_unlock(&dev_priv->rps.hw_lock);
5564
5565        if (ret) {
5566                DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5567                          ret, frequency);
5568                return;
5569        }
5570
5571        intel_update_cdclk(dev);
5572}
5573
/*
 * One-time CDCLK bring-up for Broxton: disable the PCH reset handshake
 * (there is no PCH on BXT), take the PLLS power well, and — unless the
 * BIOS already enabled the DE PLL — program a 624 MHz CDCLK and power
 * up the DBuf.
 */
void broxton_init_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val;

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 for cdclk */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* check if cd clock is enabled */
	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
		DRM_DEBUG_KMS("Display already initialized\n");
		/*
		 * NOTE(review): returns with the PLLS power reference still
		 * held; presumably intentional so it pairs with the put in
		 * broxton_uninit_cdclk() — confirm.
		 */
		return;
	}

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 * - check if setting the max (or any) cdclk freq is really necessary
	 *   here, it belongs to modeset time
	 */
	broxton_set_cdclk(dev, 624000);

	/* Request DBuf power and poll (~10us) for the state bit. */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout!\n");
}
5615
/*
 * Reverse of broxton_init_cdclk(): power down the DBuf, drop CDCLK to
 * the 19.2 MHz bypass (disabling the DE PLL) and release the PLLS
 * power well taken at init time.
 */
void broxton_uninit_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Drop the DBuf power request and poll (~10us) for power-down. */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");

	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
	broxton_set_cdclk(dev, 19200);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5633
/*
 * Supported SKL CDCLK frequencies and the DPLL0 VCO each one requires.
 * NOTE(review): vco values appear to be MHz (8640 for the eDP 1.4
 * alternate link rates, 8100 otherwise) — see skl_dpll0_enable().
 */
static const struct skl_cdclk_entry {
	unsigned int freq;	/* CDCLK frequency in kHz */
	unsigned int vco;	/* required DPLL0 VCO */
} skl_cdclk_frequencies[] = {
	{ .freq = 308570, .vco = 8640 },
	{ .freq = 337500, .vco = 8100 },
	{ .freq = 432000, .vco = 8640 },
	{ .freq = 450000, .vco = 8100 },
	{ .freq = 540000, .vco = 8100 },
	{ .freq = 617140, .vco = 8640 },
	{ .freq = 675000, .vco = 8100 },
};
5646
/*
 * Encode a CDCLK frequency (kHz) into the CDCLK_CTL decimal field:
 * .1 fixpoint MHz with a -1 MHz offset.
 */
static unsigned int skl_cdclk_decimal(unsigned int freq)
{
	unsigned int khz_above_one_mhz = freq - 1000;

	return khz_above_one_mhz / 500;
}
5651
5652static unsigned int skl_cdclk_get_vco(unsigned int freq)
5653{
5654        unsigned int i;
5655
5656        for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5657                const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5658
5659                if (e->freq == freq)
5660                        return e->vco;
5661        }
5662
5663        return 8100;
5664}
5665
5666static void
5667skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5668{
5669        unsigned int min_freq;
5670        u32 val;
5671
5672        /* select the minimum CDCLK before enabling DPLL 0 */
5673        val = I915_READ(CDCLK_CTL);
5674        val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5675        val |= CDCLK_FREQ_337_308;
5676
5677        if (required_vco == 8640)
5678                min_freq = 308570;
5679        else
5680                min_freq = 337500;
5681
5682        val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5683
5684        I915_WRITE(CDCLK_CTL, val);
5685        POSTING_READ(CDCLK_CTL);
5686
5687        /*
5688         * We always enable DPLL0 with the lowest link rate possible, but still
5689         * taking into account the VCO required to operate the eDP panel at the
5690         * desired frequency. The usual DP link rates operate with a VCO of
5691         * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5692         * The modeset code is responsible for the selection of the exact link
5693         * rate later on, with the constraint of choosing a frequency that
5694         * works with required_vco.
5695         */
5696        val = I915_READ(DPLL_CTRL1);
5697
5698        val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5699                 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5700        val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5701        if (required_vco == 8640)
5702                val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5703                                            SKL_DPLL0);
5704        else
5705                val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5706                                            SKL_DPLL0);
5707
5708        I915_WRITE(DPLL_CTRL1, val);
5709        POSTING_READ(DPLL_CTRL1);
5710
5711        I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5712
5713        if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5714                DRM_ERROR("DPLL0 not locked\n");
5715}
5716
5717static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5718{
5719        int ret;
5720        u32 val;
5721
5722        /* inform PCU we want to change CDCLK */
5723        val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5724        mutex_lock(&dev_priv->rps.hw_lock);
5725        ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5726        mutex_unlock(&dev_priv->rps.hw_lock);
5727
5728        return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5729}
5730
5731static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5732{
5733        unsigned int i;
5734
5735        for (i = 0; i < 15; i++) {
5736                if (skl_cdclk_pcu_ready(dev_priv))
5737                        return true;
5738                udelay(10);
5739        }
5740
5741        return false;
5742}
5743
/*
 * Program a new CDCLK frequency on Skylake: obtain PCU permission,
 * write the frequency-select and decimal fields of CDCLK_CTL, then ack
 * the change to the PCU with the matching voltage/frequency index.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
	struct drm_device *dev = dev_priv->dev;
	u32 freq_select, pcu_ack;

	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch(freq) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308570:
	case 337500:
	default:
		/* unknown frequencies fall back to the lowest bin */
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617140:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* refresh the driver's cached cdclk value */
	intel_update_cdclk(dev);
}
5790
/*
 * Tear down the SKL CDCLK state: power down the DBuf (polling ~10us
 * for the state bit) and then disable DPLL0, waiting up to 1 ms for
 * the PLL to unlock.
 */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	/* disable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");

	/* disable DPLL0 */
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
		DRM_ERROR("Couldn't disable DPLL0\n");
}
5807
/*
 * Bring up the SKL CDCLK state: enable DPLL0 if the BIOS left it off,
 * program CDCLK to the BIOS-chosen frequency, and power up the DBuf.
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	unsigned int required_vco;

	/* DPLL0 not enabled (happens on early BIOS versions) */
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
		/* enable DPLL0 with the VCO the boot cdclk requires */
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
		skl_dpll0_enable(dev_priv, required_vco);
	}

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power and poll (~10us) for the state bit */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}
5831
/*
 * Verify the BIOS-programmed CDCLK/DPLL0 state and reprogram it if it
 * is unusable. Returns true (as int) when sanitization was performed,
 * false when the pre-OS state was left untouched.
 */
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	int freq = dev_priv->skl_boot_cdclk;

	/*
	 * check if the pre-os initialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	/* Is PLL enabled and locked ? */
	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
		/* All well; nothing to sanitize */
		return false;
sanitize:
	/*
	 * As of now initialize with max cdclk till
	 * we get dynamic cdclk support
	 * */
	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
	skl_init_cdclk(dev_priv);

	/* we did have to sanitize */
	return true;
}
5870
/* Adjust CDclk dividers to allow high res or save power if possible.
 *
 * Sequence: request the voltage level from the Punit, then (for 400 MHz)
 * program the CCK display clock divider, and finally adjust the bunit
 * self-refresh exit latency for the new bandwidth.
 */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	/* Punit voltage command: 2 = high (320/400 MHz), 1 = mid, 0 = low */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	/* wait (up to 50 ms) for the Punit to ack the new frequency */
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		/* divider = round(2 * HPLL / cdclk) - 1 */
		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	/* refresh the driver's cached cdclk value */
	intel_update_cdclk(dev);
}
5936
/*
 * Change CDCLK on Cherryview. Unlike VLV, only the Punit register needs
 * to be written; the desired CCK divider goes directly into it.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	/* only the four known CHV cdclk bins are accepted */
	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	/* wait (up to 50 ms) for the Punit to ack the new divider */
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* refresh the driver's cached cdclk value */
	intel_update_cdclk(dev);
}
5977
5978static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5979                                 int max_pixclk)
5980{
5981        int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5982        int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5983
5984        /*
5985         * Really only a few cases to deal with, as only 4 CDclks are supported:
5986         *   200MHz
5987         *   267MHz
5988         *   320/333MHz (depends on HPLL freq)
5989         *   400MHz (VLV only)
5990         * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5991         * of the lower bin and adjust if needed.
5992         *
5993         * We seem to get an unstable or solid color picture at 200MHz.
5994         * Not sure what's wrong. For now use 200MHz only when all pipes
5995         * are off.
5996         */
5997        if (!IS_CHERRYVIEW(dev_priv) &&
5998            max_pixclk > freq_320*limit/100)
5999                return 400000;
6000        else if (max_pixclk > 266667*limit/100)
6001                return freq_320;
6002        else if (max_pixclk > 0)
6003                return 266667;
6004        else
6005                return 200000;
6006}
6007
/*
 * Pick the lowest BXT CDCLK bin whose 90% guardband still covers the
 * max pixel clock. dev_priv is unused here.
 *
 * FIXME:
 * - remove the guardband, it's not needed on BXT
 * - set 19.2MHz bypass frequency if there are no active pipes
 */
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
			      int max_pixclk)
{
	if (max_pixclk <= 144000 * 9 / 10)
		return 144000;
	if (max_pixclk <= 288000 * 9 / 10)
		return 288000;
	if (max_pixclk <= 384000 * 9 / 10)
		return 384000;
	if (max_pixclk <= 576000 * 9 / 10)
		return 576000;
	return 624000;
}
6027
/* Compute the max pixel clock for new configuration.
 *
 * Also refreshes intel_state->min_pixclk for every crtc in the state
 * (as a side effect) before taking the max across all pipes.
 *
 * NOTE(review): max_pixclk is unsigned but the return type is int, so
 * callers' "< 0" error checks can seemingly never trigger — confirm
 * whether an error path was intended here.
 */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned max_pixclk = 0, i;
	enum pipe pipe;

	/* start from the current per-pipe values, then overwrite the
	 * entries for crtcs present in this atomic state */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		int pixclk = 0;

		if (crtc_state->enable)
			pixclk = crtc_state->adjusted_mode.crtc_clock;

		intel_state->min_pixclk[i] = pixclk;
	}

	for_each_pipe(dev_priv, pipe)
		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);

	return max_pixclk;
}
6056
6057static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6058{
6059        struct drm_device *dev = state->dev;
6060        struct drm_i915_private *dev_priv = dev->dev_private;
6061        int max_pixclk = intel_mode_max_pixclk(dev, state);
6062        struct intel_atomic_state *intel_state =
6063                to_intel_atomic_state(state);
6064
6065        if (max_pixclk < 0)
6066                return max_pixclk;
6067
6068        intel_state->cdclk = intel_state->dev_cdclk =
6069                valleyview_calc_cdclk(dev_priv, max_pixclk);
6070
6071        if (!intel_state->active_crtcs)
6072                intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
6073
6074        return 0;
6075}
6076
6077static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
6078{
6079        struct drm_device *dev = state->dev;
6080        struct drm_i915_private *dev_priv = dev->dev_private;
6081        int max_pixclk = intel_mode_max_pixclk(dev, state);
6082        struct intel_atomic_state *intel_state =
6083                to_intel_atomic_state(state);
6084
6085        if (max_pixclk < 0)
6086                return max_pixclk;
6087
6088        intel_state->cdclk = intel_state->dev_cdclk =
6089                broxton_calc_cdclk(dev_priv, max_pixclk);
6090
6091        if (!intel_state->active_crtcs)
6092                intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
6093
6094        return 0;
6095}
6096
/*
 * Program the GCI PFI credit count for VLV/CHV. Higher credits are
 * used when cdclk runs at or above czclk; the defaults are written
 * first as a workaround before the real value is resent.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6132
/*
 * Commit-time cdclk change for VLV/CHV: apply the frequency computed
 * during the atomic check phase and reprogram the PFI credits to match.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6161
/*
 * Full pipe enable sequence for VLV/CHV. The ordering below is
 * hardware-mandated: timings and pipeconf first, then encoder
 * pre_pll hooks, DPLL (skipped for DSI, which drives its own PLL),
 * encoder pre_enable, pfit/LUT, pipe enable, vblank, and finally the
 * encoder enable hooks.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	/* CHV pipe B: force legacy blend mode, zero the canvas color.
	 * NOTE(review): this dev_priv shadows the outer one — harmless
	 * but could be dropped. */
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (!intel_crtc->config->has_dsi_encoder) {
		if (IS_CHERRYVIEW(dev)) {
			chv_prepare_pll(intel_crtc, intel_crtc->config);
			chv_enable_pll(intel_crtc, intel_crtc->config);
		} else {
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
			vlv_enable_pll(intel_crtc, intel_crtc->config);
		}
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6221
/* Write the precomputed FP0/FP1 divisor values for this crtc's DPLL. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
6230
/*
 * Pipe enable sequence for gen2-4 style hardware: PLL dividers and
 * timings first, then encoder pre_enable, PLL, pfit/LUT, watermarks,
 * pipe, vblank, and finally the encoder enable hooks.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* gen2 has no CPU FIFO underrun reporting */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6275
/*
 * Turn off the GMCH panel fitter for this crtc; a no-op when the
 * current config never enabled it. Must only run with the pipe off.
 */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6290
/*
 * Pipe disable sequence for gen2-4/VLV/CHV: encoder disable, vblank
 * off, pipe off, pfit off, post_disable hooks, PLL off (DSI keeps its
 * own PLL), post_pll_disable hooks, and finally underrun reporting off.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (!intel_crtc->config->has_dsi_encoder) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	/* gen2 has no CPU FIFO underrun reporting */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6337
/*
 * Force a crtc off outside of the atomic commit path (used during HW
 * state sanitization). Disables planes, the crtc itself, its shared
 * DPLL, and drops all power domain references the crtc held, then
 * clears the bookkeeping in dev_priv.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (!intel_crtc->active)
		return;

	if (to_intel_plane_state(crtc->primary->state)->visible) {
		WARN_ON(intel_crtc->unpin_work);

		intel_pre_disable_primary(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->active = false;
	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* release every power domain reference this crtc was holding */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}
6371
6372/*
6373 * turn all crtc's off, but do not adjust state
6374 * This has to be paired with a call to intel_modeset_setup_hw_state.
6375 */
6376int intel_display_suspend(struct drm_device *dev)
6377{
6378        struct drm_i915_private *dev_priv = to_i915(dev);
6379        struct drm_atomic_state *state;
6380        int ret;
6381
6382        state = drm_atomic_helper_suspend(dev);
6383        ret = PTR_ERR_OR_ZERO(state);
6384        if (ret)
6385                DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6386        else
6387                dev_priv->modeset_restore_state = state;
6388        return ret;
6389}
6390
/* Default destroy hook: clean up the base encoder and free our wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_enc = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_enc);
}
6398
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		/* An enabled connector must be attached to a crtc ... */
		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		/* ... and that crtc must itself be active. */
		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* The encoder cross checks below don't apply to DP MST. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hw says off: our tracking must agree. */
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6437
6438int intel_connector_init(struct intel_connector *connector)
6439{
6440        drm_atomic_helper_connector_reset(&connector->base);
6441
6442        if (!connector->base.state)
6443                return -ENOMEM;
6444
6445        return 0;
6446}
6447
6448struct intel_connector *intel_connector_alloc(void)
6449{
6450        struct intel_connector *connector;
6451
6452        connector = kzalloc(sizeof *connector, GFP_KERNEL);
6453        if (!connector)
6454                return NULL;
6455
6456        if (intel_connector_init(connector) < 0) {
6457                kfree(connector);
6458                return NULL;
6459        }
6460
6461        return connector;
6462}
6463
6464/* Simple connector->get_hw_state implementation for encoders that support only
6465 * one connector and no cloning and hence the encoder state determines the state
6466 * of the connector. */
6467bool intel_connector_get_hw_state(struct intel_connector *connector)
6468{
6469        enum pipe pipe = 0;
6470        struct intel_encoder *encoder = connector->encoder;
6471
6472        return encoder->get_hw_state(encoder, &pipe);
6473}
6474
6475static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6476{
6477        if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6478                return crtc_state->fdi_lanes;
6479
6480        return 0;
6481}
6482
/*
 * Validate the FDI lane count requested for @pipe against per-platform
 * limits and against the lanes claimed by the other pipes sharing the FDI
 * link.  Returns 0 if the configuration fits, -EINVAL if it cannot work,
 * or the error from acquiring the other crtc's atomic state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW are capped at 2 FDI lanes. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B using >2 lanes steals the lanes pipe C would need. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C only works if pipe B leaves it at least 2 lanes. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6553
#define RETRY 1
/*
 * Compute the FDI link configuration (lane count and M/N values) for a
 * PCH-connected pipe.  If the link cannot carry the mode at the current
 * pipe_bpp, the bpp is reduced by 6 per attempt (floor 18 bpp) and RETRY
 * is returned so the caller re-runs the whole config computation.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
				       intel_crtc->pipe, pipe_config);
	/* bpp drops by 6 each pass, so this loop always terminates. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6600
6601static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6602                                     struct intel_crtc_state *pipe_config)
6603{
6604        if (pipe_config->pipe_bpp > 24)
6605                return false;
6606
6607        /* HSW can handle pixel rate up to cdclk? */
6608        if (IS_HASWELL(dev_priv->dev))
6609                return true;
6610
6611        /*
6612         * We compare against max which means we must take
6613         * the increased cdclk requirement into account when
6614         * calculating the new cdclk.
6615         *
6616         * Should measure whether using a lower cdclk w/o IPS
6617         */
6618        return ilk_pipe_pixel_rate(pipe_config) <=
6619                dev_priv->max_cdclk_freq * 95 / 100;
6620}
6621
6622static void hsw_compute_ips_config(struct intel_crtc *crtc,
6623                                   struct intel_crtc_state *pipe_config)
6624{
6625        struct drm_device *dev = crtc->base.dev;
6626        struct drm_i915_private *dev_priv = dev->dev_private;
6627
6628        pipe_config->ips_enabled = i915.enable_ips &&
6629                hsw_crtc_supports_ips(crtc) &&
6630                pipe_config_supports_ips(dev_priv, pipe_config);
6631}
6632
6633static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6634{
6635        const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6636
6637        /* GDG double wide on either pipe, otherwise pipe A only */
6638        return INTEL_INFO(dev_priv)->gen < 4 &&
6639                (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6640}
6641
/*
 * Validate and fix up the crtc configuration against platform constraints:
 * pixel clock limits (enabling double wide mode where needed), mandatory
 * even horizontal sizes, the Cantiga+ hsync erratum, IPS and FDI.
 * Returns 0, a negative error, or RETRY from the FDI helper.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		if (adjusted_mode->crtc_clock > clock_limit) {
			DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
				      adjusted_mode->crtc_clock, clock_limit,
				      yesno(pipe_config->double_wide));
			return -EINVAL;
		}
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	/* FDI config (and its bpp retry loop) runs last. */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6696
/*
 * Read back the current cdclk frequency (in kHz) on Skylake by decoding
 * LCPLL1/DPLL0 state and CDCLK_CTL.  The same CDCLK_CTL divider encoding
 * maps to different frequencies depending on whether DPLL0 runs with the
 * 8640 or 8100 VCO, which is inferred from the DPLL0 link rate.
 */
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t linkrate;

	if (!(lcpll1 & LCPLL_PLL_ENABLE))
		return 24000; /* 24MHz is the cd freq with NSSC ref */

	/* 540 MHz is the same with either VCO. */
	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
		return 540000;

	linkrate = (I915_READ(DPLL_CTRL1) &
		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;

	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
		/* vco 8640 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308570;
		case CDCLK_FREQ_675_617:
			return 617140;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	} else {
		/* vco 8100 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	}

	/* error case, do as if DPLL0 isn't enabled */
	return 24000;
}
6743
/*
 * Read back the current cdclk frequency (in kHz) on Broxton: the DE PLL
 * runs at 19.2 MHz * ratio / 2, further divided by the CD2X divider
 * selected in CDCLK_CTL.
 */
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
	int cdclk;

	/* PLL off: running on the 19.2 MHz reference. */
	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
		return 19200;

	cdclk = 19200 * pll_ratio / 2;

	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		return cdclk;  /* 576MHz or 624MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		return cdclk * 2 / 3; /* 384MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		return cdclk / 2; /* 288MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		return cdclk / 4; /* 144MHz */
	}

	/* error case, do as if DE PLL isn't enabled */
	return 19200;
}
6771
6772static int broadwell_get_display_clock_speed(struct drm_device *dev)
6773{
6774        struct drm_i915_private *dev_priv = dev->dev_private;
6775        uint32_t lcpll = I915_READ(LCPLL_CTL);
6776        uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6777
6778        if (lcpll & LCPLL_CD_SOURCE_FCLK)
6779                return 800000;
6780        else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6781                return 450000;
6782        else if (freq == LCPLL_CLK_FREQ_450)
6783                return 450000;
6784        else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6785                return 540000;
6786        else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6787                return 337500;
6788        else
6789                return 675000;
6790}
6791
6792static int haswell_get_display_clock_speed(struct drm_device *dev)
6793{
6794        struct drm_i915_private *dev_priv = dev->dev_private;
6795        uint32_t lcpll = I915_READ(LCPLL_CTL);
6796        uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6797
6798        if (lcpll & LCPLL_CD_SOURCE_FCLK)
6799                return 800000;
6800        else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6801                return 450000;
6802        else if (freq == LCPLL_CLK_FREQ_450)
6803                return 450000;
6804        else if (IS_HSW_ULT(dev))
6805                return 337500;
6806        else
6807                return 540000;
6808}
6809
/* VLV/CHV: cdclk is read from the CCK display clock control register. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
				      CCK_DISPLAY_CLOCK_CONTROL);
}
6815
/* ILK: fixed 450 MHz display clock. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}
6820
/* i945: fixed 400 MHz display clock. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
6825
/* i915: fixed 333.33 MHz display clock. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}
6830
/* Remaining i9xx variants: fixed 200 MHz display clock. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
6835
/* Pineview: display core clock decoded from the GCFGC PCI config word. */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through: treat unknown encodings as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}
6859
6860static int i915gm_get_display_clock_speed(struct drm_device *dev)
6861{
6862        u16 gcfgc = 0;
6863
6864        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6865
6866        if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6867                return 133333;
6868        else {
6869                switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6870                case GC_DISPLAY_CLOCK_333_MHZ:
6871                        return 333333;
6872                default:
6873                case GC_DISPLAY_CLOCK_190_200_MHZ:
6874                        return 190000;
6875                }
6876        }
6877}
6878
/* i865: fixed 266.67 MHz display clock. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}
6883
/* i85x: display core clock decoded from the HPLLCC register, read via
 * PCI config space of device 0 function 3. */
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (dev->pdev->revision == 0x1)
		return 133333;

	pci_bus_read_config_word(dev->pdev->bus,
				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		return 266667;
	}

	/* Shouldn't happen */
	return 0;
}
6920
/* i830: fixed 133.33 MHz display clock. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333;
}
6925
/*
 * Look up the HPLL VCO frequency (in kHz) for gen3/4 chipsets: the 3-bit
 * selector read from HPLLVCO (or HPLLVCO_MOBILE) indexes a per-chipset
 * table.  Returns 0 (with an error logged) for unknown chipsets or
 * selector values with no table entry.
 */
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	uint8_t tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev))
		vco_table = ctg_vco;
	else if (IS_G4X(dev))
		vco_table = elk_vco;
	else if (IS_CRESTLINE(dev))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev))
		vco_table = pnv_vco;
	else if (IS_G33(dev))
		vco_table = blb_vco;
	else
		return 0;

	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);

	/* Unpopulated table slots are zero-initialised, hence the check. */
	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
	else
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);

	return vco;
}
6994
/* GM45: cdclk derived from the HPLL VCO and a 1-bit divider select in
 * the GCFGC PCI config word. */
static int gm45_get_display_clock_speed(struct drm_device *dev)
{
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		return cdclk_sel ? 333333 : 222222;
	case 3200000:
		return cdclk_sel ? 320000 : 228571;
	default:
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
		return 222222;
	}
}
7016
/* i965GM: cdclk = HPLL VCO / divider, where the divider is looked up in
 * a per-VCO table indexed by the GCFGC selector field. */
static int i965gm_get_display_clock_speed(struct drm_device *dev)
{
	static const uint8_t div_3200[] = { 16, 10,  8 };
	static const uint8_t div_4000[] = { 20, 12, 10 };
	static const uint8_t div_5333[] = { 24, 16, 14 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	/* All three divider tables have the same length. */
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
	return 200000;
}
7053
7054static int g33_get_display_clock_speed(struct drm_device *dev)
7055{
7056        static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7057        static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7058        static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7059        static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7060        const uint8_t *div_table;
7061        unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7062        uint16_t tmp = 0;
7063
7064        pci_read_config_word(dev->pdev, GCFGC, &tmp);
7065
7066        cdclk_sel = (tmp >> 4) & 0x7;
7067
7068        if (cdclk_sel >= ARRAY_SIZE(div_3200))
7069                goto fail;
7070
7071        switch (vco) {
7072        case 3200000:
7073                div_table = div_3200;
7074                break;
7075        case 4000000:
7076                div_table = div_4000;
7077                break;
7078        case 4800000:
7079                div_table = div_4800;
7080                break;
7081        case 5333333:
7082                div_table = div_5333;
7083                break;
7084        default:
7085                goto fail;
7086        }
7087
7088        return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7089
7090fail:
7091        DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7092        return 190476;
7093}
7094
7095static void
7096intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7097{
7098        while (*num > DATA_LINK_M_N_MASK ||
7099               *den > DATA_LINK_M_N_MASK) {
7100                *num >>= 1;
7101                *den >>= 1;
7102        }
7103}
7104
7105static void compute_m_n(unsigned int m, unsigned int n,
7106                        uint32_t *ret_m, uint32_t *ret_n)
7107{
7108        *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7109        *ret_m = div_u64((uint64_t) m * *ret_n, n);
7110        intel_reduce_m_n_ratio(ret_m, ret_n);
7111}
7112
/*
 * Fill in the link M/N values for a display link: the data M/N ratio is
 * (bpp * pixel_clock) / (link_clock * nlanes * 8), the link M/N ratio is
 * pixel_clock / link_clock.  TU size is fixed at 64.
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}
7127
7128static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7129{
7130        if (i915.panel_use_ssc >= 0)
7131                return i915.panel_use_ssc != 0;
7132        return dev_priv->vbt.lvds_use_ssc
7133                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7134}
7135
7136static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
7137                           int num_connectors)
7138{
7139        struct drm_device *dev = crtc_state->base.crtc->dev;
7140        struct drm_i915_private *dev_priv = dev->dev_private;
7141        int refclk;
7142
7143        WARN_ON(!crtc_state->base.state);
7144
7145        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
7146                refclk = 100000;
7147        } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7148            intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7149                refclk = dev_priv->vbt.lvds_ssc_freq;
7150                DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7151        } else if (!IS_GEN2(dev)) {
7152                refclk = 96000;
7153        } else {
7154                refclk = 48000;
7155        }
7156
7157        return refclk;
7158}
7159
/* Pack the Pineview FP register value: N (one-hot encoded) and M2. */
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}
7164
/* Pack the i9xx FP register value from the N, M1 and M2 dividers. */
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
7169
/*
 * Compute the FP0/FP1 divider register values for the crtc state.  FP1
 * holds the reduced (low-frequency) dividers when an LVDS panel has a
 * downclocked mode available; lowfreq_avail records that fact.
 */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	u32 fp, fp2 = 0;

	/* Pineview packs the FP register differently from the other i9xx. */
	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}
7198
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): the plain '=' below discards the value just read
	 * and masked above; if a read-modify-write was intended this should
	 * be '|='.  Left untouched since the DPIO sequence may rely on the
	 * exact value written - confirm against the VLV PHY programming
	 * notes before changing. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7227
/* Program the PCH transcoder data/link M1/N1 registers for this crtc. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* TU size is packed into the high bits of the data M register. */
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7240
/*
 * Program the CPU transcoder data/link M/N registers.  On gen5+ the
 * registers are indexed by transcoder; older hardware uses per-pipe G4X
 * registers.  @m2_n2 optionally programs the second M/N set used by DRRS.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
			crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7274
7275void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7276{
7277        struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7278
7279        if (m_n == M1_N1) {
7280                dp_m_n = &crtc->config->dp_m_n;
7281                dp_m2_n2 = &crtc->config->dp_m2_n2;
7282        } else if (m_n == M2_N2) {
7283
7284                /*
7285                 * M2_N2 registers are not supported. Hence m2_n2 divider value
7286                 * needs to be programmed into M1_N1.
7287                 */
7288                dp_m_n = &crtc->config->dp_m2_n2;
7289        } else {
7290                DRM_ERROR("Unsupported divider value\n");
7291                return;
7292        }
7293
7294        if (crtc->config->has_pch_encoder)
7295                intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7296        else
7297                intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7298}
7299
7300static void vlv_compute_dpll(struct intel_crtc *crtc,
7301                             struct intel_crtc_state *pipe_config)
7302{
7303        u32 dpll, dpll_md;
7304
7305        /*
7306         * Enable DPIO clock input. We should never disable the reference
7307         * clock for pipe B, since VGA hotplug / manual detection depends
7308         * on it.
7309         */
7310        dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
7311                DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
7312        /* We should never disable this, set it here for state tracking */
7313        if (crtc->pipe == PIPE_B)
7314                dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7315        dpll |= DPLL_VCO_ENABLE;
7316        pipe_config->dpll_hw_state.dpll = dpll;
7317
7318        dpll_md = (pipe_config->pixel_multiplier - 1)
7319                << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7320        pipe_config->dpll_hw_state.dpll_md = dpll_md;
7321}
7322
/*
 * Program the VLV DPIO PHY state for @crtc's PLL before it is enabled.
 *
 * The magic register values come from the "eDP HDMI DPIO driver vbios
 * notes" doc referenced below. All sideband accesses are serialized under
 * sb_lock, and the write ordering is significant.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers are written first, then calibration is enabled on top. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock setup; keep byte 1, set the rest per vbios notes. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
7413
7414static void chv_compute_dpll(struct intel_crtc *crtc,
7415                             struct intel_crtc_state *pipe_config)
7416{
7417        pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7418                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
7419                DPLL_VCO_ENABLE;
7420        if (crtc->pipe != PIPE_A)
7421                pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7422
7423        pipe_config->dpll_hw_state.dpll_md =
7424                (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7425}
7426
/*
 * Program the CHV DPIO PHY dividers, lock detect and loop filter for
 * @crtc's PLL before it is enabled.
 *
 * Refclk/SSC are enabled first through the DPLL register (with the VCO
 * still disabled); all subsequent sideband accesses run under sb_lock.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	i915_reg_t dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* m2 carries a 22-bit fractional part; split it from the integer. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter - coefficients selected by the VCO frequency range. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count, paired with the loop filter selection above. */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7530
7531/**
7532 * vlv_force_pll_on - forcibly enable just the PLL
7533 * @dev_priv: i915 private structure
7534 * @pipe: pipe PLL to enable
7535 * @dpll: PLL configuration
7536 *
7537 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7538 * in cases where we need the PLL enabled even when @pipe is not going to
7539 * be enabled.
7540 */
7541int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7542                     const struct dpll *dpll)
7543{
7544        struct intel_crtc *crtc =
7545                to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7546        struct intel_crtc_state *pipe_config;
7547
7548        pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7549        if (!pipe_config)
7550                return -ENOMEM;
7551
7552        pipe_config->base.crtc = &crtc->base;
7553        pipe_config->pixel_multiplier = 1;
7554        pipe_config->dpll = *dpll;
7555
7556        if (IS_CHERRYVIEW(dev)) {
7557                chv_compute_dpll(crtc, pipe_config);
7558                chv_prepare_pll(crtc, pipe_config);
7559                chv_enable_pll(crtc, pipe_config);
7560        } else {
7561                vlv_compute_dpll(crtc, pipe_config);
7562                vlv_prepare_pll(crtc, pipe_config);
7563                vlv_enable_pll(crtc, pipe_config);
7564        }
7565
7566        kfree(pipe_config);
7567
7568        return 0;
7569}
7570
7571/**
7572 * vlv_force_pll_off - forcibly disable just the PLL
7573 * @dev_priv: i915 private structure
7574 * @pipe: pipe PLL to disable
7575 *
7576 * Disable the PLL for @pipe. To be used in cases where we need
7577 * the PLL enabled even when @pipe is not going to be enabled.
7578 */
7579void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7580{
7581        if (IS_CHERRYVIEW(dev))
7582                chv_disable_pll(to_i915(dev), pipe);
7583        else
7584                vlv_disable_pll(to_i915(dev), pipe);
7585}
7586
7587static void i9xx_compute_dpll(struct intel_crtc *crtc,
7588                              struct intel_crtc_state *crtc_state,
7589                              intel_clock_t *reduced_clock,
7590                              int num_connectors)
7591{
7592        struct drm_device *dev = crtc->base.dev;
7593        struct drm_i915_private *dev_priv = dev->dev_private;
7594        u32 dpll;
7595        bool is_sdvo;
7596        struct dpll *clock = &crtc_state->dpll;
7597
7598        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7599
7600        is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7601                intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7602
7603        dpll = DPLL_VGA_MODE_DIS;
7604
7605        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7606                dpll |= DPLLB_MODE_LVDS;
7607        else
7608                dpll |= DPLLB_MODE_DAC_SERIAL;
7609
7610        if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7611                dpll |= (crtc_state->pixel_multiplier - 1)
7612                        << SDVO_MULTIPLIER_SHIFT_HIRES;
7613        }
7614
7615        if (is_sdvo)
7616                dpll |= DPLL_SDVO_HIGH_SPEED;
7617
7618        if (crtc_state->has_dp_encoder)
7619                dpll |= DPLL_SDVO_HIGH_SPEED;
7620
7621        /* compute bitmask from p1 value */
7622        if (IS_PINEVIEW(dev))
7623                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7624        else {
7625                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7626                if (IS_G4X(dev) && reduced_clock)
7627                        dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7628        }
7629        switch (clock->p2) {
7630        case 5:
7631                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7632                break;
7633        case 7:
7634                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7635                break;
7636        case 10:
7637                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7638                break;
7639        case 14:
7640                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7641                break;
7642        }
7643        if (INTEL_INFO(dev)->gen >= 4)
7644                dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7645
7646        if (crtc_state->sdvo_tv_clock)
7647                dpll |= PLL_REF_INPUT_TVCLKINBC;
7648        else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7649                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7650                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7651        else
7652                dpll |= PLL_REF_INPUT_DREFCLK;
7653
7654        dpll |= DPLL_VCO_ENABLE;
7655        crtc_state->dpll_hw_state.dpll = dpll;
7656
7657        if (INTEL_INFO(dev)->gen >= 4) {
7658                u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7659                        << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7660                crtc_state->dpll_hw_state.dpll_md = dpll_md;
7661        }
7662}
7663
7664static void i8xx_compute_dpll(struct intel_crtc *crtc,
7665                              struct intel_crtc_state *crtc_state,
7666                              intel_clock_t *reduced_clock,
7667                              int num_connectors)
7668{
7669        struct drm_device *dev = crtc->base.dev;
7670        struct drm_i915_private *dev_priv = dev->dev_private;
7671        u32 dpll;
7672        struct dpll *clock = &crtc_state->dpll;
7673
7674        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7675
7676        dpll = DPLL_VGA_MODE_DIS;
7677
7678        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7679                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7680        } else {
7681                if (clock->p1 == 2)
7682                        dpll |= PLL_P1_DIVIDE_BY_TWO;
7683                else
7684                        dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7685                if (clock->p2 == 4)
7686                        dpll |= PLL_P2_DIVIDE_BY_4;
7687        }
7688
7689        if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7690                dpll |= DPLL_DVO_2X_MODE;
7691
7692        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7693                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7694                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7695        else
7696                dpll |= PLL_REF_INPUT_DREFCLK;
7697
7698        dpll |= DPLL_VCO_ENABLE;
7699        crtc_state->dpll_hw_state.dpll = dpll;
7700}
7701
/*
 * Write @intel_crtc's adjusted mode timings (H/V total, blank, sync,
 * vsyncshift) and pipe source size into the transcoder registers.
 * All timing register fields are programmed as value minus one.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+. */
	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}
7769
/*
 * Read the transcoder timing registers back into @pipe_config (hardware
 * state readout). Register fields hold value minus one, hence the +1
 * adjustments; the interlace halfline adjustment mirrors the -1 applied
 * in intel_set_pipe_timings().
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	/* PIPESRC packs width in the high word, height in the low word. */
	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
7811
7812void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7813                                 struct intel_crtc_state *pipe_config)
7814{
7815        mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7816        mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7817        mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7818        mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7819
7820        mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7821        mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7822        mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7823        mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7824
7825        mode->flags = pipe_config->base.adjusted_mode.flags;
7826        mode->type = DRM_MODE_TYPE_DRIVER;
7827
7828        mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7829        mode->flags |= pipe_config->base.adjusted_mode.flags;
7830
7831        mode->hsync = drm_mode_hsync(mode);
7832        mode->vrefresh = drm_mode_vrefresh(mode);
7833        drm_mode_set_name(mode);
7834}
7835
/*
 * Compute and write the PIPECONF value for @intel_crtc from its current
 * config: double-wide mode, bpc/dither (g4x+), CxSR downclocking,
 * interlace mode and limited color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* Keep the pipe force-enabled bit set when a quirk demands it. */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7899
/*
 * Compute the DPLL state for @crtc on gmch-style platforms.
 *
 * Returns 0 on success, or -EINVAL if no PLL divider settings can be
 * found for the requested port clock.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock;
	bool ok;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* DSI: nothing to compute here. */
	if (crtc_state->has_dsi_encoder)
		return 0;

	/* Count connectors on this crtc; it feeds the SSC refclk decision. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc == &crtc->base)
			num_connectors++;
	}

	/* Skip the divider search if the encoder already fixed the clock. */
	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.  The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* Dispatch to the platform-specific DPLL state computation. */
	if (IS_GEN2(dev)) {
		i8xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_compute_dpll(crtc, crtc_state);
	} else {
		i9xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	}

	return 0;
}
7965
7966static void i9xx_get_pfit_config(struct intel_crtc *crtc,
7967                                 struct intel_crtc_state *pipe_config)
7968{
7969        struct drm_device *dev = crtc->base.dev;
7970        struct drm_i915_private *dev_priv = dev->dev_private;
7971        uint32_t tmp;
7972
7973        if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
7974                return;
7975
7976        tmp = I915_READ(PFIT_CONTROL);
7977        if (!(tmp & PFIT_ENABLE))
7978                return;
7979
7980        /* Check whether the pfit is attached to our pipe. */
7981        if (INTEL_INFO(dev)->gen < 4) {
7982                if (crtc->pipe != PIPE_B)
7983                        return;
7984        } else {
7985                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7986                        return;
7987        }
7988
7989        pipe_config->gmch_pfit.control = tmp;
7990        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
7991}
7992
7993static void vlv_crtc_clock_get(struct intel_crtc *crtc,
7994                               struct intel_crtc_state *pipe_config)
7995{
7996        struct drm_device *dev = crtc->base.dev;
7997        struct drm_i915_private *dev_priv = dev->dev_private;
7998        int pipe = pipe_config->cpu_transcoder;
7999        intel_clock_t clock;
8000        u32 mdiv;
8001        int refclk = 100000;
8002
8003        /* In case of MIPI DPLL will not even be used */
8004        if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
8005                return;
8006
8007        mutex_lock(&dev_priv->sb_lock);
8008        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8009        mutex_unlock(&dev_priv->sb_lock);
8010
8011        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8012        clock.m2 = mdiv & DPIO_M2DIV_MASK;
8013        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8014        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8015        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8016
8017        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8018}
8019
/*
 * Read back the primary plane's hardware state and reconstruct framebuffer
 * metadata (format, size, stride, base address) for the buffer the
 * BIOS/firmware left enabled, filling in @plane_config so it can be
 * inherited for the initial modeset.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, base, offset;
        int pipe = crtc->pipe, plane = crtc->plane;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to take over if the plane was left disabled. */
        val = I915_READ(DSPCNTR(plane));
        if (!(val & DISPLAY_PLANE_ENABLE))
                return;

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        /* Tiling info is only present in the plane control reg on gen4+. */
        if (INTEL_INFO(dev)->gen >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
                }
        }

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->pixel_format = fourcc;
        fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

        /*
         * gen4+ has a 4K-aligned surface base plus a separate tiled/linear
         * offset register; older parts only have a plain address register.
         */
        if (INTEL_INFO(dev)->gen >= 4) {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(plane));
                else
                        offset = I915_READ(DSPLINOFF(plane));
                base = I915_READ(DSPSURF(plane)) & 0xfffff000;
        } else {
                base = I915_READ(DSPADDR(plane));
        }
        plane_config->base = base;

        /* PIPESRC stores (width - 1) << 16 | (height - 1). */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(pipe));
        fb->pitches[0] = val & 0xffffffc0;

        /* Size the buffer using the tiling-aligned height. */
        aligned_height = intel_fb_align_height(dev, fb->height,
                                               fb->pixel_format,
                                               fb->modifier[0]);

        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      pipe_name(pipe), plane, fb->width, fb->height,
                      fb->bits_per_pixel, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
}
8088
/*
 * Read the CHV DPLL divider configuration back from the DPIO PHY registers
 * and compute the resulting port clock for state readout.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        intel_clock_t clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;

        /* DPIO accesses go over the sideband and need the sb lock. */
        mutex_lock(&dev_priv->sb_lock);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        mutex_unlock(&dev_priv->sb_lock);

        /*
         * m2 is a fixed-point value: the integer part lives in pll_dw0
         * (shifted up by 22 bits) and the 22-bit fractional part in
         * pll_dw2, used only when fractional mode is enabled.
         */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8118
/*
 * Read out the full hardware state of a gen2-4/VLV/CHV pipe (bpp, timings,
 * panel fitter, pixel multiplier, DPLL dividers) into @pipe_config.
 * Returns true iff the pipe's power well is up and the pipe is enabled.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
        bool ret;

        /*
         * Hold a power domain reference while poking at the registers;
         * if the power well is already down there is no state to read.
         */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;

        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;

        ret = false;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only these platforms encode the pipe bpp in PIPECONF. */
        if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        if (INTEL_INFO(dev)->gen < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /*
         * The pixel multiplier location varies by generation: DPLL_MD on
         * gen4+, the DPLL register on i945G/GM and G33, and the SDVO port
         * itself on i915G/GM.
         */
        if (INTEL_INFO(dev)->gen >= 4) {
                tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
                /*
                 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
                 * on 830. Filter it out here so that we don't
                 * report errors due to that.
                 */
                if (IS_I830(dev))
                        pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        /* Recompute the port clock from the divider state read above. */
        if (IS_CHERRYVIEW(dev))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
8226
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) based on
 * which encoders are present: enable SSC for LVDS/eDP panels when
 * requested, pick CK505 vs. the internal non-spread source, and route the
 * CPU source output for CPU-attached eDP. The final register value is
 * computed first, and then applied one source at a time with the required
 * settle delays.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(dev, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (enc_to_dig_port(&encoder->base)->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /*
         * On IBX the presence of the external CK505 clock chip comes from
         * the VBT, and SSC is only usable with it; later PCHs can always
         * use SSC.
         */
        if (HAS_PCH_IBX(dev)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
                      has_panel, has_lvds, has_ck505);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = I915_READ(PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else {
                final |= DREF_SSC_SOURCE_DISABLE;
                final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        }

        /* Nothing to do if the hardware already matches the wanted state. */
        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                DRM_DEBUG_KMS("Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
                DRM_DEBUG_KMS("Disabling SSC entirely\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                /* Turn off the SSC source */
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_DISABLE;

                /* Turn off SSC1 */
                val &= ~DREF_SSC1_ENABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        }

        /* The stepwise updates above must have converged on 'final'. */
        BUG_ON(val != final);
}
8374
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait for the status bit to confirm, then de-assert and wait again.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        /* Assert reset. */
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
                               FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                DRM_ERROR("FDI mPHY reset assert timeout\n");

        /* De-assert reset. */
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
                                FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8395
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning parameters over the sideband (SBI_MPHY)
 * interface. The register offsets and values are magic numbers from the
 * WaMPhyProgramming:hsw workaround; each setting is written twice, to a
 * 0x20xx/0x21xx register pair (presumably one per FDI channel — the
 * pairing is the only structure visible here).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8470
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
                                 bool with_fdi)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t reg, tmp;

        /* Sanitize impossible parameter combinations (WARN and degrade). */
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
        if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        mutex_lock(&dev_priv->sb_lock);

        /* Enable the SSC block while keeping the clock path in PATHALT. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        /* With spread: release PATHALT, then set up the FDI mPHY if used. */
        if (with_spread) {
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* Finally enable the CLKOUT_DP output buffer. */
        reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
8515
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t reg, tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Disable the CLKOUT_DP output buffer first. */
        reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        /*
         * Then shut down the SSC block if it is running: engage PATHALT
         * (with a settle delay) before setting the disable bit.
         */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
8542
/* Map a bend amount in [-50, 50] (steps of 5) onto a table index 0..20. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI SSCDIVINTPHASE values for each supported amount of CLKOUT_DP clock
 * bending, indexed by BEND_IDX(steps); consumed by lpt_bend_clkout_dp().
 */
static const uint16_t sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
        [BEND_IDX( 35)] = 0x3C23,
        [BEND_IDX( 30)] = 0x3D23,
        [BEND_IDX( 25)] = 0x3D23,
        [BEND_IDX( 20)] = 0x3E23,
        [BEND_IDX( 15)] = 0x3E23,
        [BEND_IDX( 10)] = 0x3F23,
        [BEND_IDX(  5)] = 0x3F23,
        [BEND_IDX(  0)] = 0x0025,
        [BEND_IDX( -5)] = 0x0025,
        [BEND_IDX(-10)] = 0x0125,
        [BEND_IDX(-15)] = 0x0125,
        [BEND_IDX(-20)] = 0x0225,
        [BEND_IDX(-25)] = 0x0225,
        [BEND_IDX(-30)] = 0x0325,
        [BEND_IDX(-35)] = 0x0325,
        [BEND_IDX(-40)] = 0x0425,
        [BEND_IDX(-45)] = 0x0425,
        [BEND_IDX(-50)] = 0x0525,
};
8568
8569/*
8570 * Bend CLKOUT_DP
8571 * steps -50 to 50 inclusive, in steps of 5
8572 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8573 * change in clock period = -(steps / 10) * 5.787 ps
8574 */
8575static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8576{
8577        uint32_t tmp;
8578        int idx = BEND_IDX(steps);
8579
8580        if (WARN_ON(steps % 5 != 0))
8581                return;
8582
8583        if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8584                return;
8585
8586        mutex_lock(&dev_priv->sb_lock);
8587
8588        if (steps % 10 != 0)
8589                tmp = 0xAAAAAAAB;
8590        else
8591                tmp = 0x00000000;
8592        intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8593
8594        tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8595        tmp &= 0xffff0000;
8596        tmp |= sscdivintphase[idx];
8597        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8598
8599        mutex_unlock(&dev_priv->sb_lock);
8600}
8601
8602#undef BEND_IDX
8603
8604static void lpt_init_pch_refclk(struct drm_device *dev)
8605{
8606        struct intel_encoder *encoder;
8607        bool has_vga = false;
8608
8609        for_each_intel_encoder(dev, encoder) {
8610                switch (encoder->type) {
8611                case INTEL_OUTPUT_ANALOG:
8612                        has_vga = true;
8613                        break;
8614                default:
8615                        break;
8616                }
8617        }
8618
8619        if (has_vga) {
8620                lpt_bend_clkout_dp(to_i915(dev), 0);
8621                lpt_enable_clkout_dp(dev, true, true);
8622        } else {
8623                lpt_disable_clkout_dp(dev);
8624        }
8625}
8626
8627/*
8628 * Initialize reference clocks when the driver loads
8629 */
8630void intel_init_pch_refclk(struct drm_device *dev)
8631{
8632        if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8633                ironlake_init_pch_refclk(dev);
8634        else if (HAS_PCH_LPT(dev))
8635                lpt_init_pch_refclk(dev);
8636}
8637
8638static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
8639{
8640        struct drm_device *dev = crtc_state->base.crtc->dev;
8641        struct drm_i915_private *dev_priv = dev->dev_private;
8642        struct drm_atomic_state *state = crtc_state->base.state;
8643        struct drm_connector *connector;
8644        struct drm_connector_state *connector_state;
8645        struct intel_encoder *encoder;
8646        int num_connectors = 0, i;
8647        bool is_lvds = false;
8648
8649        for_each_connector_in_state(state, connector, connector_state, i) {
8650                if (connector_state->crtc != crtc_state->base.crtc)
8651                        continue;
8652
8653                encoder = to_intel_encoder(connector_state->best_encoder);
8654
8655                switch (encoder->type) {
8656                case INTEL_OUTPUT_LVDS:
8657                        is_lvds = true;
8658                        break;
8659                default:
8660                        break;
8661                }
8662                num_connectors++;
8663        }
8664
8665        if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
8666                DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8667                              dev_priv->vbt.lvds_ssc_freq);
8668                return dev_priv->vbt.lvds_ssc_freq;
8669        }
8670
8671        return 120000;
8672}
8673
8674static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8675{
8676        struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8677        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8678        int pipe = intel_crtc->pipe;
8679        uint32_t val;
8680
8681        val = 0;
8682
8683        switch (intel_crtc->config->pipe_bpp) {
8684        case 18:
8685                val |= PIPECONF_6BPC;
8686                break;
8687        case 24:
8688                val |= PIPECONF_8BPC;
8689                break;
8690        case 30:
8691                val |= PIPECONF_10BPC;
8692                break;
8693        case 36:
8694                val |= PIPECONF_12BPC;
8695                break;
8696        default:
8697                /* Case prevented by intel_choose_pipe_bpp_dither. */
8698                BUG();
8699        }
8700
8701        if (intel_crtc->config->dither)
8702                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8703
8704        if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8705                val |= PIPECONF_INTERLACED_ILK;
8706        else
8707                val |= PIPECONF_PROGRESSIVE;
8708
8709        if (intel_crtc->config->limited_color_range)
8710                val |= PIPECONF_COLOR_RANGE_SELECT;
8711
8712        I915_WRITE(PIPECONF(pipe), val);
8713        POSTING_READ(PIPECONF(pipe));
8714}
8715
8716/*
8717 * Set up the pipe CSC unit.
8718 *
8719 * Currently only full range RGB to limited range RGB conversion
8720 * is supported, but eventually this should handle various
8721 * RGB<->YCbCr scenarios as well.
8722 */
8723static void intel_set_pipe_csc(struct drm_crtc *crtc)
8724{
8725        struct drm_device *dev = crtc->dev;
8726        struct drm_i915_private *dev_priv = dev->dev_private;
8727        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8728        int pipe = intel_crtc->pipe;
8729        uint16_t coeff = 0x7800; /* 1.0 */
8730
8731        /*
8732         * TODO: Check what kind of values actually come out of the pipe
8733         * with these coeff/postoff values and adjust to get the best
8734         * accuracy. Perhaps we even need to take the bpc value into
8735         * consideration.
8736         */
8737
8738        if (intel_crtc->config->limited_color_range)
8739                coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
8740
8741        /*
8742         * GY/GU and RY/RU should be the other way around according
8743         * to BSpec, but reality doesn't agree. Just set them up in
8744         * a way that results in the correct picture.
8745         */
8746        I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
8747        I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
8748
8749        I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
8750        I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
8751
8752        I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
8753        I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
8754
8755        I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
8756        I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
8757        I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
8758
8759        if (INTEL_INFO(dev)->gen > 6) {
8760                uint16_t postoff = 0;
8761
8762                if (intel_crtc->config->limited_color_range)
8763                        postoff = (16 * (1 << 12) / 255) & 0x1fff;
8764
8765                I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
8766                I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
8767                I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
8768
8769                I915_WRITE(PIPE_CSC_MODE(pipe), 0);
8770        } else {
8771                uint32_t mode = CSC_MODE_YUV_TO_RGB;
8772
8773                if (intel_crtc->config->limited_color_range)
8774                        mode |= CSC_BLACK_SCREEN_OFFSET;
8775
8776                I915_WRITE(PIPE_CSC_MODE(pipe), mode);
8777        }
8778}
8779
/*
 * Program PIPECONF for the HSW+ transcoder (dither, interlace), set the
 * gamma mode, and on BDW/gen9+ program PIPEMISC with the pipe bpc and
 * dither configuration.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        uint32_t val;

        val = 0;

        /* Only HSW carries the dither bits in PIPECONF itself. */
        if (IS_HASWELL(dev) && intel_crtc->config->dither)
                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

        if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                val |= PIPECONF_INTERLACED_ILK;
        else
                val |= PIPECONF_PROGRESSIVE;

        I915_WRITE(PIPECONF(cpu_transcoder), val);
        POSTING_READ(PIPECONF(cpu_transcoder));

        I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
        POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

        /* BDW and gen9+ moved bpc/dither control into PIPEMISC. */
        if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                val = 0;

                switch (intel_crtc->config->pipe_bpp) {
                case 18:
                        val |= PIPEMISC_DITHER_6_BPC;
                        break;
                case 24:
                        val |= PIPEMISC_DITHER_8_BPC;
                        break;
                case 30:
                        val |= PIPEMISC_DITHER_10_BPC;
                        break;
                case 36:
                        val |= PIPEMISC_DITHER_12_BPC;
                        break;
                default:
                        /* Case prevented by pipe_config_set_bpp. */
                        BUG();
                }

                if (intel_crtc->config->dither)
                        val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

                I915_WRITE(PIPEMISC(pipe), val);
        }
}
8832
8833static bool ironlake_compute_clocks(struct drm_crtc *crtc,
8834                                    struct intel_crtc_state *crtc_state,
8835                                    intel_clock_t *clock,
8836                                    bool *has_reduced_clock,
8837                                    intel_clock_t *reduced_clock)
8838{
8839        struct drm_device *dev = crtc->dev;
8840        struct drm_i915_private *dev_priv = dev->dev_private;
8841        int refclk;
8842        const intel_limit_t *limit;
8843        bool ret;
8844
8845        refclk = ironlake_get_refclk(crtc_state);
8846
8847        /*
8848         * Returns a set of divisors for the desired target clock with the given
8849         * refclk, or FALSE.  The returned values represent the clock equation:
8850         * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8851         */
8852        limit = intel_limit(crtc_state, refclk);
8853        ret = dev_priv->display.find_dpll(limit, crtc_state,
8854                                          crtc_state->port_clock,
8855                                          refclk, NULL, clock);
8856        if (!ret)
8857                return false;
8858
8859        return true;
8860}
8861
/*
 * Compute the number of link lanes needed to carry target_clock pixels
 * at the given bpp over a link running at link_bw.
 *
 * Account for spread spectrum to avoid oversubscribing the link. Max
 * center spread is 2.5%; use 5% for safety's sake.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t per_lane = link_bw * 8;

	/* Open-coded DIV_ROUND_UP(bps, per_lane). */
	return (bps + per_lane - 1) / per_lane;
}
8872
/*
 * Returns true when the effective DPLL M value is below factor * N,
 * i.e. when the caller should set FP_CB_TUNE in the FP register
 * (see ironlake_compute_dpll()).
 */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
8877
/*
 * Assemble the DPLL control register value for an ILK-style PLL from
 * the divisors precomputed in crtc_state->dpll. As a side effect this
 * may set FP_CB_TUNE in *fp (and in *fp2 for the reduced clock, when
 * fp2 is non-NULL). Returns the DPLL value with DPLL_VCO_ENABLE set.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct intel_crtc_state *crtc_state,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	uint32_t dpll;
	int factor, num_connectors = 0, i;
	bool is_lvds = false, is_sdvo = false;

	/*
	 * Classify the outputs driven by this crtc; they determine the
	 * PLL mode, the autotuning factor and the reference selection.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		*fp |= FP_CB_TUNE;

	/*
	 * NOTE(review): this open-codes the check for the reduced clock
	 * using reduced_clock->m directly, while the primary clock goes
	 * through ironlake_needs_fb_cb_tune()/i9xx_dpll_compute_m() —
	 * confirm the two are meant to use different M definitions.
	 */
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Spread spectrum reference only for a sole SSC-capable LVDS. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}
8973
/*
 * Compute the PLL state for an ILK+ crtc: find divisors (unless the
 * caller already provided them via crtc_state->clock_set), build the
 * DPLL/FP register values, and reserve a shared PCH PLL when a PCH
 * encoder is present. Returns 0 on success, -EINVAL when no usable
 * PLL settings or no free shared PLL could be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_shared_dpll *pll;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !crtc_state->clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!crtc_state->clock_set) {
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (crtc_state->has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(crtc, crtc_state,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		crtc_state->dpll_hw_state.dpll = dpll;
		crtc_state->dpll_hw_state.fp0 = fp;
		/* Without a reduced clock, fp1 mirrors fp0. */
		if (has_reduced_clock)
			crtc_state->dpll_hw_state.fp1 = fp2;
		else
			crtc_state->dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(crtc, crtc_state);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	/* Downclocking is only usable on LVDS with a reduced clock. */
	if (is_lvds && has_reduced_clock)
		crtc->lowfreq_avail = true;
	else
		crtc->lowfreq_avail = false;

	return 0;
}
9039
/*
 * Read back the link M/N and data M/N values (plus TU size) currently
 * programmed into the PCH transcoder for this crtc's pipe.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	/* DATA_M1 packs the data M value together with the TU size
	 * field; mask TU out here and extract it separately below. */
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9055
/*
 * Read back link/data M/N values from the CPU transcoder. On gen5+
 * the registers are per-transcoder; on older (G4X-style) hardware
 * they are per-pipe. When m2_n2 is non-NULL the secondary M2/N2 set
 * is read out too (gen < 8 with DRRS only, see below).
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		/* DATA_M1 packs data M with the TU size field. */
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9097
9098void intel_dp_get_m_n(struct intel_crtc *crtc,
9099                      struct intel_crtc_state *pipe_config)
9100{
9101        if (pipe_config->has_pch_encoder)
9102                intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9103        else
9104                intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9105                                             &pipe_config->dp_m_n,
9106                                             &pipe_config->dp_m2_n2);
9107}
9108
/* Read back the FDI link M/N values from this crtc's CPU transcoder. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9115
9116static void skylake_get_pfit_config(struct intel_crtc *crtc,
9117                                    struct intel_crtc_state *pipe_config)
9118{
9119        struct drm_device *dev = crtc->base.dev;
9120        struct drm_i915_private *dev_priv = dev->dev_private;
9121        struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9122        uint32_t ps_ctrl = 0;
9123        int id = -1;
9124        int i;
9125
9126        /* find scaler attached to this pipe */
9127        for (i = 0; i < crtc->num_scalers; i++) {
9128                ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9129                if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9130                        id = i;
9131                        pipe_config->pch_pfit.enabled = true;
9132                        pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9133                        pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9134                        break;
9135                }
9136        }
9137
9138        scaler_state->scaler_id = id;
9139        if (id >= 0) {
9140                scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9141        } else {
9142                scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9143        }
9144}
9145
/*
 * Reconstruct the framebuffer the firmware/BIOS left active on SKL+
 * primary plane 0 of this crtc's pipe: format, tiling, base address,
 * dimensions and stride. On success plane_config->fb points at a
 * freshly allocated intel_framebuffer; on failure nothing is set.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Nothing to take over if the plane isn't enabled. */
	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Translate the hardware tiling field into a fb modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	/* PLANE_SIZE encodes height in bits 16+, width in the low bits,
	 * both minus one. */
	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in units that depend on modifier/format. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	/* NOTE(review): frees via fb == &intel_fb->base — assumes base
	 * is the first member of struct intel_framebuffer; confirm. */
	kfree(fb);
}
9229
9230static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9231                                     struct intel_crtc_state *pipe_config)
9232{
9233        struct drm_device *dev = crtc->base.dev;
9234        struct drm_i915_private *dev_priv = dev->dev_private;
9235        uint32_t tmp;
9236
9237        tmp = I915_READ(PF_CTL(crtc->pipe));
9238
9239        if (tmp & PF_ENABLE) {
9240                pipe_config->pch_pfit.enabled = true;
9241                pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9242                pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9243
9244                /* We currently do not free assignements of panel fitters on
9245                 * ivb/hsw (since we don't use the higher upscaling modes which
9246                 * differentiates them) so just WARN about this case for now. */
9247                if (IS_GEN7(dev)) {
9248                        WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9249                                PF_PIPE_SEL_IVB(crtc->pipe));
9250                }
9251        }
9252}
9253
/*
 * Reconstruct the framebuffer the firmware/BIOS left active on the
 * ILK+ primary plane of this crtc's pipe: format, tiling, base
 * address, dimensions and stride. On success plane_config->fb points
 * at a freshly allocated intel_framebuffer.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane isn't enabled. */
	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	/* HSW/BDW have a single offset register; older parts split it
	 * into tiled vs linear offset registers. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* PIPESRC encodes width in bits 16+, height in the low bits,
	 * both minus one. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
9322
/*
 * Read back the full hardware state of an ILK pipe into pipe_config:
 * bpc, color range, PCH/FDI configuration, shared DPLL selection,
 * pixel multiplier, timings and panel fitter. The readout is bracketed
 * by a pipe power-domain reference. Returns true if the pipe is
 * enabled and the readout succeeded.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On ILK the CPU transcoder is hardwired 1:1 to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Decode the PIPECONF bpc field into pipe_bpp. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	/* A running PCH transcoder means a PCH encoder with FDI. */
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		/* IBX has a fixed pipe->PLL mapping; CPT records the
		 * selection in PCH_DPLL_SEL. */
		if (HAS_PCH_IBX(dev_priv->dev)) {
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9412
/*
 * Sanity-check (via I915_STATE_WARN only, no corrective action) that
 * everything depending on LCPLL is already shut down before the PLL is
 * disabled: crtcs, the power well, SPLL/WRPLLs, panel power, CPU/PCH
 * backlight PWMs, the utility pin, GTC and interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* Only HSW has a second CPU backlight PWM. */
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9446
9447static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9448{
9449        struct drm_device *dev = dev_priv->dev;
9450
9451        if (IS_HASWELL(dev))
9452                return I915_READ(D_COMP_HSW);
9453        else
9454                return I915_READ(D_COMP_BDW);
9455}
9456
/*
 * Write a new D_COMP value. On HSW the write goes through the pcode
 * mailbox (serialized by rps.hw_lock); on BDW it is a plain register
 * write followed by a posting read.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
9472
9473/*
9474 * This function implements pieces of two sequences from BSpec:
9475 * - Sequence for display software to disable LCPLL
9476 * - Sequence for display software to allow package C8+
9477 * The steps implemented here are just the steps that actually touch the LCPLL
9478 * register. Callers should take care of disabling all the display engine
9479 * functions, doing the mode unset, fixing interrupts, etc.
9480 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move the CD clock source to FCLK before the PLL
	 * itself is disabled. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	/* Disable the PLL and wait for lock to drop. */
	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP and wait for any RCOMP cycle to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	/* Finally, optionally allow the PLL's power to be removed. */
	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9524
9525/*
9526 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9527 * source.
9528 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already fully up: locked, enabled,
	 * on the LCPLL source and with power-down disallowed. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disallow power-down before re-enabling the PLL. */
	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Force D_COMP back on. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	/* Re-enable the PLL and wait for it to lock. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Switch the CD clock back from FCLK to the LCPLL source. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	/* cdclk may have changed while LCPLL was down; re-read it. */
	intel_update_cdclk(dev_priv->dev);
}
9576
9577/*
9578 * Package states C8 and deeper are really deep PC states that can only be
9579 * reached when all the devices on the system allow it, so even if the graphics
9580 * device allows PC8+, it doesn't mean the system will actually get to these
9581 * states. Our driver only allows PC8+ when going into runtime PM.
9582 *
9583 * The requirements for PC8+ are that all the outputs are disabled, the power
9584 * well is disabled and most interrupts are disabled, and these are also
9585 * requirements for runtime PM. When these conditions are met, we manually do
9586 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9587 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9588 * hang the machine.
9589 *
9590 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9591 * the state of some registers, so when we come back from PC8+ we need to
9592 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9593 * need to take care of the registers kept by RC6. Notice that this happens even
9594 * if we don't put the device in PCI D3 state (which is what currently happens
9595 * because of the runtime PM support).
9596 *
9597 * For more, read "Display Sequences for Package C8" on the hardware
9598 * documentation.
9599 */
9600void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9601{
9602        struct drm_device *dev = dev_priv->dev;
9603        uint32_t val;
9604
9605        DRM_DEBUG_KMS("Enabling package C8+\n");
9606
9607        if (HAS_PCH_LPT_LP(dev)) {
9608                val = I915_READ(SOUTH_DSPCLK_GATE_D);
9609                val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9610                I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9611        }
9612
9613        lpt_disable_clkout_dp(dev);
9614        hsw_disable_lcpll(dev_priv, true, true);
9615}
9616
/*
 * Undo hsw_enable_pc8(): restore LCPLL, re-init the PCH reference clock and
 * disallow dropping the PCH LP partition level again.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	/* LCPLL must be back up before the refclk is reprogrammed. */
	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		/* Keep the LP partition level from dropping outside PC8+. */
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
9633
9634static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9635{
9636        struct drm_device *dev = old_state->dev;
9637        struct intel_atomic_state *old_intel_state =
9638                to_intel_atomic_state(old_state);
9639        unsigned int req_cdclk = old_intel_state->dev_cdclk;
9640
9641        broxton_set_cdclk(dev, req_cdclk);
9642}
9643
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = state->dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_crtc_state *crtc_state;
	unsigned max_pixel_rate = 0, i;
	enum pipe pipe;

	/* Start from the currently committed per-pipe rates, then overwrite
	 * the entries for the crtcs touched by this atomic state. */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, cstate, i) {
		int pixel_rate;

		crtc_state = to_intel_crtc_state(cstate);
		if (!crtc_state->base.enable) {
			/* Disabled pipes contribute nothing. */
			intel_state->min_pixclk[i] = 0;
			continue;
		}

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

		/* NOTE(review): min_pixclk is written by crtc index here but
		 * read by pipe below — assumes index == pipe; verify. */
		intel_state->min_pixclk[i] = pixel_rate;
	}

	/* The required cdclk is driven by the busiest pipe. */
	for_each_pipe(dev_priv, pipe)
		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);

	return max_pixel_rate;
}
9681
/*
 * Reprogram the BDW CD clock to @cdclk (kHz): notify pcode, switch the CD
 * clock to Fclk, rewrite the LCPLL frequency select, switch back, then
 * report the new frequency to pcode and the CDCLK_FREQ register.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val, data;
	int ret;

	/* Refuse to touch the frequency unless LCPLL is fully up and the CD
	 * clock path is enabled and not already on Fclk. */
	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	/* Tell pcode a display frequency change is about to happen. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Park the CD clock on Fclk while LCPLL is reprogrammed. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* 'data' is the pcode frequency index matching each cdclk value. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Move the CD clock back onto the freshly programmed LCPLL. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	/* Report the final frequency index back to pcode. */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* CDCLK_FREQ holds the frequency in MHz, minus one. */
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
9760
9761static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9762{
9763        struct drm_i915_private *dev_priv = to_i915(state->dev);
9764        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9765        int max_pixclk = ilk_max_pixel_rate(state);
9766        int cdclk;
9767
9768        /*
9769         * FIXME should also account for plane ratio
9770         * once 64bpp pixel formats are supported.
9771         */
9772        if (max_pixclk > 540000)
9773                cdclk = 675000;
9774        else if (max_pixclk > 450000)
9775                cdclk = 540000;
9776        else if (max_pixclk > 337500)
9777                cdclk = 450000;
9778        else
9779                cdclk = 337500;
9780
9781        if (cdclk > dev_priv->max_cdclk_freq) {
9782                DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9783                              cdclk, dev_priv->max_cdclk_freq);
9784                return -EINVAL;
9785        }
9786
9787        intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9788        if (!intel_state->active_crtcs)
9789                intel_state->dev_cdclk = 337500;
9790
9791        return 0;
9792}
9793
9794static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9795{
9796        struct drm_device *dev = old_state->dev;
9797        struct intel_atomic_state *old_intel_state =
9798                to_intel_atomic_state(old_state);
9799        unsigned req_cdclk = old_intel_state->dev_cdclk;
9800
9801        broadwell_set_cdclk(dev, req_cdclk);
9802}
9803
9804static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9805                                      struct intel_crtc_state *crtc_state)
9806{
9807        struct intel_encoder *intel_encoder =
9808                intel_ddi_get_crtc_new_encoder(crtc_state);
9809
9810        if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9811                if (!intel_ddi_pll_select(crtc, crtc_state))
9812                        return -EINVAL;
9813        }
9814
9815        crtc->lowfreq_avail = false;
9816
9817        return 0;
9818}
9819
9820static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9821                                enum port port,
9822                                struct intel_crtc_state *pipe_config)
9823{
9824        switch (port) {
9825        case PORT_A:
9826                pipe_config->ddi_pll_sel = SKL_DPLL0;
9827                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9828                break;
9829        case PORT_B:
9830                pipe_config->ddi_pll_sel = SKL_DPLL1;
9831                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9832                break;
9833        case PORT_C:
9834                pipe_config->ddi_pll_sel = SKL_DPLL2;
9835                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9836                break;
9837        default:
9838                DRM_ERROR("Incorrect port type\n");
9839        }
9840}
9841
9842static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9843                                enum port port,
9844                                struct intel_crtc_state *pipe_config)
9845{
9846        u32 temp, dpll_ctl1;
9847
9848        temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9849        pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9850
9851        switch (pipe_config->ddi_pll_sel) {
9852        case SKL_DPLL0:
9853                /*
9854                 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
9855                 * of the shared DPLL framework and thus needs to be read out
9856                 * separately
9857                 */
9858                dpll_ctl1 = I915_READ(DPLL_CTRL1);
9859                pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
9860                break;
9861        case SKL_DPLL1:
9862                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9863                break;
9864        case SKL_DPLL2:
9865                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9866                break;
9867        case SKL_DPLL3:
9868                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9869                break;
9870        }
9871}
9872
9873static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9874                                enum port port,
9875                                struct intel_crtc_state *pipe_config)
9876{
9877        pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9878
9879        switch (pipe_config->ddi_pll_sel) {
9880        case PORT_CLK_SEL_WRPLL1:
9881                pipe_config->shared_dpll = DPLL_ID_WRPLL1;
9882                break;
9883        case PORT_CLK_SEL_WRPLL2:
9884                pipe_config->shared_dpll = DPLL_ID_WRPLL2;
9885                break;
9886        case PORT_CLK_SEL_SPLL:
9887                pipe_config->shared_dpll = DPLL_ID_SPLL;
9888                break;
9889        }
9890}
9891
/*
 * Read out the DDI port, PLL and (on HSW/BDW) FDI/PCH state feeding this
 * pipe's transcoder into @pipe_config.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* PLL readout is platform-specific. */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* If a shared DPLL was identified, read back its hardware state. */
	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		/* Recover the FDI lane count and M/N values from hardware. */
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9935
/*
 * Read the full hardware state of @crtc into @pipe_config. Returns true if
 * the pipe is active. Every register bank touched must have its power domain
 * held for the duration of the readout; power_domain_mask tracks what to
 * release at the end.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	unsigned long power_domain_mask;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT(power_domain);

	ret = false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* The eDP transcoder can drive any pipe; detect whether it is wired
	 * to this one and override the default per-pipe transcoder if so. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through - treat an unknown input as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		goto out;
	power_domain_mask |= BIT(power_domain);

	/* Pipe disabled: report inactive (ret stays false). */
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	/* NOTE(review): the two gen >= 9 blocks below look mergeable; kept
	 * separate to preserve the exact init-then-reset ordering. */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_init_scalers(dev, crtc, pipe_config);
	}

	if (INTEL_INFO(dev)->gen >= 9) {
		/* Mark no scaler claimed by this crtc until the pfit readout
		 * below says otherwise. */
		pipe_config->scaler_state.scaler_id = -1;
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT(power_domain);
		if (INTEL_INFO(dev)->gen >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	/* PIPE_MULT only exists for non-eDP transcoders. */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	ret = true;

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return ret;
}
10028
/*
 * Program the 845g/865g cursor registers. These chips have a single cursor
 * (programmed through the pipe A registers) and only allow base/size/stride
 * updates while the cursor is disabled, hence the disable-then-reprogram
 * dance below. Cached values on intel_crtc avoid redundant register writes.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;
		/* 4 bytes per pixel, stride rounded to a power of two. */
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		/* CURSIZE packs height in bits 12+ and width in the low bits. */
		size = (height << 12) | width;
	}

	/* If the cursor is enabled and anything changed, it must be turned
	 * off before base/size/stride can be reprogrammed. */
	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	/* Finally (re-)enable with the new control value, if it changed. */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
10091
/*
 * Program the i9xx+ per-pipe cursor registers. The cursor mode is selected
 * by its width (cursors are square here); writing CURBASE latches all the
 * cursor state on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (plane_state && plane_state->visible) {
		cntl = MCURSOR_GAMMA_ENABLE;
		/* Only 64/128/256 ARGB cursor sizes are supported. */
		switch (plane_state->base.crtc_w) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				MISSING_CASE(plane_state->base.crtc_w);
				return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;

		/* 180 degree rotation is done by the cursor hardware itself. */
		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
			cntl |= CURSOR_ROTATE_180;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
10138
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Write the cursor position (sign/magnitude encoded in CURPOS) and hand off
 * to the generation-specific cursor update routine.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 base = intel_crtc->cursor_addr;
	u32 pos = 0;

	if (plane_state) {
		int x = plane_state->base.crtc_x;
		int y = plane_state->base.crtc_y;

		/* CURPOS uses sign-magnitude: set the sign bit and store the
		 * absolute value for negative coordinates. */
		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		pos |= x << CURSOR_X_SHIFT;

		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= y << CURSOR_Y_SHIFT;

		/* ILK+ do this automagically */
		if (HAS_GMCH_DISPLAY(dev) &&
		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
			/* For 180 degree rotation, point base at the last
			 * pixel so the hardware scans out backwards. */
			base += (plane_state->base.crtc_h *
				 plane_state->base.crtc_w - 1) * 4;
		}
	}

	I915_WRITE(CURPOS(pipe), pos);

	/* 845g/865g have their own, more restrictive cursor hardware. */
	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base, plane_state);
	else
		i9xx_update_cursor(crtc, base, plane_state);
}
10181
10182static bool cursor_size_ok(struct drm_device *dev,
10183                           uint32_t width, uint32_t height)
10184{
10185        if (width == 0 || height == 0)
10186                return false;
10187
10188        /*
10189         * 845g/865g are special in that they are only limited by
10190         * the width of their cursors, the height is arbitrary up to
10191         * the precision of the register. Everything else requires
10192         * square cursors, limited to a few power-of-two sizes.
10193         */
10194        if (IS_845G(dev) || IS_I865G(dev)) {
10195                if ((width & 63) != 0)
10196                        return false;
10197
10198                if (width > (IS_845G(dev) ? 64 : 512))
10199                        return false;
10200
10201                if (height > 1023)
10202                        return false;
10203        } else {
10204                switch (width | height) {
10205                case 256:
10206                case 128:
10207                        if (IS_GEN2(dev))
10208                                return false;
10209                case 64:
10210                        break;
10211                default:
10212                        return false;
10213                }
10214        }
10215
10216        return true;
10217}
10218
10219static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
10220                                 u16 *blue, uint32_t start, uint32_t size)
10221{
10222        int end = (start + size > 256) ? 256 : start + size, i;
10223        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10224
10225        for (i = start; i < end; i++) {
10226                intel_crtc->lut_r[i] = red[i] >> 8;
10227                intel_crtc->lut_g[i] = green[i] >> 8;
10228                intel_crtc->lut_b[i] = blue[i] >> 8;
10229        }
10230
10231        intel_crtc_load_lut(crtc);
10232}
10233
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10239
10240struct drm_framebuffer *
10241__intel_framebuffer_create(struct drm_device *dev,
10242                           struct drm_mode_fb_cmd2 *mode_cmd,
10243                           struct drm_i915_gem_object *obj)
10244{
10245        struct intel_framebuffer *intel_fb;
10246        int ret;
10247
10248        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10249        if (!intel_fb)
10250                return ERR_PTR(-ENOMEM);
10251
10252        ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10253        if (ret)
10254                goto err;
10255
10256        return &intel_fb->base;
10257
10258err:
10259        kfree(intel_fb);
10260        return ERR_PTR(ret);
10261}
10262
10263static struct drm_framebuffer *
10264intel_framebuffer_create(struct drm_device *dev,
10265                         struct drm_mode_fb_cmd2 *mode_cmd,
10266                         struct drm_i915_gem_object *obj)
10267{
10268        struct drm_framebuffer *fb;
10269        int ret;
10270
10271        ret = i915_mutex_lock_interruptible(dev);
10272        if (ret)
10273                return ERR_PTR(ret);
10274        fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10275        mutex_unlock(&dev->struct_mutex);
10276
10277        return fb;
10278}
10279
10280static u32
10281intel_framebuffer_pitch_for_width(int width, int bpp)
10282{
10283        u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10284        return ALIGN(pitch, 64);
10285}
10286
10287static u32
10288intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10289{
10290        u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10291        return PAGE_ALIGN(pitch * mode->vdisplay);
10292}
10293
10294static struct drm_framebuffer *
10295intel_framebuffer_create_for_mode(struct drm_device *dev,
10296                                  struct drm_display_mode *mode,
10297                                  int depth, int bpp)
10298{
10299        struct drm_framebuffer *fb;
10300        struct drm_i915_gem_object *obj;
10301        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10302
10303        obj = i915_gem_alloc_object(dev,
10304                                    intel_framebuffer_size_for_mode(mode, bpp));
10305        if (obj == NULL)
10306                return ERR_PTR(-ENOMEM);
10307
10308        mode_cmd.width = mode->hdisplay;
10309        mode_cmd.height = mode->vdisplay;
10310        mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10311                                                                bpp);
10312        mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10313
10314        fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10315        if (IS_ERR(fb))
10316                drm_gem_object_unreference_unlocked(&obj->base);
10317
10318        return fb;
10319}
10320
10321static struct drm_framebuffer *
10322mode_fits_in_fbdev(struct drm_device *dev,
10323                   struct drm_display_mode *mode)
10324{
10325#ifdef CONFIG_DRM_FBDEV_EMULATION
10326        struct drm_i915_private *dev_priv = dev->dev_private;
10327        struct drm_i915_gem_object *obj;
10328        struct drm_framebuffer *fb;
10329
10330        if (!dev_priv->fbdev)
10331                return NULL;
10332
10333        if (!dev_priv->fbdev->fb)
10334                return NULL;
10335
10336        obj = dev_priv->fbdev->fb->obj;
10337        BUG_ON(!obj);
10338
10339        fb = &dev_priv->fbdev->fb->base;
10340        if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10341                                                               fb->bits_per_pixel))
10342                return NULL;
10343
10344        if (obj->base.size < mode->vdisplay * fb->pitches[0])
10345                return NULL;
10346
10347        drm_framebuffer_reference(fb);
10348        return fb;
10349#else
10350        return NULL;
10351#endif
10352}
10353
10354static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10355                                           struct drm_crtc *crtc,
10356                                           struct drm_display_mode *mode,
10357                                           struct drm_framebuffer *fb,
10358                                           int x, int y)
10359{
10360        struct drm_plane_state *plane_state;
10361        int hdisplay, vdisplay;
10362        int ret;
10363
10364        plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10365        if (IS_ERR(plane_state))
10366                return PTR_ERR(plane_state);
10367
10368        if (mode)
10369                drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10370        else
10371                hdisplay = vdisplay = 0;
10372
10373        ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10374        if (ret)
10375                return ret;
10376        drm_atomic_set_fb_for_plane(plane_state, fb);
10377        plane_state->crtc_x = 0;
10378        plane_state->crtc_y = 0;
10379        plane_state->crtc_w = hdisplay;
10380        plane_state->crtc_h = vdisplay;
10381        plane_state->src_x = x << 16;
10382        plane_state->src_y = y << 16;
10383        plane_state->src_w = hdisplay << 16;
10384        plane_state->src_h = vdisplay << 16;
10385
10386        return 0;
10387}
10388
10389bool intel_get_load_detect_pipe(struct drm_connector *connector,
10390                                struct drm_display_mode *mode,
10391                                struct intel_load_detect_pipe *old,
10392                                struct drm_modeset_acquire_ctx *ctx)
10393{
10394        struct intel_crtc *intel_crtc;
10395        struct intel_encoder *intel_encoder =
10396                intel_attached_encoder(connector);
10397        struct drm_crtc *possible_crtc;
10398        struct drm_encoder *encoder = &intel_encoder->base;
10399        struct drm_crtc *crtc = NULL;
10400        struct drm_device *dev = encoder->dev;
10401        struct drm_framebuffer *fb;
10402        struct drm_mode_config *config = &dev->mode_config;
10403        struct drm_atomic_state *state = NULL, *restore_state = NULL;
10404        struct drm_connector_state *connector_state;
10405        struct intel_crtc_state *crtc_state;
10406        int ret, i = -1;
10407
10408        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10409                      connector->base.id, connector->name,
10410                      encoder->base.id, encoder->name);
10411
10412        old->restore_state = NULL;
10413
10414retry:
10415        ret = drm_modeset_lock(&config->connection_mutex, ctx);
10416        if (ret)
10417                goto fail;
10418
10419        /*
10420         * Algorithm gets a little messy:
10421         *
10422         *   - if the connector already has an assigned crtc, use it (but make
10423         *     sure it's on first)
10424         *
10425         *   - try to find the first unused crtc that can drive this connector,
10426         *     and use that if we find one
10427         */
10428
10429        /* See if we already have a CRTC for this connector */
10430        if (connector->state->crtc) {
10431                crtc = connector->state->crtc;
10432
10433                ret = drm_modeset_lock(&crtc->mutex, ctx);
10434                if (ret)
10435                        goto fail;
10436
10437                /* Make sure the crtc and connector are running */
10438                goto found;
10439        }
10440
10441        /* Find an unused one (if possible) */
10442        for_each_crtc(dev, possible_crtc) {
10443                i++;
10444                if (!(encoder->possible_crtcs & (1 << i)))
10445                        continue;
10446
10447                ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10448                if (ret)
10449                        goto fail;
10450
10451                if (possible_crtc->state->enable) {
10452                        drm_modeset_unlock(&possible_crtc->mutex);
10453                        continue;
10454                }
10455
10456                crtc = possible_crtc;
10457                break;
10458        }
10459
10460        /*
10461         * If we didn't find an unused CRTC, don't use any.
10462         */
10463        if (!crtc) {
10464                DRM_DEBUG_KMS("no pipe available for load-detect\n");
10465                goto fail;
10466        }
10467
10468found:
10469        intel_crtc = to_intel_crtc(crtc);
10470
10471        ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10472        if (ret)
10473                goto fail;
10474
10475        state = drm_atomic_state_alloc(dev);
10476        restore_state = drm_atomic_state_alloc(dev);
10477        if (!state || !restore_state) {
10478                ret = -ENOMEM;
10479                goto fail;
10480        }
10481
10482        state->acquire_ctx = ctx;
10483        restore_state->acquire_ctx = ctx;
10484
10485        connector_state = drm_atomic_get_connector_state(state, connector);
10486        if (IS_ERR(connector_state)) {
10487                ret = PTR_ERR(connector_state);
10488                goto fail;
10489        }
10490
10491        ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10492        if (ret)
10493                goto fail;
10494
10495        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10496        if (IS_ERR(crtc_state)) {
10497                ret = PTR_ERR(crtc_state);
10498                goto fail;
10499        }
10500
10501        crtc_state->base.active = crtc_state->base.enable = true;
10502
10503        if (!mode)
10504                mode = &load_detect_mode;
10505
10506        /* We need a framebuffer large enough to accommodate all accesses
10507         * that the plane may generate whilst we perform load detection.
10508         * We can not rely on the fbcon either being present (we get called
10509         * during its initialisation to detect all boot displays, or it may
10510         * not even exist) or that it is large enough to satisfy the
10511         * requested mode.
10512         */
10513        fb = mode_fits_in_fbdev(dev, mode);
10514        if (fb == NULL) {
10515                DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10516                fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10517        } else
10518                DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10519        if (IS_ERR(fb)) {
10520                DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10521                goto fail;
10522        }
10523
10524        ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10525        if (ret)
10526                goto fail;
10527
10528        drm_framebuffer_unreference(fb);
10529
10530        ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10531        if (ret)
10532                goto fail;
10533
10534        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10535        if (!ret)
10536                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10537        if (!ret)
10538                ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
10539        if (ret) {
10540                DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10541                goto fail;
10542        }
10543
10544        ret = drm_atomic_commit(state);
10545        if (ret) {
10546                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10547                goto fail;
10548        }
10549
10550        old->restore_state = restore_state;
10551
10552        /* let the connector get through one full cycle before testing */
10553        intel_wait_for_vblank(dev, intel_crtc->pipe);
10554        return true;
10555
10556fail:
10557        drm_atomic_state_free(state);
10558        drm_atomic_state_free(restore_state);
10559        restore_state = state = NULL;
10560
10561        if (ret == -EDEADLK) {
10562                drm_modeset_backoff(ctx);
10563                goto retry;
10564        }
10565
10566        return false;
10567}
10568
10569void intel_release_load_detect_pipe(struct drm_connector *connector,
10570                                    struct intel_load_detect_pipe *old,
10571                                    struct drm_modeset_acquire_ctx *ctx)
10572{
10573        struct intel_encoder *intel_encoder =
10574                intel_attached_encoder(connector);
10575        struct drm_encoder *encoder = &intel_encoder->base;
10576        struct drm_atomic_state *state = old->restore_state;
10577        int ret;
10578
10579        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10580                      connector->base.id, connector->name,
10581                      encoder->base.id, encoder->name);
10582
10583        if (!state)
10584                return;
10585
10586        ret = drm_atomic_commit(state);
10587        if (ret) {
10588                DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10589                drm_atomic_state_free(state);
10590        }
10591}
10592
10593static int i9xx_pll_refclk(struct drm_device *dev,
10594                           const struct intel_crtc_state *pipe_config)
10595{
10596        struct drm_i915_private *dev_priv = dev->dev_private;
10597        u32 dpll = pipe_config->dpll_hw_state.dpll;
10598
10599        if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10600                return dev_priv->vbt.lvds_ssc_freq;
10601        else if (HAS_PCH_SPLIT(dev))
10602                return 120000;
10603        else if (!IS_GEN2(dev))
10604                return 96000;
10605        else
10606                return 48000;
10607}
10608
/* Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values stashed in
 * pipe_config->dpll_hw_state back into M/N/P divisors and stores the
 * resulting clock in pipe_config->port_clock.  Bails out (leaving
 * port_clock untouched) on an unrecognized DPLL mode.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever of the FP0/FP1 divisor sets the DPLL is using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Pineview lays out the N/M2 fields differently and encodes N
	 * such that it is recovered with ffs()-1. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is recovered with ffs() - presumably a one-hot
		 * encoding in the register; confirm against bspec. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: LVDS state lives in the LVDS register (absent on
		 * i830, hence the 0). */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10698
10699int intel_dotclock_calculate(int link_freq,
10700                             const struct intel_link_m_n *m_n)
10701{
10702        /*
10703         * The calculation for the data clock is:
10704         * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10705         * But we want to avoid losing precison if possible, so:
10706         * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10707         *
10708         * and the link clock is simpler:
10709         * link_clock = (m * link_clock) / n
10710         */
10711
10712        if (!m_n->link_n)
10713                return 0;
10714
10715        return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10716}
10717
10718static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10719                                   struct intel_crtc_state *pipe_config)
10720{
10721        struct drm_device *dev = crtc->base.dev;
10722
10723        /* read out port_clock from the DPLL */
10724        i9xx_crtc_clock_get(crtc, pipe_config);
10725
10726        /*
10727         * This value does not include pixel_multiplier.
10728         * We will check that port_clock and adjusted_mode.crtc_clock
10729         * agree once we know their relationship in the encoder's
10730         * get_config() function.
10731         */
10732        pipe_config->base.adjusted_mode.crtc_clock =
10733                intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
10734                                         &pipe_config->fdi_m_n);
10735}
10736
/** Returns the currently programmed mode of the given pipe.
 *
 * Reads the transcoder timing registers and the DPLL/FP registers back
 * into a freshly allocated drm_display_mode.  The caller owns (and must
 * kfree) the returned mode.  Returns NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/* Scratch pipe_config used only to feed i9xx_crtc_clock_get(). */
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	/* Each register packs two 16-bit fields; the +1 converts the
	 * hardware's value-minus-one encoding back to timing counts. */
	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}
10792
/*
 * Mark the GPU busy: take a runtime-PM reference and kick the RPS
 * logic so the GPU may clock up.  Idempotent while already busy.
 * Paired with intel_mark_idle().
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	/* Hold the device out of runtime suspend while it is busy. */
	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}
10806
10807void intel_mark_idle(struct drm_device *dev)
10808{
10809        struct drm_i915_private *dev_priv = dev->dev_private;
10810
10811        if (!dev_priv->mm.busy)
10812                return;
10813
10814        dev_priv->mm.busy = false;
10815
10816        if (INTEL_INFO(dev)->gen >= 6)
10817                gen6_rps_idle(dev->dev_private);
10818
10819        intel_runtime_pm_put(dev_priv);
10820}
10821
10822static void intel_crtc_destroy(struct drm_crtc *crtc)
10823{
10824        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10825        struct drm_device *dev = crtc->dev;
10826        struct intel_unpin_work *work;
10827
10828        spin_lock_irq(&dev->event_lock);
10829        work = intel_crtc->unpin_work;
10830        intel_crtc->unpin_work = NULL;
10831        spin_unlock_irq(&dev->event_lock);
10832
10833        if (work) {
10834                cancel_work_sync(&work->work);
10835                kfree(work);
10836        }
10837
10838        drm_crtc_cleanup(crtc);
10839
10840        kfree(intel_crtc);
10841}
10842
/*
 * Work handler finishing a completed page flip: unpins the old
 * framebuffer and drops the GEM-object, request and fb references the
 * flip held, then notifies frontbuffer tracking and FBC.  Runs in
 * process context from the work item embedded in intel_unpin_work.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	/* struct_mutex covers the unpin and the GEM reference drops. */
	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	/* NOTE(review): assumes the flip-queueing path incremented
	 * unpin_work_count - confirm against the queue side. */
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
10868
/*
 * Complete the pending page flip on @crtc, if any.  Bails out when no
 * work is queued or the flip has not yet reached INTEL_FLIP_COMPLETE
 * (i.e. intel_prepare_page_flip() has not run for it).
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
10899
10900void intel_finish_page_flip(struct drm_device *dev, int pipe)
10901{
10902        struct drm_i915_private *dev_priv = dev->dev_private;
10903        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10904
10905        do_intel_finish_page_flip(dev, crtc);
10906}
10907
10908void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10909{
10910        struct drm_i915_private *dev_priv = dev->dev_private;
10911        struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10912
10913        do_intel_finish_page_flip(dev, crtc);
10914}
10915
10916/* Is 'a' after or equal to 'b'? */
10917static bool g4x_flip_count_after_eq(u32 a, u32 b)
10918{
10919        return !((a - b) & 0x80000000);
10920}
10921
/*
 * Decide whether the flip-done event corresponds to the pending
 * unpin_work (true) or is stale/spurious (false).  Called with
 * dev->event_lock held, from intel_prepare_page_flip().
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* After a GPU reset the flip is considered done regardless. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers doen't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}
10967
/*
 * Flip-done irq entry point: if the pending unpin_work's flip has
 * really completed in hardware, advance work->pending from
 * INTEL_FLIP_PENDING to INTEL_FLIP_COMPLETE so the finish path can
 * complete it (inc_not_zero skips works never marked active).
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
10989
/*
 * Mark a queued flip as in-flight (INTEL_FLIP_PENDING) so the
 * flip-done irq path can advance and complete it.  The barriers order
 * the work's fields against the pending update - do not reorder.
 */
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}
10998
/*
 * Queue a CS-based page flip for gen2 via MI_DISPLAY_FLIP.
 *
 * Emits exactly the 6 dwords reserved below: a wait on the plane's
 * flip-pending event, a NOOP, then the flip packet pointing at
 * unpin_work->gtt_offset (populated by the flip-queueing path).
 * Returns 0 on success or the error from intel_ring_begin().
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	/* Flag the flip in-flight before the flip-done irq can observe it. */
	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
11033
/*
 * Queue a CS-based page flip for gen3 via MI_DISPLAY_FLIP_I915.
 *
 * Same scheme as gen2 (wait for the previous flip, then emit the flip
 * packet), emitting exactly the 6 dwords reserved below.  Returns 0 on
 * success or the error from intel_ring_begin().
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Serialize against the previous flip on this plane. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	/* Flag the flip in-flight before the flip-done irq can observe it. */
	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
11065
/*
 * Queue a CS-based page flip for gen4/i965-class hardware.
 *
 * No wait packet is needed here; emits exactly the 4 dwords reserved
 * below.  Returns 0 on success or the error from intel_ring_begin().
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	/* Tiling mode rides in the low bits of the surface address dword. */
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	/* Flag the flip in-flight before the flip-done irq can observe it. */
	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
11104
/*
 * Queue a CS-based page flip for gen6.
 *
 * Like gen4, but the tiling mode is carried in the pitch dword rather
 * than the surface address.  Emits exactly the 4 dwords reserved
 * below.  Returns 0 on success or the error from intel_ring_begin().
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	/* Flag the flip in-flight before the flip-done irq can observe it. */
	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
11140
/*
 * Queue a page flip on Gen7/Gen8 (IVB and later) via MI_DISPLAY_FLIP.
 *
 * On the render ring this additionally emits an LRI/SRM pair around DERRMR
 * to unmask the flip-done events (see the comment below for why), which is
 * why the packet length varies by ring and by gen.
 *
 * Returns 0 on success, or a negative error code (-ENODEV for an unknown
 * plane, otherwise whatever the ring reservation returned).
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
{
        struct intel_engine_cs *ring = req->ring;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t plane_bit = 0;
        int len, ret;

        /* IVB+ encodes the target plane differently from older gens. */
        switch (intel_crtc->plane) {
        case PLANE_A:
                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
                break;
        case PLANE_B:
                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
                break;
        case PLANE_C:
                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
                break;
        default:
                WARN_ONCE(1, "unknown plane in flip command\n");
                return -ENODEV;
        }

        /* Base flip packet is 4 dwords; RCS needs the DERRMR LRI+SRM too. */
        len = 4;
        if (ring->id == RCS) {
                len += 6;
                /*
                 * On Gen 8, SRM is now taking an extra dword to accommodate
                 * 48bits addresses, and we need a NOOP for the batch size to
                 * stay even.
                 */
                if (IS_GEN8(dev))
                        len += 2;
        }

        /*
         * BSpec MI_DISPLAY_FLIP for IVB:
         * "The full packet must be contained within the same cache line."
         *
         * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
         * cacheline, if we ever start emitting more commands before
         * the MI_DISPLAY_FLIP we may need to first emit everything else,
         * then do the cacheline alignment, and finally emit the
         * MI_DISPLAY_FLIP.
         */
        ret = intel_ring_cacheline_align(req);
        if (ret)
                return ret;

        ret = intel_ring_begin(req, len);
        if (ret)
                return ret;

        /* Unmask the flip-done completion message. Note that the bspec says that
         * we should do this for both the BCS and RCS, and that we must not unmask
         * more than one flip event at any time (or ensure that one flip message
         * can be sent by waiting for flip-done prior to queueing new flips).
         * Experimentation says that BCS works despite DERRMR masking all
         * flip-done completion events and that unmasking all planes at once
         * for the RCS also doesn't appear to drop events. Setting the DERRMR
         * to zero does lead to lockups within MI_DISPLAY_FLIP.
         */
        if (ring->id == RCS) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit_reg(ring, DERRMR);
                intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
                                        DERRMR_PIPEB_PRI_FLIP_DONE |
                                        DERRMR_PIPEC_PRI_FLIP_DONE));
                /* Store DERRMR to scratch so the unmask survives the flip;
                 * Gen8 SRM takes a 64-bit address (extra dword below). */
                if (IS_GEN8(dev))
                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
                                              MI_SRM_LRM_GLOBAL_GTT);
                else
                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
                                              MI_SRM_LRM_GLOBAL_GTT);
                intel_ring_emit_reg(ring, DERRMR);
                intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
                if (IS_GEN8(dev)) {
                        intel_ring_emit(ring, 0);
                        intel_ring_emit(ring, MI_NOOP);
                }
        }

        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
        intel_ring_emit(ring, (MI_NOOP));

        /* Mark the flip pending before the commands can execute. */
        intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
}
11235
11236static bool use_mmio_flip(struct intel_engine_cs *ring,
11237                          struct drm_i915_gem_object *obj)
11238{
11239        /*
11240         * This is not being used for older platforms, because
11241         * non-availability of flip done interrupt forces us to use
11242         * CS flips. Older platforms derive flip done using some clever
11243         * tricks involving the flip_pending status bits and vblank irqs.
11244         * So using MMIO flips there would disrupt this mechanism.
11245         */
11246
11247        if (ring == NULL)
11248                return true;
11249
11250        if (INTEL_INFO(ring->dev)->gen < 5)
11251                return false;
11252
11253        if (i915.use_mmio_flip < 0)
11254                return false;
11255        else if (i915.use_mmio_flip > 0)
11256                return true;
11257        else if (i915.enable_execlists)
11258                return true;
11259        else if (obj->base.dma_buf &&
11260                 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11261                                                       false))
11262                return true;
11263        else
11264                return ring != i915_gem_request_get_ring(obj->last_write_req);
11265}
11266
/*
 * Perform an MMIO page flip on Skylake+ universal planes by reprogramming
 * PLANE_CTL / PLANE_STRIDE and then latching everything with a PLANE_SURF
 * write (the hardware makes PLANE_CTL/PLANE_STRIDE updates take effect on
 * the PLANE_SURF write, so the flip is atomic).
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
                             unsigned int rotation,
                             struct intel_unpin_work *work)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
        const enum pipe pipe = intel_crtc->pipe;
        u32 ctl, stride, tile_height;

        /* Re-derive the tiling bits from the new fb's modifier. */
        ctl = I915_READ(PLANE_CTL(pipe, 0));
        ctl &= ~PLANE_CTL_TILED_MASK;
        switch (fb->modifier[0]) {
        case DRM_FORMAT_MOD_NONE:
                break;
        case I915_FORMAT_MOD_X_TILED:
                ctl |= PLANE_CTL_TILED_X;
                break;
        case I915_FORMAT_MOD_Y_TILED:
                ctl |= PLANE_CTL_TILED_Y;
                break;
        case I915_FORMAT_MOD_Yf_TILED:
                ctl |= PLANE_CTL_TILED_YF;
                break;
        default:
                /* Unknown modifier: warn and leave the plane linear. */
                MISSING_CASE(fb->modifier[0]);
        }

        /*
         * The stride is either expressed as a multiple of 64 bytes chunks for
         * linear buffers or in number of tiles for tiled buffers.
         */
        if (intel_rotation_90_or_270(rotation)) {
                /* stride = Surface height in tiles */
                tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
                stride = DIV_ROUND_UP(fb->height, tile_height);
        } else {
                stride = fb->pitches[0] /
                        intel_fb_stride_alignment(dev_priv, fb->modifier[0],
                                                  fb->pixel_format);
        }

        /*
         * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
         * PLANE_SURF updates, the update is then guaranteed to be atomic.
         */
        I915_WRITE(PLANE_CTL(pipe, 0), ctl);
        I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

        I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
        /* Posting read flushes the writes to the hardware. */
        POSTING_READ(PLANE_SURF(pipe, 0));
}
11319
/*
 * Perform an MMIO page flip on Ironlake-style display hardware: update the
 * tiling bit in DSPCNTR to match the new fb, then latch the flip by writing
 * the new surface address to DSPSURF.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
                             struct intel_unpin_work *work)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_framebuffer *intel_fb =
                to_intel_framebuffer(intel_crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        i915_reg_t reg = DSPCNTR(intel_crtc->plane);
        u32 dspcntr;

        dspcntr = I915_READ(reg);

        /* Keep the plane's tiling setting in sync with the new object. */
        if (obj->tiling_mode != I915_TILING_NONE)
                dspcntr |= DISPPLANE_TILED;
        else
                dspcntr &= ~DISPPLANE_TILED;

        I915_WRITE(reg, dspcntr);

        /* The DSPSURF write is what actually triggers the flip. */
        I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
        POSTING_READ(DSPSURF(intel_crtc->plane));
}
11343
11344/*
11345 * XXX: This is the temporary way to update the plane registers until we get
11346 * around to using the usual plane update functions for MMIO flips
11347 */
11348static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11349{
11350        struct intel_crtc *crtc = mmio_flip->crtc;
11351        struct intel_unpin_work *work;
11352
11353        spin_lock_irq(&crtc->base.dev->event_lock);
11354        work = crtc->unpin_work;
11355        spin_unlock_irq(&crtc->base.dev->event_lock);
11356        if (work == NULL)
11357                return;
11358
11359        intel_mark_page_flip_active(work);
11360
11361        intel_pipe_update_start(crtc);
11362
11363        if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11364                skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11365        else
11366                /* use_mmio_flip() retricts MMIO flips to ilk+ */
11367                ilk_do_mmio_flip(crtc, work);
11368
11369        intel_pipe_update_end(crtc);
11370}
11371
/*
 * Worker for MMIO flips: wait for any outstanding GPU rendering (and any
 * dma-buf fences) on the new framebuffer to complete, then perform the
 * actual register flip and free the request.  Runs from the system
 * workqueue, scheduled by intel_queue_mmio_flip().
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
        struct intel_mmio_flip *mmio_flip =
                container_of(work, struct intel_mmio_flip, work);
        struct intel_framebuffer *intel_fb =
                to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;

        /* Wait for the last GPU write to the object before flipping to it. */
        if (mmio_flip->req) {
                WARN_ON(__i915_wait_request(mmio_flip->req,
                                            mmio_flip->crtc->reset_counter,
                                            false, NULL,
                                            &mmio_flip->i915->rps.mmioflips));
                i915_gem_request_unreference__unlocked(mmio_flip->req);
        }

        /* For framebuffer backed by dmabuf, wait for fence */
        if (obj->base.dma_buf)
                WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
                                                            false, false,
                                                            MAX_SCHEDULE_TIMEOUT) < 0);

        intel_do_mmio_flip(mmio_flip);
        /* mmio_flip was allocated in intel_queue_mmio_flip(); we own it. */
        kfree(mmio_flip);
}
11397
11398static int intel_queue_mmio_flip(struct drm_device *dev,
11399                                 struct drm_crtc *crtc,
11400                                 struct drm_i915_gem_object *obj)
11401{
11402        struct intel_mmio_flip *mmio_flip;
11403
11404        mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11405        if (mmio_flip == NULL)
11406                return -ENOMEM;
11407
11408        mmio_flip->i915 = to_i915(dev);
11409        mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11410        mmio_flip->crtc = to_intel_crtc(crtc);
11411        mmio_flip->rotation = crtc->primary->state->rotation;
11412
11413        INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11414        schedule_work(&mmio_flip->work);
11415
11416        return 0;
11417}
11418
/*
 * Fallback queue_flip hook for platforms without CS flip support; always
 * fails with -ENODEV so callers fall back to other flip mechanisms.
 */
static int intel_default_queue_flip(struct drm_device *dev,
                                    struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
                                    struct drm_i915_gem_object *obj,
                                    struct drm_i915_gem_request *req,
                                    uint32_t flags)
{
        return -ENODEV;
}
11428
/*
 * Heuristically decide whether a queued page flip has stalled (i.e. the
 * flip-done interrupt was missed).  Must be called with dev->event_lock
 * held — it inspects intel_crtc->unpin_work, which that lock protects
 * (callers in this file hold it; TODO confirm for any new callers).
 *
 * Returns true if the flip should be treated as completed/stuck.
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
                                         struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work = intel_crtc->unpin_work;
        u32 addr;

        /* Already seen the flip-done event: definitely complete. */
        if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
                return true;

        /* Flip commands not yet emitted: nothing can be stuck yet. */
        if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
                return false;

        if (!work->enable_stall_check)
                return false;

        /* Record the vblank at which the flip became ready (all rendering
         * finished); the stall timeout counts from this point. */
        if (work->flip_ready_vblank == 0) {
                if (work->flip_queued_req &&
                    !i915_gem_request_completed(work->flip_queued_req, true))
                        return false;

                work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
        }

        /* Give the hardware up to 3 vblanks to deliver flip-done. */
        if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
                return false;

        /* Potential stall - if we see that the flip has happened,
         * assume a missed interrupt. */
        if (INTEL_INFO(dev)->gen >= 4)
                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
        else
                addr = I915_READ(DSPADDR(intel_crtc->plane));

        /* There is a potential issue here with a false positive after a flip
         * to the same address. We could address this by checking for a
         * non-incrementing frame counter.
         */
        return addr == work->gtt_offset;
}
11470
/*
 * Called from the vblank interrupt handler for @pipe to detect and recover
 * stuck page flips: a flip that appears to have landed without its
 * flip-done interrupt is force-completed, and long-pending flips get an
 * RPS boost to speed up the rendering they are waiting on.
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;

        /* This function is only valid in interrupt context. */
        WARN_ON(!in_interrupt());

        if (crtc == NULL)
                return;

        /* Plain spin_lock: we are already in hardirq context. */
        spin_lock(&dev->event_lock);
        work = intel_crtc->unpin_work;
        if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
                WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
                         work->flip_queued_vblank, drm_vblank_count(dev, pipe));
                page_flip_completed(intel_crtc);
                work = NULL;
        }
        /* Flip still pending after more than one vblank: boost the GPU so
         * the outstanding rendering finishes sooner. */
        if (work != NULL &&
            drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
                intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
        spin_unlock(&dev->event_lock);
}
11496
/*
 * Implementation of drm_crtc_funcs.page_flip for i915: queue an asynchronous
 * flip of the primary plane to @fb, sending @event on completion.
 *
 * The flip is performed either via MMIO (a scheduled worker that waits for
 * rendering, then writes the plane registers) or via a CS flip command
 * emitted on a ring, chosen by use_mmio_flip().  Cleanup on failure unwinds
 * through the goto labels in reverse order of acquisition; on -EIO (GPU
 * terminally wedged) the flip is emulated with a synchronous atomic commit
 * (out_hang path).
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event,
                                uint32_t page_flip_flags)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *primary = crtc->primary;
        enum pipe pipe = intel_crtc->pipe;
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
        bool mmio_flip;
        struct drm_i915_gem_request *request = NULL;
        int ret;

        /*
         * drm_mode_page_flip_ioctl() should already catch this, but double
         * check to be safe.  In the future we may enable pageflipping from
         * a disabled primary plane.
         */
        if (WARN_ON(intel_fb_obj(old_fb) == NULL))
                return -EBUSY;

        /* Can't change pixel format via MI display flips. */
        if (fb->pixel_format != crtc->primary->fb->pixel_format)
                return -EINVAL;

        /*
         * TILEOFF/LINOFF registers can't be changed via MI display flips.
         * Note that pitch changes could also affect these register.
         */
        if (INTEL_INFO(dev)->gen > 3 &&
            (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
             fb->pitches[0] != crtc->primary->fb->pitches[0]))
                return -EINVAL;

        /* GPU is dead: emulate the flip with an atomic commit below. */
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                goto out_hang;

        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;

        work->event = event;
        work->crtc = crtc;
        work->old_fb = old_fb;
        INIT_WORK(&work->work, intel_unpin_work_fn);

        /* Hold a vblank reference for the duration of the flip. */
        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                goto free_work;

        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irq(&dev->event_lock);
        if (intel_crtc->unpin_work) {
                /* Before declaring the flip queue wedged, check if
                 * the hardware completed the operation behind our backs.
                 */
                if (__intel_pageflip_stall_check(dev, crtc)) {
                        DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
                        page_flip_completed(intel_crtc);
                } else {
                        DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                        spin_unlock_irq(&dev->event_lock);

                        drm_crtc_vblank_put(crtc);
                        kfree(work);
                        return -EBUSY;
                }
        }
        intel_crtc->unpin_work = work;
        spin_unlock_irq(&dev->event_lock);

        /* Throttle: don't let unpin work pile up more than one deep. */
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
                flush_workqueue(dev_priv->wq);

        /* Reference the objects for the scheduled work. */
        drm_framebuffer_reference(work->old_fb);
        drm_gem_object_reference(&obj->base);

        crtc->primary->fb = fb;
        update_state_fb(crtc->primary);
        intel_fbc_pre_update(intel_crtc);

        work->pending_flip_obj = obj;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto cleanup;

        atomic_inc(&intel_crtc->unpin_work_count);
        intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
                work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

        /* Pick the ring for a CS flip (per-platform quirks below). */
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                ring = &dev_priv->ring[BCS];
                if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
                        /* vlv: DISPLAY_FLIP fails to change tiling */
                        ring = NULL;
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                ring = &dev_priv->ring[BCS];
        } else if (INTEL_INFO(dev)->gen >= 7) {
                ring = i915_gem_request_get_ring(obj->last_write_req);
                if (ring == NULL || ring->id != RCS)
                        ring = &dev_priv->ring[BCS];
        } else {
                ring = &dev_priv->ring[RCS];
        }

        mmio_flip = use_mmio_flip(ring, obj);

        /* When using CS flips, we want to emit semaphores between rings.
         * However, when using mmio flips we will create a task to do the
         * synchronisation, so all we want here is to pin the framebuffer
         * into the display plane and skip any waits.
         */
        if (!mmio_flip) {
                ret = i915_gem_object_sync(obj, ring, &request);
                if (ret)
                        goto cleanup_pending;
        }

        ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
                                         crtc->primary->state);
        if (ret)
                goto cleanup_pending;

        work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
                                                  obj, 0);
        work->gtt_offset += intel_crtc->dspaddr_offset;

        if (mmio_flip) {
                ret = intel_queue_mmio_flip(dev, crtc, obj);
                if (ret)
                        goto cleanup_unpin;

                /* Track the last write so the stall check can wait on it. */
                i915_gem_request_assign(&work->flip_queued_req,
                                        obj->last_write_req);
        } else {
                if (!request) {
                        request = i915_gem_request_alloc(ring, NULL);
                        if (IS_ERR(request)) {
                                ret = PTR_ERR(request);
                                goto cleanup_unpin;
                        }
                }

                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
                                                   page_flip_flags);
                if (ret)
                        goto cleanup_unpin;

                i915_gem_request_assign(&work->flip_queued_req, request);
        }

        if (request)
                i915_add_request_no_flush(request);

        work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
        work->enable_stall_check = true;

        /* Move frontbuffer tracking from the old object to the new one. */
        i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
                          to_intel_plane(primary)->frontbuffer_bit);
        mutex_unlock(&dev->struct_mutex);

        intel_frontbuffer_flip_prepare(dev,
                                       to_intel_plane(primary)->frontbuffer_bit);

        trace_i915_flip_request(intel_crtc->plane, obj);

        return 0;

/* Error unwind: each label undoes one stage, in reverse order. */
cleanup_unpin:
        intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
        if (!IS_ERR_OR_NULL(request))
                i915_gem_request_cancel(request);
        atomic_dec(&intel_crtc->unpin_work_count);
        mutex_unlock(&dev->struct_mutex);
cleanup:
        crtc->primary->fb = old_fb;
        update_state_fb(crtc->primary);

        drm_gem_object_unreference_unlocked(&obj->base);
        drm_framebuffer_unreference(work->old_fb);

        spin_lock_irq(&dev->event_lock);
        intel_crtc->unpin_work = NULL;
        spin_unlock_irq(&dev->event_lock);

        drm_crtc_vblank_put(crtc);
free_work:
        kfree(work);

        /* On GPU hang, fall back to a full (synchronous) atomic commit so
         * userspace still gets its flip and completion event. */
        if (ret == -EIO) {
                struct drm_atomic_state *state;
                struct drm_plane_state *plane_state;

out_hang:
                state = drm_atomic_state_alloc(dev);
                if (!state)
                        return -ENOMEM;
                state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
                plane_state = drm_atomic_get_plane_state(state, primary);
                ret = PTR_ERR_OR_ZERO(plane_state);
                if (!ret) {
                        drm_atomic_set_fb_for_plane(plane_state, fb);

                        ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
                        if (!ret)
                                ret = drm_atomic_commit(state);
                }

                /* Deadlock against another modeset: back off and retry. */
                if (ret == -EDEADLK) {
                        drm_modeset_backoff(state->acquire_ctx);
                        drm_atomic_state_clear(state);
                        goto retry;
                }

                if (ret)
                        drm_atomic_state_free(state);

                if (ret == 0 && event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_send_vblank_event(dev, pipe, event);
                        spin_unlock_irq(&dev->event_lock);
                }
        }
        return ret;
}
11734
11735
11736/**
11737 * intel_wm_need_update - Check whether watermarks need updating
11738 * @plane: drm plane
11739 * @state: new plane state
11740 *
11741 * Check current plane state versus the new one to determine whether
11742 * watermarks need to be recalculated.
11743 *
11744 * Returns true or false.
11745 */
11746static bool intel_wm_need_update(struct drm_plane *plane,
11747                                 struct drm_plane_state *state)
11748{
11749        struct intel_plane_state *new = to_intel_plane_state(state);
11750        struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11751
11752        /* Update watermarks on tiling or size changes. */
11753        if (new->visible != cur->visible)
11754                return true;
11755
11756        if (!cur->base.fb || !new->base.fb)
11757                return false;
11758
11759        if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11760            cur->base.rotation != new->base.rotation ||
11761            drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11762            drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11763            drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11764            drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11765                return true;
11766
11767        return false;
11768}
11769
11770static bool needs_scaling(struct intel_plane_state *state)
11771{
11772        int src_w = drm_rect_width(&state->src) >> 16;
11773        int src_h = drm_rect_height(&state->src) >> 16;
11774        int dst_w = drm_rect_width(&state->dst);
11775        int dst_h = drm_rect_height(&state->dst);
11776
11777        return (src_w != dst_w || src_h != dst_h);
11778}
11779
/*
 * Derive the per-crtc bookkeeping flags (watermark updates, cxsr disable,
 * fbc, frontbuffer bits, …) implied by a plane state change during atomic
 * check.  Called with the new @crtc_state and the plane's proposed
 * @plane_state; the plane's committed state is read from plane->state.
 *
 * Returns 0 on success or a negative error code (currently only from the
 * skl scaler setup).
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
                                    struct drm_plane_state *plane_state)
{
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
        struct drm_crtc *crtc = crtc_state->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *plane = plane_state->plane;
        struct drm_device *dev = crtc->dev;
        struct intel_plane_state *old_plane_state =
                to_intel_plane_state(plane->state);
        int idx = intel_crtc->base.base.id, ret;
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = crtc->state->active;
        bool is_crtc_enabled = crtc_state->active;
        bool turn_off, turn_on, visible, was_visible;
        struct drm_framebuffer *fb = plane_state->fb;

        /* Gen9+ non-cursor planes may need a hardware scaler allocated. */
        if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
            plane->type != DRM_PLANE_TYPE_CURSOR) {
                ret = skl_update_scaler_plane(
                        to_intel_crtc_state(crtc_state),
                        to_intel_plane_state(plane_state));
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->visible;
        visible = to_intel_plane_state(plane_state)->visible;

        /* A plane cannot have been visible on a disabled crtc. */
        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         */
        if (!is_crtc_enabled)
                to_intel_plane_state(plane_state)->visible = visible = false;

        /* Invisible before and after: nothing to update. */
        if (!was_visible && !visible)
                return 0;

        if (fb != old_plane_state->base.fb)
                pipe_config->fb_changed = true;

        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
                         plane->base.id, fb ? fb->base.id : -1);

        DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
                         plane->base.id, was_visible, visible,
                         turn_off, turn_on, mode_changed);

        if (turn_on || turn_off) {
                pipe_config->wm_changed = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->type != DRM_PLANE_TYPE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (intel_wm_need_update(plane, plane_state)) {
                pipe_config->wm_changed = true;
        }

        /* Record which frontbuffer bits are affected by this update. */
        if (visible || was_visible)
                intel_crtc->atomic.fb_bits |=
                        to_intel_plane(plane)->frontbuffer_bit;

        switch (plane->type) {
        case DRM_PLANE_TYPE_PRIMARY:
                intel_crtc->atomic.post_enable_primary = turn_on;
                intel_crtc->atomic.update_fbc = true;

                break;
        case DRM_PLANE_TYPE_CURSOR:
                break;
        case DRM_PLANE_TYPE_OVERLAY:
                /*
                 * WaCxSRDisabledForSpriteScaling:ivb
                 *
                 * cstate->update_wm was already set above, so this flag will
                 * take effect when we commit and program watermarks.
                 */
                if (IS_IVYBRIDGE(dev) &&
                    needs_scaling(to_intel_plane_state(plane_state)) &&
                    !needs_scaling(old_plane_state))
                        pipe_config->disable_lp_wm = true;

                break;
        }
        return 0;
}
11874
11875static bool encoders_cloneable(const struct intel_encoder *a,
11876                               const struct intel_encoder *b)
11877{
11878        /* masks could be asymmetric, so check both ways */
11879        return a == b || (a->cloneable & (1 << b->type) &&
11880                          b->cloneable & (1 << a->type));
11881}
11882
11883static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11884                                         struct intel_crtc *crtc,
11885                                         struct intel_encoder *encoder)
11886{
11887        struct intel_encoder *source_encoder;
11888        struct drm_connector *connector;
11889        struct drm_connector_state *connector_state;
11890        int i;
11891
11892        for_each_connector_in_state(state, connector, connector_state, i) {
11893                if (connector_state->crtc != &crtc->base)
11894                        continue;
11895
11896                source_encoder =
11897                        to_intel_encoder(connector_state->best_encoder);
11898                if (!encoders_cloneable(encoder, source_encoder))
11899                        return false;
11900        }
11901
11902        return true;
11903}
11904
11905static bool check_encoder_cloning(struct drm_atomic_state *state,
11906                                  struct intel_crtc *crtc)
11907{
11908        struct intel_encoder *encoder;
11909        struct drm_connector *connector;
11910        struct drm_connector_state *connector_state;
11911        int i;
11912
11913        for_each_connector_in_state(state, connector, connector_state, i) {
11914                if (connector_state->crtc != &crtc->base)
11915                        continue;
11916
11917                encoder = to_intel_encoder(connector_state->best_encoder);
11918                if (!check_single_encoder_cloning(state, crtc, encoder))
11919                        return false;
11920        }
11921
11922        return true;
11923}
11924
/*
 * Atomic check hook for intel CRTCs. Validates encoder cloning for the
 * new state and precomputes derived state (dpll clocks, watermarks and,
 * on gen9+, pipe scalers) that the commit phase will program.
 *
 * Returns 0 on success or a negative error code to reject the update.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Reject impossible encoder combinations up front. */
	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return -EINVAL;
	}

	/* A modeset that leaves the pipe inactive still needs a wm update. */
	if (mode_changed && !crtc_state->active)
		pipe_config->wm_changed = true;

	/*
	 * Compute the new dpll configuration. shared_dpll must still be
	 * unset at this point, otherwise earlier compute state leaked in.
	 */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
		if (ret)
			return ret;
	}

	/* gen9+: (re)assign the pipe/plane scalers for the new state. */
	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}
11972
/* CRTC helper vtable wiring up the atomic modeset hooks for intel CRTCs. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
11980
11981static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11982{
11983        struct intel_connector *connector;
11984
11985        for_each_intel_connector(dev, connector) {
11986                if (connector->base.encoder) {
11987                        connector->base.state->best_encoder =
11988                                connector->base.encoder;
11989                        connector->base.state->crtc =
11990                                connector->base.encoder->crtc;
11991                } else {
11992                        connector->base.state->best_encoder = NULL;
11993                        connector->base.state->crtc = NULL;
11994                }
11995        }
11996}
11997
11998static void
11999connected_sink_compute_bpp(struct intel_connector *connector,
12000                           struct intel_crtc_state *pipe_config)
12001{
12002        int bpp = pipe_config->pipe_bpp;
12003
12004        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
12005                connector->base.base.id,
12006                connector->base.name);
12007
12008        /* Don't use an invalid EDID bpc value */
12009        if (connector->base.display_info.bpc &&
12010            connector->base.display_info.bpc * 3 < bpp) {
12011                DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12012                              bpp, connector->base.display_info.bpc*3);
12013                pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12014        }
12015
12016        /* Clamp bpp to default limit on screens without EDID 1.4 */
12017        if (connector->base.display_info.bpc == 0) {
12018                int type = connector->base.connector_type;
12019                int clamp_bpp = 24;
12020
12021                /* Fall back to 18 bpp when DP sink capability is unknown. */
12022                if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12023                    type == DRM_MODE_CONNECTOR_eDP)
12024                        clamp_bpp = 18;
12025
12026                if (bpp > clamp_bpp) {
12027                        DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12028                                      bpp, clamp_bpp);
12029                        pipe_config->pipe_bpp = clamp_bpp;
12030                }
12031        }
12032}
12033
12034static int
12035compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12036                          struct intel_crtc_state *pipe_config)
12037{
12038        struct drm_device *dev = crtc->base.dev;
12039        struct drm_atomic_state *state;
12040        struct drm_connector *connector;
12041        struct drm_connector_state *connector_state;
12042        int bpp, i;
12043
12044        if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
12045                bpp = 10*3;
12046        else if (INTEL_INFO(dev)->gen >= 5)
12047                bpp = 12*3;
12048        else
12049                bpp = 8*3;
12050
12051
12052        pipe_config->pipe_bpp = bpp;
12053
12054        state = pipe_config->base.state;
12055
12056        /* Clamp display bpp to EDID value */
12057        for_each_connector_in_state(state, connector, connector_state, i) {
12058                if (connector_state->crtc != &crtc->base)
12059                        continue;
12060
12061                connected_sink_compute_bpp(to_intel_connector(connector),
12062                                           pipe_config);
12063        }
12064
12065        return bpp;
12066}
12067
12068static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12069{
12070        DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12071                        "type: 0x%x flags: 0x%x\n",
12072                mode->crtc_clock,
12073                mode->crtc_hdisplay, mode->crtc_hsync_start,
12074                mode->crtc_hsync_end, mode->crtc_htotal,
12075                mode->crtc_vdisplay, mode->crtc_vsync_start,
12076                mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12077}
12078
/*
 * Dump the complete contents of @pipe_config, plus the state of every
 * plane attached to @crtc's pipe, at KMS debug level. @context is a
 * caller-supplied tag identifying where the dump was triggered from.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* Second set of m/n values (dp_m2_n2) also dumped below. */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* The dpll_hw_state layout differs per platform generation. */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	/* Walk all planes on the device, dumping only those on this pipe. */
	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		/* A plane without a framebuffer is disabled. */
		if (!fb) {
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
				"disabled, scaler_id = %d\n",
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
				plane->base.id, intel_plane->pipe,
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
				drm_plane_index(plane), state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
			plane->base.id, intel_plane->pipe,
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
			drm_plane_index(plane));
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
			fb->base.id, fb->width, fb->height, fb->pixel_format);
		/* src coords are 16.16 fixed point, dst coords are integer. */
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
			state->scaler_id,
			state->src.x1 >> 16, state->src.y1 >> 16,
			drm_rect_width(&state->src) >> 16,
			drm_rect_height(&state->src) >> 16,
			state->dst.x1, state->dst.y1,
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
	}
}
12215
/*
 * Check that no digital port ends up being driven by more than one
 * encoder in the new configuration. Uses each connector's new state
 * when the atomic @state carries one, falling back to the current
 * state otherwise. Returns false when a port would be used twice.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new connector state if this update has one. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fall through: on DDI an "unknown" output still owns a port */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
		default:
			break;
		}
	}

	return true;
}
12264
12265static void
12266clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12267{
12268        struct drm_crtc_state tmp_state;
12269        struct intel_crtc_scaler_state scaler_state;
12270        struct intel_dpll_hw_state dpll_hw_state;
12271        enum intel_dpll_id shared_dpll;
12272        uint32_t ddi_pll_sel;
12273        bool force_thru;
12274
12275        /* FIXME: before the switch to atomic started, a new pipe_config was
12276         * kzalloc'd. Code that depends on any field being zero should be
12277         * fixed, so that the crtc_state can be safely duplicated. For now,
12278         * only fields that are know to not cause problems are preserved. */
12279
12280        tmp_state = crtc_state->base;
12281        scaler_state = crtc_state->scaler_state;
12282        shared_dpll = crtc_state->shared_dpll;
12283        dpll_hw_state = crtc_state->dpll_hw_state;
12284        ddi_pll_sel = crtc_state->ddi_pll_sel;
12285        force_thru = crtc_state->pch_pfit.force_thru;
12286
12287        memset(crtc_state, 0, sizeof *crtc_state);
12288
12289        crtc_state->base = tmp_state;
12290        crtc_state->scaler_state = scaler_state;
12291        crtc_state->shared_dpll = shared_dpll;
12292        crtc_state->dpll_hw_state = dpll_hw_state;
12293        crtc_state->ddi_pll_sel = ddi_pll_sel;
12294        crtc_state->pch_pfit.force_thru = force_thru;
12295}
12296
/*
 * Compute the software pipe configuration for @crtc: sanitize the sync
 * polarity flags, establish the baseline pipe bpp and pipe source size,
 * give every encoder assigned to the crtc a chance to adjust (or
 * reject) the config, then run the crtc-level fixup. The fixup may
 * request exactly one retry (RETRY) when the first pass turned out to
 * be bandwidth constrained.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	/* Start from a clean slate, keeping only fields that may survive. */
	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* A second RETRY would mean the computation cannot converge. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
12401
12402static void
12403intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12404{
12405        struct drm_crtc *crtc;
12406        struct drm_crtc_state *crtc_state;
12407        int i;
12408
12409        /* Double check state. */
12410        for_each_crtc_in_state(state, crtc, crtc_state, i) {
12411                to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12412
12413                /* Update hwmode for vblank functions */
12414                if (crtc->state->active)
12415                        crtc->hwmode = crtc->state->adjusted_mode;
12416                else
12417                        crtc->hwmode.crtc_clock = 0;
12418
12419                /*
12420                 * Update legacy state to satisfy fbc code. This can
12421                 * be removed when fbc uses the atomic state.
12422                 */
12423                if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12424                        struct drm_plane_state *plane_state = crtc->primary->state;
12425
12426                        crtc->primary->fb = plane_state->fb;
12427                        crtc->x = plane_state->src_x >> 16;
12428                        crtc->y = plane_state->src_y >> 16;
12429                }
12430        }
12431}
12432
/*
 * Fuzzy clock comparison: two clocks match when they are identical or
 * when their difference is below ~5% of their combined value. A zero
 * clock only ever matches another zero clock.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	delta = abs(clock1 - clock2);

	return (delta + sum) * 100 / sum < 105;
}
12450
/*
 * Iterate over the intel CRTCs on @dev whose pipe bit is set in @mask
 * (bit N of @mask selects pipe N).
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if (mask & (1 <<(intel_crtc)->pipe))
12456
12457static bool
12458intel_compare_m_n(unsigned int m, unsigned int n,
12459                  unsigned int m2, unsigned int n2,
12460                  bool exact)
12461{
12462        if (m == m2 && n == n2)
12463                return true;
12464
12465        if (exact || !m || !n || !m2 || !n2)
12466                return false;
12467
12468        BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12469
12470        if (n > n2) {
12471                while (n > n2) {
12472                        m2 <<= 1;
12473                        n2 <<= 1;
12474                }
12475        } else if (n < n2) {
12476                while (n < n2) {
12477                        m <<= 1;
12478                        n <<= 1;
12479                }
12480        }
12481
12482        if (n != n2)
12483                return false;
12484
12485        return intel_fuzzy_clock_check(m, m2);
12486}
12487
12488static bool
12489intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12490                       struct intel_link_m_n *m2_n2,
12491                       bool adjust)
12492{
12493        if (m_n->tu == m2_n2->tu &&
12494            intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12495                              m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12496            intel_compare_m_n(m_n->link_m, m_n->link_n,
12497                              m2_n2->link_m, m2_n2->link_n, !adjust)) {
12498                if (adjust)
12499                        *m2_n2 = *m_n;
12500
12501                return true;
12502        }
12503
12504        return false;
12505}
12506
12507static bool
12508intel_pipe_config_compare(struct drm_device *dev,
12509                          struct intel_crtc_state *current_config,
12510                          struct intel_crtc_state *pipe_config,
12511                          bool adjust)
12512{
12513        bool ret = true;
12514
12515#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12516        do { \
12517                if (!adjust) \
12518                        DRM_ERROR(fmt, ##__VA_ARGS__); \
12519                else \
12520                        DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12521        } while (0)
12522
12523#define PIPE_CONF_CHECK_X(name) \
12524        if (current_config->name != pipe_config->name) { \
12525                INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12526                          "(expected 0x%08x, found 0x%08x)\n", \
12527                          current_config->name, \
12528                          pipe_config->name); \
12529                ret = false; \
12530        }
12531
12532#define PIPE_CONF_CHECK_I(name) \
12533        if (current_config->name != pipe_config->name) { \
12534                INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12535                          "(expected %i, found %i)\n", \
12536                          current_config->name, \
12537                          pipe_config->name); \
12538                ret = false; \
12539        }
12540
12541#define PIPE_CONF_CHECK_M_N(name) \
12542        if (!intel_compare_link_m_n(&current_config->name, \
12543                                    &pipe_config->name,\
12544                                    adjust)) { \
12545                INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12546                          "(expected tu %i gmch %i/%i link %i/%i, " \
12547                          "found tu %i, gmch %i/%i link %i/%i)\n", \
12548                          current_config->name.tu, \
12549                          current_config->name.gmch_m, \
12550                          current_config->name.gmch_n, \
12551                          current_config->name.link_m, \
12552                          current_config->name.link_n, \
12553                          pipe_config->name.tu, \
12554                          pipe_config->name.gmch_m, \
12555                          pipe_config->name.gmch_n, \
12556                          pipe_config->name.link_m, \
12557                          pipe_config->name.link_n); \
12558                ret = false; \
12559        }
12560
12561#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12562        if (!intel_compare_link_m_n(&current_config->name, \
12563                                    &pipe_config->name, adjust) && \
12564            !intel_compare_link_m_n(&current_config->alt_name, \
12565                                    &pipe_config->name, adjust)) { \
12566                INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12567                          "(expected tu %i gmch %i/%i link %i/%i, " \
12568                          "or tu %i gmch %i/%i link %i/%i, " \
12569                          "found tu %i, gmch %i/%i link %i/%i)\n", \
12570                          current_config->name.tu, \
12571                          current_config->name.gmch_m, \
12572                          current_config->name.gmch_n, \
12573                          current_config->name.link_m, \
12574                          current_config->name.link_n, \
12575                          current_config->alt_name.tu, \
12576                          current_config->alt_name.gmch_m, \
12577                          current_config->alt_name.gmch_n, \
12578                          current_config->alt_name.link_m, \
12579                          current_config->alt_name.link_n, \
12580                          pipe_config->name.tu, \
12581                          pipe_config->name.gmch_m, \
12582                          pipe_config->name.gmch_n, \
12583                          pipe_config->name.link_m, \
12584                          pipe_config->name.link_n); \
12585                ret = false; \
12586        }
12587
12588/* This is required for BDW+ where there is only one set of registers for
12589 * switching between high and low RR.
12590 * This macro can be used whenever a comparison has to be made between one
12591 * hw state and multiple sw state variables.
12592 */
12593#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12594        if ((current_config->name != pipe_config->name) && \
12595                (current_config->alt_name != pipe_config->name)) { \
12596                        INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12597                                  "(expected %i or %i, found %i)\n", \
12598                                  current_config->name, \
12599                                  current_config->alt_name, \
12600                                  pipe_config->name); \
12601                        ret = false; \
12602        }
12603
12604#define PIPE_CONF_CHECK_FLAGS(name, mask)       \
12605        if ((current_config->name ^ pipe_config->name) & (mask)) { \
12606                INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12607                          "(expected %i, found %i)\n", \
12608                          current_config->name & (mask), \
12609                          pipe_config->name & (mask)); \
12610                ret = false; \
12611        }
12612
12613#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12614        if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12615                INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12616                          "(expected %i, found %i)\n", \
12617                          current_config->name, \
12618                          pipe_config->name); \
12619                ret = false; \
12620        }
12621
12622#define PIPE_CONF_QUIRK(quirk)  \
12623        ((current_config->quirks | pipe_config->quirks) & (quirk))
12624
12625        PIPE_CONF_CHECK_I(cpu_transcoder);
12626
12627        PIPE_CONF_CHECK_I(has_pch_encoder);
12628        PIPE_CONF_CHECK_I(fdi_lanes);
12629        PIPE_CONF_CHECK_M_N(fdi_m_n);
12630
12631        PIPE_CONF_CHECK_I(has_dp_encoder);
12632        PIPE_CONF_CHECK_I(lane_count);
12633
12634        if (INTEL_INFO(dev)->gen < 8) {
12635                PIPE_CONF_CHECK_M_N(dp_m_n);
12636
12637                if (current_config->has_drrs)
12638                        PIPE_CONF_CHECK_M_N(dp_m2_n2);
12639        } else
12640                PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12641
12642        PIPE_CONF_CHECK_I(has_dsi_encoder);
12643
12644        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12645        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12646        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12647        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12648        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12649        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12650
12651        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12652        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12653        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12654        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12655        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12656        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12657
12658        PIPE_CONF_CHECK_I(pixel_multiplier);
12659        PIPE_CONF_CHECK_I(has_hdmi_sink);
12660        if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12661            IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12662                PIPE_CONF_CHECK_I(limited_color_range);
12663        PIPE_CONF_CHECK_I(has_infoframe);
12664
12665        PIPE_CONF_CHECK_I(has_audio);
12666
12667        PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12668                              DRM_MODE_FLAG_INTERLACE);
12669
12670        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12671                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12672                                      DRM_MODE_FLAG_PHSYNC);
12673                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12674                                      DRM_MODE_FLAG_NHSYNC);
12675                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12676                                      DRM_MODE_FLAG_PVSYNC);
12677                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12678                                      DRM_MODE_FLAG_NVSYNC);
12679        }
12680
12681        PIPE_CONF_CHECK_X(gmch_pfit.control);
12682        /* pfit ratios are autocomputed by the hw on gen4+ */
12683        if (INTEL_INFO(dev)->gen < 4)
12684                PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
12685        PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12686
12687        if (!adjust) {
12688                PIPE_CONF_CHECK_I(pipe_src_w);
12689                PIPE_CONF_CHECK_I(pipe_src_h);
12690
12691                PIPE_CONF_CHECK_I(pch_pfit.enabled);
12692                if (current_config->pch_pfit.enabled) {
12693                        PIPE_CONF_CHECK_X(pch_pfit.pos);
12694                        PIPE_CONF_CHECK_X(pch_pfit.size);
12695                }
12696
12697                PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12698        }
12699
12700        /* BDW+ don't expose a synchronous way to read the state */
12701        if (IS_HASWELL(dev))
12702                PIPE_CONF_CHECK_I(ips_enabled);
12703
12704        PIPE_CONF_CHECK_I(double_wide);
12705
12706        PIPE_CONF_CHECK_X(ddi_pll_sel);
12707
12708        PIPE_CONF_CHECK_I(shared_dpll);
12709        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12710        PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12711        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12712        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12713        PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12714        PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12715        PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12716        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12717        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12718
12719        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12720                PIPE_CONF_CHECK_I(pipe_bpp);
12721
12722        PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12723        PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12724
12725#undef PIPE_CONF_CHECK_X
12726#undef PIPE_CONF_CHECK_I
12727#undef PIPE_CONF_CHECK_I_ALT
12728#undef PIPE_CONF_CHECK_FLAGS
12729#undef PIPE_CONF_CHECK_CLOCK_FUZZY
12730#undef PIPE_CONF_QUIRK
12731#undef INTEL_ERR_OR_DBG_KMS
12732
12733        return ret;
12734}
12735
12736static void check_wm_state(struct drm_device *dev)
12737{
12738        struct drm_i915_private *dev_priv = dev->dev_private;
12739        struct skl_ddb_allocation hw_ddb, *sw_ddb;
12740        struct intel_crtc *intel_crtc;
12741        int plane;
12742
12743        if (INTEL_INFO(dev)->gen < 9)
12744                return;
12745
12746        skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12747        sw_ddb = &dev_priv->wm.skl_hw.ddb;
12748
12749        for_each_intel_crtc(dev, intel_crtc) {
12750                struct skl_ddb_entry *hw_entry, *sw_entry;
12751                const enum pipe pipe = intel_crtc->pipe;
12752
12753                if (!intel_crtc->active)
12754                        continue;
12755
12756                /* planes */
12757                for_each_plane(dev_priv, pipe, plane) {
12758                        hw_entry = &hw_ddb.plane[pipe][plane];
12759                        sw_entry = &sw_ddb->plane[pipe][plane];
12760
12761                        if (skl_ddb_entry_equal(hw_entry, sw_entry))
12762                                continue;
12763
12764                        DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12765                                  "(expected (%u,%u), found (%u,%u))\n",
12766                                  pipe_name(pipe), plane + 1,
12767                                  sw_entry->start, sw_entry->end,
12768                                  hw_entry->start, hw_entry->end);
12769                }
12770
12771                /* cursor */
12772                hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12773                sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12774
12775                if (skl_ddb_entry_equal(hw_entry, sw_entry))
12776                        continue;
12777
12778                DRM_ERROR("mismatch in DDB state pipe %c cursor "
12779                          "(expected (%u,%u), found (%u,%u))\n",
12780                          pipe_name(pipe),
12781                          sw_entry->start, sw_entry->end,
12782                          hw_entry->start, hw_entry->end);
12783        }
12784}
12785
12786static void
12787check_connector_state(struct drm_device *dev,
12788                      struct drm_atomic_state *old_state)
12789{
12790        struct drm_connector_state *old_conn_state;
12791        struct drm_connector *connector;
12792        int i;
12793
12794        for_each_connector_in_state(old_state, connector, old_conn_state, i) {
12795                struct drm_encoder *encoder = connector->encoder;
12796                struct drm_connector_state *state = connector->state;
12797
12798                /* This also checks the encoder/connector hw state with the
12799                 * ->get_hw_state callbacks. */
12800                intel_connector_check_state(to_intel_connector(connector));
12801
12802                I915_STATE_WARN(state->best_encoder != encoder,
12803                     "connector's atomic encoder doesn't match legacy encoder\n");
12804        }
12805}
12806
12807static void
12808check_encoder_state(struct drm_device *dev)
12809{
12810        struct intel_encoder *encoder;
12811        struct intel_connector *connector;
12812
12813        for_each_intel_encoder(dev, encoder) {
12814                bool enabled = false;
12815                enum pipe pipe;
12816
12817                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12818                              encoder->base.base.id,
12819                              encoder->base.name);
12820
12821                for_each_intel_connector(dev, connector) {
12822                        if (connector->base.state->best_encoder != &encoder->base)
12823                                continue;
12824                        enabled = true;
12825
12826                        I915_STATE_WARN(connector->base.state->crtc !=
12827                                        encoder->base.crtc,
12828                             "connector's crtc doesn't match encoder crtc\n");
12829                }
12830
12831                I915_STATE_WARN(!!encoder->base.crtc != enabled,
12832                     "encoder's enabled state mismatch "
12833                     "(expected %i, found %i)\n",
12834                     !!encoder->base.crtc, enabled);
12835
12836                if (!encoder->base.crtc) {
12837                        bool active;
12838
12839                        active = encoder->get_hw_state(encoder, &pipe);
12840                        I915_STATE_WARN(active,
12841                             "encoder detached but still enabled on pipe %c.\n",
12842                             pipe_name(pipe));
12843                }
12844        }
12845}
12846
/*
 * Cross-check each modified crtc's sw state against a fresh hw readout.
 * The old atomic crtc state is destroyed and its storage reused as
 * scratch space for the hw config readout.
 */
static void
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_crtc_state *pipe_config, *sw_config;
		bool active;

		/* Only crtcs that went through a full modeset or a fastset
		 * need cross checking. */
		if (!needs_modeset(crtc->state) &&
		    !to_intel_crtc_state(crtc->state)->update_pipe)
			continue;

		/* Free the old state's internals, then recycle its storage
		 * (zeroed) to hold the hw readout below. */
		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
		pipe_config = to_intel_crtc_state(old_crtc_state);
		memset(pipe_config, 0, sizeof(*pipe_config));
		pipe_config->base.crtc = crtc;
		pipe_config->base.state = old_state;

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.id);

		active = dev_priv->display.get_pipe_config(intel_crtc,
							   pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->state->active;

		I915_STATE_WARN(crtc->state->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->state->active, active);

		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
		     "transitional active state does not match atomic hw state "
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);

		/* Every encoder on this crtc must agree with the crtc's
		 * active state and report the correct pipe. */
		for_each_encoder_on_crtc(dev, crtc, encoder) {
			enum pipe pipe;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active != crtc->state->active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active, crtc->state->active);

			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
					"Encoder connected to wrong pipe %c\n",
					pipe_name(pipe));

			/* Fold the encoder's view of the config into the hw
			 * readout before comparing against sw state. */
			if (active)
				encoder->get_config(encoder, pipe_config);
		}

		if (!crtc->state->active)
			continue;

		/* Full (non-fuzzy) comparison of sw vs hw pipe config. */
		sw_config = to_intel_crtc_state(crtc->state);
		if (!intel_pipe_config_compare(dev, sw_config,
					       pipe_config, false)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(intel_crtc, pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(intel_crtc, sw_config,
					       "[sw state]");
		}
	}
}
12920
12921static void
12922check_shared_dpll_state(struct drm_device *dev)
12923{
12924        struct drm_i915_private *dev_priv = dev->dev_private;
12925        struct intel_crtc *crtc;
12926        struct intel_dpll_hw_state dpll_hw_state;
12927        int i;
12928
12929        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12930                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12931                int enabled_crtcs = 0, active_crtcs = 0;
12932                bool active;
12933
12934                memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12935
12936                DRM_DEBUG_KMS("%s\n", pll->name);
12937
12938                active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
12939
12940                I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
12941                     "more active pll users than references: %i vs %i\n",
12942                     pll->active, hweight32(pll->config.crtc_mask));
12943                I915_STATE_WARN(pll->active && !pll->on,
12944                     "pll in active use but not on in sw tracking\n");
12945                I915_STATE_WARN(pll->on && !pll->active,
12946                     "pll in on but not on in use in sw tracking\n");
12947                I915_STATE_WARN(pll->on != active,
12948                     "pll on state mismatch (expected %i, found %i)\n",
12949                     pll->on, active);
12950
12951                for_each_intel_crtc(dev, crtc) {
12952                        if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
12953                                enabled_crtcs++;
12954                        if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
12955                                active_crtcs++;
12956                }
12957                I915_STATE_WARN(pll->active != active_crtcs,
12958                     "pll active crtcs mismatch (expected %i, found %i)\n",
12959                     pll->active, active_crtcs);
12960                I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
12961                     "pll enabled crtcs mismatch (expected %i, found %i)\n",
12962                     hweight32(pll->config.crtc_mask), enabled_crtcs);
12963
12964                I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
12965                                       sizeof(dpll_hw_state)),
12966                     "pll hw state mismatch\n");
12967        }
12968}
12969
/*
 * Run all the post-commit hw/sw state cross checkers. Called with the
 * modeset locks held after a commit has been applied; each checker only
 * warns (I915_STATE_WARN/DRM_ERROR), it never modifies state.
 */
static void
intel_modeset_check_state(struct drm_device *dev,
			  struct drm_atomic_state *old_state)
{
	check_wm_state(dev);
	check_connector_state(dev, old_state);
	check_encoder_state(dev);
	check_crtc_state(dev, old_state);
	check_shared_dpll_state(dev);
}
12980
12981void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
12982                                     int dotclock)
12983{
12984        /*
12985         * FDI already provided one idea for the dotclock.
12986         * Yell if the encoder disagrees.
12987         */
12988        WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
12989             "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12990             pipe_config->base.adjusted_mode.crtc_clock, dotclock);
12991}
12992
12993static void update_scanline_offset(struct intel_crtc *crtc)
12994{
12995        struct drm_device *dev = crtc->base.dev;
12996
12997        /*
12998         * The scanline counter increments at the leading edge of hsync.
12999         *
13000         * On most platforms it starts counting from vtotal-1 on the
13001         * first active line. That means the scanline counter value is
13002         * always one less than what we would expect. Ie. just after
13003         * start of vblank, which also occurs at start of hsync (on the
13004         * last active line), the scanline counter will read vblank_start-1.
13005         *
13006         * On gen2 the scanline counter starts counting from 1 instead
13007         * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13008         * to keep the value positive), instead of adding one.
13009         *
13010         * On HSW+ the behaviour of the scanline counter depends on the output
13011         * type. For DP ports it behaves like most other platforms, but on HDMI
13012         * there's an extra 1 line difference. So we need to add two instead of
13013         * one to the value.
13014         */
13015        if (IS_GEN2(dev)) {
13016                const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13017                int vtotal;
13018
13019                vtotal = adjusted_mode->crtc_vtotal;
13020                if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13021                        vtotal /= 2;
13022
13023                crtc->scanline_offset = vtotal - 1;
13024        } else if (HAS_DDI(dev) &&
13025                   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
13026                crtc->scanline_offset = 2;
13027        } else
13028                crtc->scanline_offset = 1;
13029}
13030
13031static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13032{
13033        struct drm_device *dev = state->dev;
13034        struct drm_i915_private *dev_priv = to_i915(dev);
13035        struct intel_shared_dpll_config *shared_dpll = NULL;
13036        struct drm_crtc *crtc;
13037        struct drm_crtc_state *crtc_state;
13038        int i;
13039
13040        if (!dev_priv->display.crtc_compute_clock)
13041                return;
13042
13043        for_each_crtc_in_state(state, crtc, crtc_state, i) {
13044                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13045                int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
13046
13047                if (!needs_modeset(crtc_state))
13048                        continue;
13049
13050                to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
13051
13052                if (old_dpll == DPLL_ID_PRIVATE)
13053                        continue;
13054
13055                if (!shared_dpll)
13056                        shared_dpll = intel_atomic_get_shared_dpll_state(state);
13057
13058                shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
13059        }
13060}
13061
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being enabled; more than two
		 * is not relevant for the w/a. */
		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* Pull every crtc into the state; may return -EDEADLK for
		 * lock backoff. */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		/* Only count crtcs that stay enabled untouched by this
		 * modeset. */
		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/* Newly enabled crtc must wait on the already-enabled pipe, or the
	 * second enabled crtc must wait on the first. */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13126
13127static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13128{
13129        struct drm_crtc *crtc;
13130        struct drm_crtc_state *crtc_state;
13131        int ret = 0;
13132
13133        /* add all active pipes to the state */
13134        for_each_crtc(state->dev, crtc) {
13135                crtc_state = drm_atomic_get_crtc_state(state, crtc);
13136                if (IS_ERR(crtc_state))
13137                        return PTR_ERR(crtc_state);
13138
13139                if (!crtc_state->active || needs_modeset(crtc_state))
13140                        continue;
13141
13142                crtc_state->mode_changed = true;
13143
13144                ret = drm_atomic_add_affected_connectors(state, crtc);
13145                if (ret)
13146                        break;
13147
13148                ret = drm_atomic_add_affected_planes(state, crtc);
13149                if (ret)
13150                        break;
13151        }
13152
13153        return ret;
13154}
13155
13156static int intel_modeset_checks(struct drm_atomic_state *state)
13157{
13158        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13159        struct drm_i915_private *dev_priv = state->dev->dev_private;
13160        struct drm_crtc *crtc;
13161        struct drm_crtc_state *crtc_state;
13162        int ret = 0, i;
13163
13164        if (!check_digital_port_conflicts(state)) {
13165                DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13166                return -EINVAL;
13167        }
13168
13169        intel_state->modeset = true;
13170        intel_state->active_crtcs = dev_priv->active_crtcs;
13171
13172        for_each_crtc_in_state(state, crtc, crtc_state, i) {
13173                if (crtc_state->active)
13174                        intel_state->active_crtcs |= 1 << i;
13175                else
13176                        intel_state->active_crtcs &= ~(1 << i);
13177        }
13178
13179        /*
13180         * See if the config requires any additional preparation, e.g.
13181         * to adjust global state with pipes off.  We need to do this
13182         * here so we can get the modeset_pipe updated config for the new
13183         * mode set on this crtc.  For other crtcs we need to use the
13184         * adjusted_mode bits in the crtc directly.
13185         */
13186        if (dev_priv->display.modeset_calc_cdclk) {
13187                ret = dev_priv->display.modeset_calc_cdclk(state);
13188
13189                if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
13190                        ret = intel_modeset_all_pipes(state);
13191
13192                if (ret < 0)
13193                        return ret;
13194
13195                DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
13196                              intel_state->cdclk, intel_state->dev_cdclk);
13197        } else
13198                to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
13199
13200        intel_modeset_clear_plls(state);
13201
13202        if (IS_HASWELL(dev_priv))
13203                return haswell_mode_set_planes_workaround(state);
13204
13205        return 0;
13206}
13207
13208/*
13209 * Handle calculation of various watermark data at the end of the atomic check
13210 * phase.  The code here should be run after the per-crtc and per-plane 'check'
13211 * handlers to ensure that all derived state has been updated.
13212 */
13213static void calc_watermark_data(struct drm_atomic_state *state)
13214{
13215        struct drm_device *dev = state->dev;
13216        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13217        struct drm_crtc *crtc;
13218        struct drm_crtc_state *cstate;
13219        struct drm_plane *plane;
13220        struct drm_plane_state *pstate;
13221
13222        /*
13223         * Calculate watermark configuration details now that derived
13224         * plane/crtc state is all properly updated.
13225         */
13226        drm_for_each_crtc(crtc, dev) {
13227                cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13228                        crtc->state;
13229
13230                if (cstate->active)
13231                        intel_state->wm_config.num_pipes_active++;
13232        }
13233        drm_for_each_legacy_plane(plane, dev) {
13234                pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13235                        plane->state;
13236
13237                if (!to_intel_plane_state(pstate)->visible)
13238                        continue;
13239
13240                intel_state->wm_config.sprites_enabled = true;
13241                if (pstate->crtc_w != pstate->src_w >> 16 ||
13242                    pstate->crtc_h != pstate->src_h >> 16)
13243                        intel_state->wm_config.sprites_scaled = true;
13244        }
13245}
13246
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * Atomic check hook: runs the core modeset checks, computes the new pipe
 * configs, downgrades modesets to fastsets where the fastboot compare
 * allows it, then performs the global modeset/plane checks and derives
 * watermark data. Returns 0 on success or a negative error code.
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* Clear the transitional commit tracking for this crtc. */
		memset(&to_intel_crtc(crtc)->atomic, 0,
		       sizeof(struct intel_crtc_atomic_commit));

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
			crtc_state->mode_changed = true;

		/* Disabled crtcs need no config computation, but still
		 * count towards any_ms when being turned off. */
		if (!crtc_state->enable) {
			if (needs_modeset(crtc_state))
				any_ms = true;
			continue;
		}

		if (!needs_modeset(crtc_state))
			continue;

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret)
			return ret;

		/* If the computed config fuzzily matches the current one,
		 * downgrade the full modeset to a fastset. */
		if (i915.fastboot &&
		    intel_pipe_config_compare(dev,
					to_intel_crtc_state(crtc->state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			to_intel_crtc_state(crtc_state)->update_pipe = true;
		}

		if (needs_modeset(crtc_state)) {
			any_ms = true;

			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	/* Global checks only when at least one crtc needs a real modeset. */
	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else
		intel_state->cdclk = dev_priv->cdclk_freq;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, state);
	calc_watermark_data(state);

	return 0;
}
13335
/*
 * intel_atomic_prepare_commit - pin framebuffers and wait for rendering
 *
 * Flushes pending pageflips, pins the new planes' framebuffers and then
 * waits (without holding struct_mutex) for outstanding rendering on them.
 * Returns 0 on success or a negative error code; on failure the plane
 * preparation is undone.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state,
				       bool async)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;

	/* Asynchronous (nonblocking) commits are not implemented yet. */
	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/* Legacy cursor updates deliberately skip the flip wait so
		 * cursor motion stays cheap and unthrottled. */
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		/* Throttle: if unpin work is piling up, drain the wq. */
		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
		u32 reset_counter;

		/* Sample the reset counter before dropping struct_mutex so
		 * a GPU reset happening during the waits is noticed. */
		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
		mutex_unlock(&dev->struct_mutex);

		/* Wait for rendering queued against each new framebuffer,
		 * using the requests stashed by intel_prepare_plane_fb(). */
		for_each_plane_in_state(state, plane, plane_state, i) {
			struct intel_plane_state *intel_plane_state =
				to_intel_plane_state(plane_state);

			if (!intel_plane_state->wait_req)
				continue;

			ret = __i915_wait_request(intel_plane_state->wait_req,
						  reset_counter, true,
						  NULL, NULL);

			/* Swallow -EIO errors to allow updates during hw lockup. */
			if (ret == -EIO)
				ret = 0;

			if (ret)
				break;
		}

		if (!ret)
			return 0;

		/* A wait failed: retake the lock and undo the pinning. */
		mutex_lock(&dev->struct_mutex);
		drm_atomic_helper_cleanup_planes(dev, state);
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}
13404
/*
 * Wait until a fresh vblank has occurred on every pipe set in @crtc_mask,
 * so that the just-committed state is live before old framebuffers are
 * unpinned and final watermarks written. Each wait is bounded to 50ms.
 */
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	/* First pass: take a vblank reference on each requested pipe and
	 * record the current vblank counter as the baseline. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (WARN_ON(ret != 0)) {
			/* Can't wait on this pipe; drop it from the mask. */
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
	}

	/* Second pass: wait for each counter to advance past the baseline,
	 * then drop the vblank reference. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		lret = wait_event_timeout(dev->vblank[pipe].queue,
				last_vblank_count[pipe] !=
					drm_crtc_vblank_count(crtc),
				msecs_to_jiffies(50));

		/* A timeout means the vblank never arrived - warn loudly. */
		WARN_ON(!lret);

		drm_crtc_vblank_put(crtc);
	}
}
13448
13449static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13450{
13451        /* fb updated, need to unpin old fb */
13452        if (crtc_state->fb_changed)
13453                return true;
13454
13455        /* wm changes, need vblank before final wm's */
13456        if (crtc_state->wm_changed)
13457                return true;
13458
13459        /*
13460         * cxsr is re-enabled after vblank.
13461         * This is already handled by crtc_state->wm_changed,
13462         * but added for clarity.
13463         */
13464        if (crtc_state->disable_cxsr)
13465                return true;
13466
13467        return false;
13468}
13469
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool async)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret = 0, i;
	bool hw_check = intel_state->modeset;
	/* Power domains grabbed for modified crtcs, released after commit. */
	unsigned long put_domains[I915_MAX_PIPES] = {};
	/* Bitmask (by commit index) of crtcs needing a vblank wait. */
	unsigned crtc_vblank_mask = 0;

	ret = intel_atomic_prepare_commit(dev, state, async);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		return ret;
	}

	/* Point of no return: the new state becomes the current state. */
	drm_atomic_helper_swap_state(dev, state);
	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;

	if (intel_state->modeset) {
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
		       sizeof(intel_state->min_pixclk));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->atomic_cdclk_freq = intel_state->cdclk;

		/* Held for the whole modeset to keep hw accessible. */
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
	}

	/* Disable phase: tear down every pipe that needs a full modeset. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(crtc->state) ||
		    to_intel_crtc_state(crtc->state)->update_pipe) {
			hw_check = true;

			/*
			 * NOTE(review): put_domains is filled by pipe here but
			 * drained by commit index i below - these usually
			 * coincide, but confirm.
			 */
			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(crtc->state));
		}

		if (!needs_modeset(crtc->state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(crtc_state));

		if (crtc_state->active) {
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
			dev_priv->display.crtc_disable(crtc);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active)
				intel_update_watermarks(crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		intel_shared_dpll_commit(state);

		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		/* Reprogram cdclk only if the target frequency changed. */
		if (dev_priv->display.modeset_commit_cdclk &&
		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
			dev_priv->display.modeset_commit_cdclk(state);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		bool modeset = needs_modeset(crtc->state);
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc->state);
		bool update_pipe = !modeset && pipe_config->update_pipe;

		if (modeset && crtc->state->active) {
			update_scanline_offset(to_intel_crtc(crtc));
			dev_priv->display.crtc_enable(crtc);
		}

		/* Fastset path: pre-plane work wasn't done in the disable
		 * loop above, so do it here. */
		if (!modeset)
			intel_pre_plane_update(to_intel_crtc_state(crtc_state));

		if (crtc->state->active && intel_crtc->atomic.update_fbc)
			intel_fbc_enable(intel_crtc);

		if (crtc->state->active &&
		    (crtc->state->planes_changed || update_pipe))
			drm_atomic_helper_commit_planes_on_crtc(crtc_state);

		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
			crtc_vblank_mask |= 1 << i;
	}

	/* FIXME: add subpixel order */

	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_post_plane_update(to_intel_crtc(crtc));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);
	}

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	/* Cross-check sw state against what was actually programmed. */
	if (hw_check)
		intel_modeset_check_state(dev, state);

	drm_atomic_state_free(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	return 0;
}
13633
/*
 * Force a full modeset on @crtc by committing its current state with
 * mode_changed set. Errors are logged/ignored rather than propagated.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
			      crtc->base.id);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		/* Nothing to restore on an inactive crtc. */
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		/* ww-mutex deadlock against another context: back off, retry. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/*
	 * A successful commit (ret == 0) consumed the state, so it must only
	 * be freed on failure or via the inactive-crtc goto above; the label
	 * sits inside the if body so that ret == 0 skips the free.
	 */
	if (ret)
out:
		drm_atomic_state_free(state);
}
13671
13672#undef for_each_intel_crtc_masked
13673
/* CRTC vfuncs; legacy set_config is routed through the atomic helper. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};
13682
/*
 * Read back the current hardware state of a PCH DPLL into @hw_state.
 * Returns true iff the PLL's VCO is enabled; returns false without
 * touching the hw if the PLLS power domain cannot be acquired.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	/* Registers are only accessible with the PLLS power domain on. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return val & DPLL_VCO_ENABLE;
}
13701
/*
 * Program the FP0/FP1 divisor registers from the cached hw state; the
 * DPLL control register itself is written later by ibx_pch_dpll_enable().
 */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}
13708
/* Enable a PCH DPLL and wait for its clocks to stabilize. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
13730
/* Disable a PCH DPLL, asserting that no transcoder still uses it. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	/* Let the hardware settle after turning the VCO off. */
	udelay(200);
}
13747
/*
 * Names for the two IBX/CPT PCH shared DPLLs, indexed by DPLL id.
 * const-qualified (pointers to string literals must never be written
 * through), matching the style of the DDI PLL name tables.
 */
static const char * const ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
13752
13753static void ibx_pch_dpll_init(struct drm_device *dev)
13754{
13755        struct drm_i915_private *dev_priv = dev->dev_private;
13756        int i;
13757
13758        dev_priv->num_shared_dpll = 2;
13759
13760        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13761                dev_priv->shared_dplls[i].id = i;
13762                dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
13763                dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
13764                dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13765                dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
13766                dev_priv->shared_dplls[i].get_hw_state =
13767                        ibx_pch_dpll_get_hw_state;
13768        }
13769}
13770
/*
 * Set up the platform-appropriate shared DPLL table: DDI PLLs on
 * DDI-based platforms, the two PCH DPLLs on IBX/CPT, none otherwise.
 * The order of the checks matters: DDI takes precedence over PCH type.
 */
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	/* The init hooks must never register more PLLs than we can track. */
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
13784
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared (new_state->fb is the
 *	framebuffer to prepare for presentation)
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       const struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = new_state->fb;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret = 0;

	/* No fb before or after this update: nothing to pin or track. */
	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state))
			ret = i915_gem_object_wait_rendering(old_obj, true);

		/* Swallow -EIO errors to allow updates during hw lockup. */
		if (ret && ret != -EIO)
			return ret;
	}

	/* For framebuffer backed by dmabuf, wait for fence */
	if (obj && obj->base.dma_buf) {
		long lret;

		lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
							   false, true,
							   MAX_SCHEDULE_TIMEOUT);
		if (lret == -ERESTARTSYS)
			return lret;

		/* Other negative results are only warned about, not fatal. */
		WARN(lret < 0, "waiting returns %li\n", lret);
	}

	if (!obj) {
		/* Plane is being disabled: nothing to pin. */
		ret = 0;
	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		/* Some platforms scan the cursor out of a physically
		 * contiguous buffer instead of through the GTT. */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
	}

	if (ret == 0) {
		if (obj) {
			struct intel_plane_state *plane_state =
				to_intel_plane_state(new_state);

			/* Stash the last write request so the commit can
			 * wait for rendering without struct_mutex. */
			i915_gem_request_assign(&plane_state->wait_req,
						obj->last_write_req);
		}

		/* Move the frontbuffer tracking bit from old_obj to obj. */
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
	}

	return ret;
}
13875
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the plane state being removed (old_state->fb is the
 *	framebuffer that was on the plane)
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       const struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_plane_state *old_intel_state;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	/* Physically-backed cursors were never pinned in the GTT, so there
	 * is nothing to unpin for them (see intel_prepare_plane_fb()). */
	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state);

	/* prepare_fb aborted? */
	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);

	/* Drop the render-wait reference taken in intel_prepare_plane_fb(). */
	i915_gem_request_assign(&old_intel_state->wait_req, NULL);

}
13912
13913int
13914skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13915{
13916        int max_scale;
13917        struct drm_device *dev;
13918        struct drm_i915_private *dev_priv;
13919        int crtc_clock, cdclk;
13920
13921        if (!intel_crtc || !crtc_state->base.enable)
13922                return DRM_PLANE_HELPER_NO_SCALING;
13923
13924        dev = intel_crtc->base.dev;
13925        dev_priv = dev->dev_private;
13926        crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13927        cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13928
13929        if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13930                return DRM_PLANE_HELPER_NO_SCALING;
13931
13932        /*
13933         * skl max scale is lower of:
13934         *    close to 3 but not 3, -1 is for that purpose
13935         *            or
13936         *    cdclk/crtc_clock
13937         */
13938        max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13939
13940        return max_scale;
13941}
13942
13943static int
13944intel_check_primary_plane(struct drm_plane *plane,
13945                          struct intel_crtc_state *crtc_state,
13946                          struct intel_plane_state *state)
13947{
13948        struct drm_crtc *crtc = state->base.crtc;
13949        struct drm_framebuffer *fb = state->base.fb;
13950        int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13951        int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13952        bool can_position = false;
13953
13954        if (INTEL_INFO(plane->dev)->gen >= 9) {
13955                /* use scaler when colorkey is not required */
13956                if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13957                        min_scale = 1;
13958                        max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13959                }
13960                can_position = true;
13961        }
13962
13963        return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13964                                             &state->dst, &state->clip,
13965                                             min_scale, max_scale,
13966                                             can_position, true,
13967                                             &state->visible);
13968}
13969
/*
 * Start the per-crtc commit critical section: begin vblank evasion and,
 * on the fastset path, apply pipe config changes that don't need a full
 * modeset. Paired with intel_finish_crtc_commit().
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	/* Full modesets program the pipe elsewhere; nothing more to do. */
	if (modeset)
		return;

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_INFO(dev)->gen >= 9)
		skl_detach_scalers(intel_crtc);
}
13990
/*
 * Close the vblank-evasion critical section opened by
 * intel_begin_crtc_commit().
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	intel_pipe_update_end(to_intel_crtc(crtc));
}
13998
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite). Unregisters the plane from the DRM core, then frees the
 * containing intel_plane.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	/* Cleanup must run before the embedding structure is freed. */
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
14012
/*
 * Plane vfuncs shared by all i915 plane types; legacy update/disable and
 * property entry points are routed through the atomic helpers.
 */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};
14024
14025static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14026                                                    int pipe)
14027{
14028        struct intel_plane *primary;
14029        struct intel_plane_state *state;
14030        const uint32_t *intel_primary_formats;
14031        unsigned int num_formats;
14032
14033        primary = kzalloc(sizeof(*primary), GFP_KERNEL);
14034        if (primary == NULL)
14035                return NULL;
14036
14037        state = intel_create_plane_state(&primary->base);
14038        if (!state) {
14039                kfree(primary);
14040                return NULL;
14041        }
14042        primary->base.state = &state->base;
14043
14044        primary->can_scale = false;
14045        primary->max_downscale = 1;
14046        if (INTEL_INFO(dev)->gen >= 9) {
14047                primary->can_scale = true;
14048                state->scaler_id = -1;
14049        }
14050        primary->pipe = pipe;
14051        primary->plane = pipe;
14052        primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
14053        primary->check_plane = intel_check_primary_plane;
14054        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14055                primary->plane = !pipe;
14056
14057        if (INTEL_INFO(dev)->gen >= 9) {
14058                intel_primary_formats = skl_primary_formats;
14059                num_formats = ARRAY_SIZE(skl_primary_formats);
14060
14061                primary->update_plane = skylake_update_primary_plane;
14062                primary->disable_plane = skylake_disable_primary_plane;
14063        } else if (HAS_PCH_SPLIT(dev)) {
14064                intel_primary_formats = i965_primary_formats;
14065                num_formats = ARRAY_SIZE(i965_primary_formats);
14066
14067                primary->update_plane = ironlake_update_primary_plane;
14068                primary->disable_plane = i9xx_disable_primary_plane;
14069        } else if (INTEL_INFO(dev)->gen >= 4) {
14070                intel_primary_formats = i965_primary_formats;
14071                num_formats = ARRAY_SIZE(i965_primary_formats);
14072
14073                primary->update_plane = i9xx_update_primary_plane;
14074                primary->disable_plane = i9xx_disable_primary_plane;
14075        } else {
14076                intel_primary_formats = i8xx_primary_formats;
14077                num_formats = ARRAY_SIZE(i8xx_primary_formats);
14078
14079                primary->update_plane = i9xx_update_primary_plane;
14080                primary->disable_plane = i9xx_disable_primary_plane;
14081        }
14082
14083        drm_universal_plane_init(dev, &primary->base, 0,
14084                                 &intel_plane_funcs,
14085                                 intel_primary_formats, num_formats,
14086                                 DRM_PLANE_TYPE_PRIMARY, NULL);
14087
14088        if (INTEL_INFO(dev)->gen >= 4)
14089                intel_create_rotation_property(dev, primary);
14090
14091        drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14092
14093        return &primary->base;
14094}
14095
14096void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14097{
14098        if (!dev->mode_config.rotation_property) {
14099                unsigned long flags = BIT(DRM_ROTATE_0) |
14100                        BIT(DRM_ROTATE_180);
14101
14102                if (INTEL_INFO(dev)->gen >= 9)
14103                        flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14104
14105                dev->mode_config.rotation_property =
14106                        drm_mode_create_rotation_property(dev, flags);
14107        }
14108        if (dev->mode_config.rotation_property)
14109                drm_object_attach_property(&plane->base.base,
14110                                dev->mode_config.rotation_property,
14111                                plane->base.state->rotation);
14112}
14113
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	/* Cursors can never be scaled; clip the request against the crtc. */
	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					    &state->dst, &state->clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* 4 bytes per pixel, width rounded up to the next power of two. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	/* Cursor buffers must be linear. */
	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
	    state->visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}
14174
14175static void
14176intel_disable_cursor_plane(struct drm_plane *plane,
14177                           struct drm_crtc *crtc)
14178{
14179        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14180
14181        intel_crtc->cursor_addr = 0;
14182        intel_crtc_update_cursor(crtc, NULL);
14183}
14184
14185static void
14186intel_update_cursor_plane(struct drm_plane *plane,
14187                          const struct intel_crtc_state *crtc_state,
14188                          const struct intel_plane_state *state)
14189{
14190        struct drm_crtc *crtc = crtc_state->base.crtc;
14191        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14192        struct drm_device *dev = plane->dev;
14193        struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14194        uint32_t addr;
14195
14196        if (!obj)
14197                addr = 0;
14198        else if (!INTEL_INFO(dev)->cursor_needs_physical)
14199                addr = i915_gem_obj_ggtt_offset(obj);
14200        else
14201                addr = obj->phys_handle->busaddr;
14202
14203        intel_crtc->cursor_addr = addr;
14204        intel_crtc_update_cursor(crtc, state);
14205}
14206
14207static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14208                                                   int pipe)
14209{
14210        struct intel_plane *cursor;
14211        struct intel_plane_state *state;
14212
14213        cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
14214        if (cursor == NULL)
14215                return NULL;
14216
14217        state = intel_create_plane_state(&cursor->base);
14218        if (!state) {
14219                kfree(cursor);
14220                return NULL;
14221        }
14222        cursor->base.state = &state->base;
14223
14224        cursor->can_scale = false;
14225        cursor->max_downscale = 1;
14226        cursor->pipe = pipe;
14227        cursor->plane = pipe;
14228        cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
14229        cursor->check_plane = intel_check_cursor_plane;
14230        cursor->update_plane = intel_update_cursor_plane;
14231        cursor->disable_plane = intel_disable_cursor_plane;
14232
14233        drm_universal_plane_init(dev, &cursor->base, 0,
14234                                 &intel_plane_funcs,
14235                                 intel_cursor_formats,
14236                                 ARRAY_SIZE(intel_cursor_formats),
14237                                 DRM_PLANE_TYPE_CURSOR, NULL);
14238
14239        if (INTEL_INFO(dev)->gen >= 4) {
14240                if (!dev->mode_config.rotation_property)
14241                        dev->mode_config.rotation_property =
14242                                drm_mode_create_rotation_property(dev,
14243                                                        BIT(DRM_ROTATE_0) |
14244                                                        BIT(DRM_ROTATE_180));
14245                if (dev->mode_config.rotation_property)
14246                        drm_object_attach_property(&cursor->base.base,
14247                                dev->mode_config.rotation_property,
14248                                state->base.rotation);
14249        }
14250
14251        if (INTEL_INFO(dev)->gen >=9)
14252                state->scaler_id = -1;
14253
14254        drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14255
14256        return &cursor->base;
14257}
14258
14259static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14260        struct intel_crtc_state *crtc_state)
14261{
14262        int i;
14263        struct intel_scaler *intel_scaler;
14264        struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14265
14266        for (i = 0; i < intel_crtc->num_scalers; i++) {
14267                intel_scaler = &scaler_state->scalers[i];
14268                intel_scaler->in_use = 0;
14269                intel_scaler->mode = PS_SCALER_MODE_DYN;
14270        }
14271
14272        scaler_state->scaler_id = -1;
14273}
14274
/*
 * Allocate and register the crtc for @pipe, together with its primary and
 * cursor planes. On any failure everything allocated so far is torn down
 * and the function silently returns (callers cannot observe the error).
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* Pipe C has fewer scalers than pipes A/B on these parts. */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs, NULL);
	if (ret)
		goto fail;

	/* Start with an identity gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 is an impossible cached value, forcing the first real update. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	/* Each plane/pipe slot must be claimed exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	/*
	 * NOTE(review): this cleans up the drm_plane but does not appear to
	 * free the intel_plane wrapper or its state — verify against
	 * intel_primary/cursor_plane_create ownership.
	 */
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
14360
14361enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14362{
14363        struct drm_encoder *encoder = connector->base.encoder;
14364        struct drm_device *dev = connector->base.dev;
14365
14366        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14367
14368        if (!encoder || WARN_ON(!encoder->crtc))
14369                return INVALID_PIPE;
14370
14371        return to_intel_crtc(encoder->crtc)->pipe;
14372}
14373
14374int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14375                                struct drm_file *file)
14376{
14377        struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14378        struct drm_crtc *drmmode_crtc;
14379        struct intel_crtc *crtc;
14380
14381        drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14382
14383        if (!drmmode_crtc) {
14384                DRM_ERROR("no such CRTC id\n");
14385                return -ENOENT;
14386        }
14387
14388        crtc = to_intel_crtc(drmmode_crtc);
14389        pipe_from_crtc_id->pipe = crtc->pipe;
14390
14391        return 0;
14392}
14393
14394static int intel_encoder_clones(struct intel_encoder *encoder)
14395{
14396        struct drm_device *dev = encoder->base.dev;
14397        struct intel_encoder *source_encoder;
14398        int index_mask = 0;
14399        int entry = 0;
14400
14401        for_each_intel_encoder(dev, source_encoder) {
14402                if (encoders_cloneable(encoder, source_encoder))
14403                        index_mask |= (1 << entry);
14404
14405                entry++;
14406        }
14407
14408        return index_mask;
14409}
14410
14411static bool has_edp_a(struct drm_device *dev)
14412{
14413        struct drm_i915_private *dev_priv = dev->dev_private;
14414
14415        if (!IS_MOBILE(dev))
14416                return false;
14417
14418        if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14419                return false;
14420
14421        if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14422                return false;
14423
14424        return true;
14425}
14426
14427static bool intel_crt_present(struct drm_device *dev)
14428{
14429        struct drm_i915_private *dev_priv = dev->dev_private;
14430
14431        if (INTEL_INFO(dev)->gen >= 9)
14432                return false;
14433
14434        if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14435                return false;
14436
14437        if (IS_CHERRYVIEW(dev))
14438                return false;
14439
14440        if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14441                return false;
14442
14443        /* DDI E can't be used if DDI A requires 4 lanes */
14444        if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14445                return false;
14446
14447        if (!dev_priv->vbt.int_crt_support)
14448                return false;
14449
14450        return true;
14451}
14452
/*
 * Probe and register every display output (encoder/connector) on this
 * platform. The detection strategy varies by generation: strap registers,
 * VBT hints, or unconditional registration. Register-read order below is
 * deliberate; do not reorder the probes.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D carries either HDMI or eDP, not both. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_B))
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DP_B, PORT_B);

		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_C))
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/* Intentionally re-reads SDVOB's detect bit (shared pre-G4X). */
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* Now that all encoders exist, compute crtc/clone compatibility. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14618
14619static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14620{
14621        struct drm_device *dev = fb->dev;
14622        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14623
14624        drm_framebuffer_cleanup(fb);
14625        mutex_lock(&dev->struct_mutex);
14626        WARN_ON(!intel_fb->obj->framebuffer_references--);
14627        drm_gem_object_unreference(&intel_fb->obj->base);
14628        mutex_unlock(&dev->struct_mutex);
14629        kfree(intel_fb);
14630}
14631
14632static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14633                                                struct drm_file *file,
14634                                                unsigned int *handle)
14635{
14636        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14637        struct drm_i915_gem_object *obj = intel_fb->obj;
14638
14639        if (obj->userptr.mm) {
14640                DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14641                return -EINVAL;
14642        }
14643
14644        return drm_gem_handle_create(file, &obj->base, handle);
14645}
14646
14647static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14648                                        struct drm_file *file,
14649                                        unsigned flags, unsigned color,
14650                                        struct drm_clip_rect *clips,
14651                                        unsigned num_clips)
14652{
14653        struct drm_device *dev = fb->dev;
14654        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14655        struct drm_i915_gem_object *obj = intel_fb->obj;
14656
14657        mutex_lock(&dev->struct_mutex);
14658        intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14659        mutex_unlock(&dev->struct_mutex);
14660
14661        return 0;
14662}
14663
/* vfuncs for framebuffers registered through intel_framebuffer_init() */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14669
/*
 * Maximum framebuffer pitch in bytes for the given tiling modifier and
 * pixel format, per hardware generation.
 */
static
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
			 uint32_t pixel_format)
{
	u32 gen = INTEL_INFO(dev)->gen;

	if (gen >= 9) {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		/* "The stride in bytes must not exceed the size of
		 *  8K pixels and 32K bytes."
		 */
		return min(8192 * cpp, 32768);
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		return 32*1024;
	} else if (gen >= 4) {
		/* gen4: tiled scanout is limited to a smaller stride. */
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}
14700
/*
 * Validate @mode_cmd against hardware limits and initialize @intel_fb
 * around @obj. Returns 0 on success or a negative errno. On success the
 * fb takes a framebuffer_references count on the object (caller keeps
 * its own GEM reference). Caller must hold struct_mutex.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* No modifiers supplied: derive one from the object's tiling. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Pitch must be a multiple of the tile/linear stride alignment. */
	stride_alignment = intel_fb_stride_alignment(dev_priv,
						     mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout requires the fb pitch to equal the fence stride. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
		    INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	intel_fb->obj->framebuffer_references++;

	return 0;
}
14852
14853static struct drm_framebuffer *
14854intel_user_framebuffer_create(struct drm_device *dev,
14855                              struct drm_file *filp,
14856                              const struct drm_mode_fb_cmd2 *user_mode_cmd)
14857{
14858        struct drm_framebuffer *fb;
14859        struct drm_i915_gem_object *obj;
14860        struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14861
14862        obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
14863                                                mode_cmd.handles[0]));
14864        if (&obj->base == NULL)
14865                return ERR_PTR(-ENOENT);
14866
14867        fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14868        if (IS_ERR(fb))
14869                drm_gem_object_unreference_unlocked(&obj->base);
14870
14871        return fb;
14872}
14873
#ifndef CONFIG_DRM_FBDEV_EMULATION
/*
 * No-op stand-in for the .output_poll_changed hook used in intel_mode_funcs
 * below when fbdev emulation is compiled out; the real implementation is
 * presumably provided by the fbdev code when CONFIG_DRM_FBDEV_EMULATION is
 * set (declared in a header not visible here).
 */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
14879
/*
 * Mode-config entry points handed to the DRM core: userspace framebuffer
 * creation, hotplug-poll notification, and the atomic check/commit and
 * state-allocation hooks.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
};
14888
/*
 * Set up chip specific display functions.
 *
 * Populates the dev_priv->display vtable with the per-platform
 * implementations: DPLL search, pipe-config readout, crtc enable/disable,
 * cdclk readout, FDI link training, cdclk modeset hooks and page-flip
 * queueing.  The if/else chains below are ordered most-specific first, so
 * their order must not be changed casually.
 */
static void intel_init_display(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* DPLL divider search routine */
        if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
                dev_priv->display.find_dpll = g4x_find_best_dpll;
        else if (IS_CHERRYVIEW(dev))
                dev_priv->display.find_dpll = chv_find_best_dpll;
        else if (IS_VALLEYVIEW(dev))
                dev_priv->display.find_dpll = vlv_find_best_dpll;
        else if (IS_PINEVIEW(dev))
                dev_priv->display.find_dpll = pnv_find_best_dpll;
        else
                dev_priv->display.find_dpll = i9xx_find_best_dpll;

        /*
         * Pipe config readout and crtc enable/disable.  Gen9+ shares the
         * haswell hooks but needs its own initial-plane readout.
         */
        if (INTEL_INFO(dev)->gen >= 9) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        skylake_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock =
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
        } else if (HAS_DDI(dev)) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        ironlake_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock =
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        ironlake_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock =
                        ironlake_crtc_compute_clock;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        }

        /* Returns the core display clock speed */
        if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
                dev_priv->display.get_display_clock_speed =
                        skylake_get_display_clock_speed;
        else if (IS_BROXTON(dev))
                dev_priv->display.get_display_clock_speed =
                        broxton_get_display_clock_speed;
        else if (IS_BROADWELL(dev))
                dev_priv->display.get_display_clock_speed =
                        broadwell_get_display_clock_speed;
        else if (IS_HASWELL(dev))
                dev_priv->display.get_display_clock_speed =
                        haswell_get_display_clock_speed;
        else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                dev_priv->display.get_display_clock_speed =
                        valleyview_get_display_clock_speed;
        else if (IS_GEN5(dev))
                dev_priv->display.get_display_clock_speed =
                        ilk_get_display_clock_speed;
        else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
                 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
                dev_priv->display.get_display_clock_speed =
                        i945_get_display_clock_speed;
        else if (IS_GM45(dev))
                dev_priv->display.get_display_clock_speed =
                        gm45_get_display_clock_speed;
        else if (IS_CRESTLINE(dev))
                dev_priv->display.get_display_clock_speed =
                        i965gm_get_display_clock_speed;
        else if (IS_PINEVIEW(dev))
                dev_priv->display.get_display_clock_speed =
                        pnv_get_display_clock_speed;
        else if (IS_G33(dev) || IS_G4X(dev))
                dev_priv->display.get_display_clock_speed =
                        g33_get_display_clock_speed;
        else if (IS_I915G(dev))
                dev_priv->display.get_display_clock_speed =
                        i915_get_display_clock_speed;
        else if (IS_I945GM(dev) || IS_845G(dev))
                dev_priv->display.get_display_clock_speed =
                        i9xx_misc_get_display_clock_speed;
        else if (IS_I915GM(dev))
                dev_priv->display.get_display_clock_speed =
                        i915gm_get_display_clock_speed;
        else if (IS_I865G(dev))
                dev_priv->display.get_display_clock_speed =
                        i865_get_display_clock_speed;
        else if (IS_I85X(dev))
                dev_priv->display.get_display_clock_speed =
                        i85x_get_display_clock_speed;
        else { /* 830 */
                /* Fall back to a fixed-cdclk readout; warn on unknown HW */
                WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
                dev_priv->display.get_display_clock_speed =
                        i830_get_display_clock_speed;
        }

        /* FDI link training and (where applicable) cdclk modeset hooks */
        if (IS_GEN5(dev)) {
                dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
        } else if (IS_GEN6(dev)) {
                dev_priv->display.fdi_link_train = gen6_fdi_link_train;
        } else if (IS_IVYBRIDGE(dev)) {
                /* FIXME: detect B0+ stepping and use auto training */
                dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->display.fdi_link_train = hsw_fdi_link_train;
                /* Only BDW gets cdclk modeset hooks here; HSW does not */
                if (IS_BROADWELL(dev)) {
                        dev_priv->display.modeset_commit_cdclk =
                                broadwell_modeset_commit_cdclk;
                        dev_priv->display.modeset_calc_cdclk =
                                broadwell_modeset_calc_cdclk;
                }
        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                dev_priv->display.modeset_commit_cdclk =
                        valleyview_modeset_commit_cdclk;
                dev_priv->display.modeset_calc_cdclk =
                        valleyview_modeset_calc_cdclk;
        } else if (IS_BROXTON(dev)) {
                dev_priv->display.modeset_commit_cdclk =
                        broxton_modeset_commit_cdclk;
                dev_priv->display.modeset_calc_cdclk =
                        broxton_modeset_calc_cdclk;
        }

        /* CS-based page flip implementation, by hardware generation */
        switch (INTEL_INFO(dev)->gen) {
        case 2:
                dev_priv->display.queue_flip = intel_gen2_queue_flip;
                break;

        case 3:
                dev_priv->display.queue_flip = intel_gen3_queue_flip;
                break;

        case 4:
        case 5:
                dev_priv->display.queue_flip = intel_gen4_queue_flip;
                break;

        case 6:
                dev_priv->display.queue_flip = intel_gen6_queue_flip;
                break;
        case 7:
        case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
                dev_priv->display.queue_flip = intel_gen7_queue_flip;
                break;
        case 9:
                /* Drop through - unsupported since execlist only. */
        default:
                /* Default just returns -ENODEV to indicate unsupported */
                dev_priv->display.queue_flip = intel_default_queue_flip;
        }

        mutex_init(&dev_priv->pps_mutex);
}
15058
15059/*
15060 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15061 * resume, or other times.  This quirk makes sure that's the case for
15062 * affected systems.
15063 */
15064static void quirk_pipea_force(struct drm_device *dev)
15065{
15066        struct drm_i915_private *dev_priv = dev->dev_private;
15067
15068        dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15069        DRM_INFO("applying pipe a force quirk\n");
15070}
15071
15072static void quirk_pipeb_force(struct drm_device *dev)
15073{
15074        struct drm_i915_private *dev_priv = dev->dev_private;
15075
15076        dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15077        DRM_INFO("applying pipe b force quirk\n");
15078}
15079
15080/*
15081 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15082 */
15083static void quirk_ssc_force_disable(struct drm_device *dev)
15084{
15085        struct drm_i915_private *dev_priv = dev->dev_private;
15086        dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15087        DRM_INFO("applying lvds SSC disable quirk\n");
15088}
15089
15090/*
15091 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15092 * brightness value
15093 */
15094static void quirk_invert_brightness(struct drm_device *dev)
15095{
15096        struct drm_i915_private *dev_priv = dev->dev_private;
15097        dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15098        DRM_INFO("applying inverted panel brightness quirk\n");
15099}
15100
15101/* Some VBT's incorrectly indicate no backlight is present */
15102static void quirk_backlight_present(struct drm_device *dev)
15103{
15104        struct drm_i915_private *dev_priv = dev->dev_private;
15105        dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15106        DRM_INFO("applying backlight present quirk\n");
15107}
15108
/*
 * PCI-ID keyed quirk table entry; matched against dev->pdev by
 * intel_init_quirks(), with PCI_ANY_ID acting as a wildcard for the
 * subsystem fields.
 */
struct intel_quirk {
        int device;             /* PCI device ID */
        int subsystem_vendor;   /* PCI subsystem vendor ID, or PCI_ANY_ID */
        int subsystem_device;   /* PCI subsystem device ID, or PCI_ANY_ID */
        void (*hook)(struct drm_device *dev);   /* applied on match */
};
15115
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
        void (*hook)(struct drm_device *dev);   /* applied when DMI matches */
        /* NULL-terminated dmi_system_id table passed to dmi_check_system() */
        const struct dmi_system_id (*dmi_id_list)[];
};
15121
/* DMI match callback: log which system tripped the brightness quirk. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
        DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
        /* return value is consumed by dmi_check_system() */
        return 1;
}
15127
/* DMI-matched quirks, for machines whose PCI subsystem IDs are useless. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
        {
                .dmi_id_list = &(const struct dmi_system_id[]) {
                        {
                                .callback = intel_dmi_reverse_brightness,
                                .ident = "NCR Corporation",
                                /* empty PRODUCT_NAME matches any product from
                                 * this vendor */
                                .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
                                            DMI_MATCH(DMI_PRODUCT_NAME, ""),
                                },
                        },
                        { }  /* terminating entry */
                },
                .hook = quirk_invert_brightness,
        },
};
15143
/*
 * PCI-ID keyed quirk table.  intel_init_quirks() scans the whole table and
 * runs EVERY matching entry's hook, so a device may deliberately appear more
 * than once (e.g. 830/0x3577 below gets both the pipe A and pipe B quirks).
 */
static struct intel_quirk intel_quirks[] = {
        /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
        { 0x2592, 0x1179, 0x0001, quirk_pipea_force },

        /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
        { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

        /* 830 needs to leave pipe A & dpll A up */
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

        /* 830 needs to leave pipe B & dpll B up */
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

        /* Lenovo U160 cannot use SSC on LVDS */
        { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

        /* Sony Vaio Y cannot use SSC on LVDS */
        { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

        /* Acer Aspire 5734Z must invert backlight brightness */
        { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

        /* Acer/eMachines G725 */
        { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

        /* Acer/eMachines e725 */
        { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

        /* Acer/Packard Bell NCL20 */
        { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

        /* Acer Aspire 4736Z */
        { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

        /* Acer Aspire 5336 */
        { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

        /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
        { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

        /* Acer C720 Chromebook (Core i3 4005U) */
        { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

        /* Apple Macbook 2,1 (Core 2 T7400) */
        { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

        /* Apple Macbook 4,1 */
        { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

        /* Toshiba CB35 Chromebook (Celeron 2955U) */
        { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

        /* HP Chromebook 14 (Celeron 2955U) */
        { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

        /* Dell Chromebook 11 */
        { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

        /* Dell Chromebook 11 (2015 version) */
        { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
15205
15206static void intel_init_quirks(struct drm_device *dev)
15207{
15208        struct pci_dev *d = dev->pdev;
15209        int i;
15210
15211        for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15212                struct intel_quirk *q = &intel_quirks[i];
15213
15214                if (d->device == q->device &&
15215                    (d->subsystem_vendor == q->subsystem_vendor ||
15216                     q->subsystem_vendor == PCI_ANY_ID) &&
15217                    (d->subsystem_device == q->subsystem_device ||
15218                     q->subsystem_device == PCI_ANY_ID))
15219                        q->hook(dev);
15220        }
15221        for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15222                if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15223                        intel_dmi_quirks[i].hook(dev);
15224        }
15225}
15226
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u8 sr1;
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

        /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
        /* Read-modify-write VGA sequencer register SR01 via legacy I/O,
         * setting bit 5 (presumably the "screen off" bit -- confirm against
         * the VGA sequencer documentation). */
        outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
        outb(sr1 | 1<<5, VGA_SR_DATA);
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
        /* NOTE(review): delay before writing VGACNTRL, presumably to let the
         * sequencer write take effect -- source of the 300us not documented */
        udelay(300);

        I915_WRITE(vga_reg, VGA_DISP_DISABLE);
        POSTING_READ(vga_reg);
}
15245
/*
 * Hardware-side modeset init: refresh cdclk bookkeeping, then program clock
 * gating and enable GT power saving.  Also called on resume paths, so it
 * must be safe to run more than once.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_update_cdclk(dev);

        /* Seed the atomic cdclk tracking with the value just read above */
        dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

        intel_init_clock_gating(dev);
        intel_enable_gt_powersave(dev);
}
15257
15258/*
15259 * Calculate what we think the watermarks should be for the state we've read
15260 * out of the hardware and then immediately program those watermarks so that
15261 * we ensure the hardware settings match our internal state.
15262 *
15263 * We can calculate what we think WM's should be by creating a duplicate of the
15264 * current state (which was constructed during hardware readout) and running it
15265 * through the atomic check code to calculate new watermark values in the
15266 * state object.
15267 */
15268static void sanitize_watermarks(struct drm_device *dev)
15269{
15270        struct drm_i915_private *dev_priv = to_i915(dev);
15271        struct drm_atomic_state *state;
15272        struct drm_crtc *crtc;
15273        struct drm_crtc_state *cstate;
15274        struct drm_modeset_acquire_ctx ctx;
15275        int ret;
15276        int i;
15277
15278        /* Only supported on platforms that use atomic watermark design */
15279        if (!dev_priv->display.program_watermarks)
15280                return;
15281
15282        /*
15283         * We need to hold connection_mutex before calling duplicate_state so
15284         * that the connector loop is protected.
15285         */
15286        drm_modeset_acquire_init(&ctx, 0);
15287retry:
15288        ret = drm_modeset_lock_all_ctx(dev, &ctx);
15289        if (ret == -EDEADLK) {
15290                drm_modeset_backoff(&ctx);
15291                goto retry;
15292        } else if (WARN_ON(ret)) {
15293                goto fail;
15294        }
15295
15296        state = drm_atomic_helper_duplicate_state(dev, &ctx);
15297        if (WARN_ON(IS_ERR(state)))
15298                goto fail;
15299
15300        ret = intel_atomic_check(dev, state);
15301        if (ret) {
15302                /*
15303                 * If we fail here, it means that the hardware appears to be
15304                 * programmed in a way that shouldn't be possible, given our
15305                 * understanding of watermark requirements.  This might mean a
15306                 * mistake in the hardware readout code or a mistake in the
15307                 * watermark calculations for a given platform.  Raise a WARN
15308                 * so that this is noticeable.
15309                 *
15310                 * If this actually happens, we'll have to just leave the
15311                 * BIOS-programmed watermarks untouched and hope for the best.
15312                 */
15313                WARN(true, "Could not determine valid watermarks for inherited state\n");
15314                goto fail;
15315        }
15316
15317        /* Write calculated watermark values back */
15318        to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
15319        for_each_crtc_in_state(state, crtc, cstate, i) {
15320                struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15321
15322                dev_priv->display.program_watermarks(cs);
15323        }
15324
15325        drm_atomic_state_free(state);
15326fail:
15327        drm_modeset_drop_locks(&ctx);
15328        drm_modeset_acquire_fini(&ctx);
15329}
15330
/*
 * One-time display init at driver load: mode_config setup, quirks, power
 * management, per-pipe crtc/sprite creation, output/connector setup,
 * hardware state readout and BIOS framebuffer takeover.  The sequencing
 * below is deliberate -- later steps consume state set up by earlier ones.
 */
void intel_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int sprite, ret;
        enum pipe pipe;
        struct intel_crtc *crtc;

        drm_mode_config_init(dev);

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;

        dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;

        dev->mode_config.allow_fb_modifiers = true;

        dev->mode_config.funcs = &intel_mode_funcs;

        intel_init_quirks(dev);

        intel_init_pm(dev);

        /* Nothing more to do on display-less hardware */
        if (INTEL_INFO(dev)->num_pipes == 0)
                return;

        /*
         * There may be no VBT; and if the BIOS enabled SSC we can
         * just keep using it to avoid unnecessary flicker.  Whereas if the
         * BIOS isn't using it, don't assume it will work even if the VBT
         * indicates as much.
         */
        if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
                                            DREF_SSC1_ENABLE);

                if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
                        DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
                                     bios_lvds_use_ssc ? "en" : "dis",
                                     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
                        dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
                }
        }

        intel_init_display(dev);
        intel_init_audio(dev);

        /* Maximum framebuffer dimensions grow with hardware generation */
        if (IS_GEN2(dev)) {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
        } else if (IS_GEN3(dev)) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
        } else {
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
        }

        /* Cursor size limits are likewise generation dependent */
        if (IS_845G(dev) || IS_I865G(dev)) {
                dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
                dev->mode_config.cursor_height = 1023;
        } else if (IS_GEN2(dev)) {
                dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
                dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
        } else {
                dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
                dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
        }

        dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_INFO(dev)->num_pipes,
                      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

        /* Create a crtc per pipe and a plane per sprite; sprite-init
         * failures are logged but not fatal. */
        for_each_pipe(dev_priv, pipe) {
                intel_crtc_init(dev, pipe);
                for_each_sprite(dev_priv, pipe, sprite) {
                        ret = intel_plane_init(dev, pipe, sprite);
                        if (ret)
                                DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
                                              pipe_name(pipe), sprite_name(pipe, sprite), ret);
                }
        }

        intel_update_czclk(dev_priv);
        intel_update_cdclk(dev);

        intel_shared_dpll_init(dev);

        /* Just disable it once at startup */
        i915_disable_vga(dev);
        intel_setup_outputs(dev);

        /* Read the BIOS-programmed hardware state into our sw state */
        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                struct intel_initial_plane_config plane_config = {};

                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top.  This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                dev_priv->display.get_initial_plane_config(crtc,
                                                           &plane_config);

                /*
                 * If the fb is shared between multiple heads, we'll
                 * just get the first one.
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        sanitize_watermarks(dev);
}
15459
15460static void intel_enable_pipe_a(struct drm_device *dev)
15461{
15462        struct intel_connector *connector;
15463        struct drm_connector *crt = NULL;
15464        struct intel_load_detect_pipe load_detect_temp;
15465        struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15466
15467        /* We can't just switch on the pipe A, we need to set things up with a
15468         * proper mode and output configuration. As a gross hack, enable pipe A
15469         * by enabling the load detect pipe once. */
15470        for_each_intel_connector(dev, connector) {
15471                if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15472                        crt = &connector->base;
15473                        break;
15474                }
15475        }
15476
15477        if (!crt)
15478                return;
15479
15480        if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15481                intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15482}
15483
/*
 * Check whether the hardware plane -> pipe assignment matches what this crtc
 * expects.  On pre-gen4 hardware a primary plane can be routed to either
 * pipe; returns false when the OTHER plane is enabled and selects this
 * crtc's pipe (i.e. the mapping is crossed).
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;

        /* With a single pipe there is no alternative mapping to check. */
        if (INTEL_INFO(dev)->num_pipes == 1)
                return true;

        /* Read the control register of the plane we do NOT own. */
        val = I915_READ(DSPCNTR(!crtc->plane));

        /* The !! collapses the pipe-select field to 0/1, so this comparison
         * only works on two-pipe hardware; the sole caller guards this with
         * gen < 4 (see intel_sanitize_crtc). */
        if ((val & DISPLAY_PLANE_ENABLE) &&
            (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
                return false;

        return true;
}
15502
15503static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15504{
15505        struct drm_device *dev = crtc->base.dev;
15506        struct intel_encoder *encoder;
15507
15508        for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15509                return true;
15510
15511        return false;
15512}
15513
15514static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15515{
15516        struct drm_device *dev = encoder->base.dev;
15517        struct intel_connector *connector;
15518
15519        for_each_connector_on_encoder(dev, &encoder->base, connector)
15520                return true;
15521
15522        return false;
15523}
15524
/*
 * Bring one crtc's software state and hardware state into agreement after
 * reading the BIOS-programmed configuration: clear debug delays, fix the
 * plane->pipe mapping, apply the pipe A force quirk, disable crtcs with no
 * active encoders, break stale encoder->crtc links, and set up fifo
 * underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);

        /* Clear any frame start delays used for debugging left by the BIOS */
        I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

        /* restore vblank interrupts to correct state */
        drm_crtc_vblank_reset(&crtc->base);
        if (crtc->active) {
                struct intel_plane *plane;

                drm_crtc_vblank_on(&crtc->base);

                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
                                continue;

                        plane->disable_plane(&plane->base, &crtc->base);
                }
        }

        /* We need to sanitize the plane -> pipe mapping first because this will
         * disable the crtc (and hence change the state) if it is wrong. Note
         * that gen4+ has a fixed plane -> pipe mapping.  */
        if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
                bool plane;

                DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
                              crtc->base.base.id);

                /* Pipe has the wrong plane attached and the plane is active.
                 * Temporarily change the plane mapping and disable everything
                 * ...  */
                plane = crtc->plane;
                to_intel_plane_state(crtc->base.primary->state)->visible = true;
                crtc->plane = !plane;
                intel_crtc_disable_noatomic(&crtc->base);
                crtc->plane = plane;
        }

        if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
            crtc->pipe == PIPE_A && !crtc->active) {
                /* BIOS forgot to enable pipe A, this mostly happens after
                 * resume. Force-enable the pipe to fix this, the update_dpms
                 * call below we restore the pipe to the right state, but leave
                 * the required bits on. */
                intel_enable_pipe_a(dev);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (!intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base);

        /* Reconcile sw 'active' tracking with the drm core crtc state */
        if (crtc->active != crtc->base.state->active) {
                struct intel_encoder *encoder;

                /* This can happen either due to bugs in the get_hw_state
                 * functions or because of calls to intel_crtc_disable_noatomic,
                 * or because the pipe is force-enabled due to the
                 * pipe A quirk. */
                DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
                              crtc->base.base.id,
                              crtc->base.state->enable ? "enabled" : "disabled",
                              crtc->active ? "enabled" : "disabled");

                WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
                crtc->base.state->active = crtc->active;
                crtc->base.enabled = crtc->active;
                crtc->base.state->connector_mask = 0;
                crtc->base.state->encoder_mask = 0;

                /* Because we only establish the connector -> encoder ->
                 * crtc links if something is active, this means the
                 * crtc is now deactivated. Break the links. connector
                 * -> encoder links are only establish when things are
                 *  actually up, hence no need to break them. */
                WARN_ON(crtc->active);

                for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                        encoder->base.crtc = NULL;
        }

        if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we dont have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                crtc->pch_fifo_underrun_disabled = true;
        }
}
15630
15631static void intel_sanitize_encoder(struct intel_encoder *encoder)
15632{
15633        struct intel_connector *connector;
15634        struct drm_device *dev = encoder->base.dev;
15635
15636        /* We need to check both for a crtc link (meaning that the
15637         * encoder is active and trying to read from a pipe) and the
15638         * pipe itself being active. */
15639        bool has_active_crtc = encoder->base.crtc &&
15640                to_intel_crtc(encoder->base.crtc)->active;
15641
15642        if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
15643                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15644                              encoder->base.base.id,
15645                              encoder->base.name);
15646
15647                /* Connector is active, but has no active pipe. This is
15648                 * fallout from our resume register restoring. Disable
15649                 * the encoder manually again. */
15650                if (encoder->base.crtc) {
15651                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15652                                      encoder->base.base.id,
15653                                      encoder->base.name);
15654                        encoder->disable(encoder);
15655                        if (encoder->post_disable)
15656                                encoder->post_disable(encoder);
15657                }
15658                encoder->base.crtc = NULL;
15659
15660                /* Inconsistent output/port/pipe state happens presumably due to
15661                 * a bug in one of the get_hw_state functions. Or someplace else
15662                 * in our code, like the register restore mess on resume. Clamp
15663                 * things to off as a safer default. */
15664                for_each_intel_connector(dev, connector) {
15665                        if (connector->encoder != encoder)
15666                                continue;
15667                        connector->base.dpms = DRM_MODE_DPMS_OFF;
15668                        connector->base.encoder = NULL;
15669                }
15670        }
15671        /* Enabled encoders without active connectors will be fixed in
15672         * the crtc fixup. */
15673}
15674
15675void i915_redisable_vga_power_on(struct drm_device *dev)
15676{
15677        struct drm_i915_private *dev_priv = dev->dev_private;
15678        i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15679
15680        if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15681                DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15682                i915_disable_vga(dev);
15683        }
15684}
15685
15686void i915_redisable_vga(struct drm_device *dev)
15687{
15688        struct drm_i915_private *dev_priv = dev->dev_private;
15689
15690        /* This function can be called both from intel_modeset_setup_hw_state or
15691         * at a very early point in our resume sequence, where the power well
15692         * structures are not yet restored. Since this function is at a very
15693         * paranoid "someone might have enabled VGA while we were not looking"
15694         * level, just check if the power well is enabled instead of trying to
15695         * follow the "don't touch the power well if we don't need it" policy
15696         * the rest of the driver uses. */
15697        if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15698                return;
15699
15700        i915_redisable_vga_power_on(dev);
15701
15702        intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15703}
15704
15705static bool primary_get_hw_state(struct intel_plane *plane)
15706{
15707        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15708
15709        return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15710}
15711
15712/* FIXME read out full plane state for all planes */
15713static void readout_plane_state(struct intel_crtc *crtc)
15714{
15715        struct drm_plane *primary = crtc->base.primary;
15716        struct intel_plane_state *plane_state =
15717                to_intel_plane_state(primary->state);
15718
15719        plane_state->visible = crtc->active &&
15720                primary_get_hw_state(to_intel_plane(primary));
15721
15722        if (plane_state->visible)
15723                crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15724}
15725
/* Read the current modeset state out of the hardware registers and
 * rebuild the matching software tracking: crtc configs, minimum pixel
 * clocks, shared DPLL usage, encoder->crtc links, connector->encoder
 * links and (partial) plane visibility. */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        int i;

        dev_priv->active_crtcs = 0;

        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state = crtc->config;
                int pixclk = 0;

                /* Start from a clean slate: throw away the previous crtc
                 * state and rebuild it from the pipe registers. */
                __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
                memset(crtc_state, 0, sizeof(*crtc_state));
                crtc_state->base.crtc = &crtc->base;

                crtc_state->base.active = crtc_state->base.enable =
                        dev_priv->display.get_pipe_config(crtc, crtc_state);

                crtc->base.enabled = crtc_state->base.enable;
                crtc->active = crtc_state->base.active;

                if (crtc_state->base.active) {
                        dev_priv->active_crtcs |= 1 << crtc->pipe;

                        /* Compute the minimum pixel clock this pipe imposes
                         * on the platforms that track it for cdclk. */
                        if (IS_BROADWELL(dev_priv)) {
                                pixclk = ilk_pipe_pixel_rate(crtc_state);

                                /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
                                if (crtc_state->ips_enabled)
                                        pixclk = DIV_ROUND_UP(pixclk * 100, 95);
                        } else if (IS_VALLEYVIEW(dev_priv) ||
                                   IS_CHERRYVIEW(dev_priv) ||
                                   IS_BROXTON(dev_priv))
                                pixclk = crtc_state->base.adjusted_mode.crtc_clock;
                        else
                                WARN_ON(dev_priv->display.modeset_calc_cdclk);
                }

                dev_priv->min_pixclk[crtc->pipe] = pixclk;

                readout_plane_state(crtc);

                DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
                              crtc->base.base.id,
                              crtc->active ? "enabled" : "disabled");
        }

        /* Read out shared DPLL hw state and recompute which active crtcs
         * use which pll from the crtc state gathered above. */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                pll->on = pll->get_hw_state(dev_priv, pll,
                                            &pll->config.hw_state);
                pll->active = 0;
                pll->config.crtc_mask = 0;
                for_each_intel_crtc(dev, crtc) {
                        if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
                                pll->active++;
                                pll->config.crtc_mask |= 1 << crtc->pipe;
                        }
                }

                DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
                              pll->name, pll->config.crtc_mask, pll->on);

                /* Keep the PLL power domain referenced while any crtc
                 * still uses this pll. */
                if (pll->config.crtc_mask)
                        intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
        }

        /* Link each active encoder to the crtc of the pipe it drives and
         * let the encoder fill in its part of the crtc config. */
        for_each_intel_encoder(dev, encoder) {
                pipe = 0;

                if (encoder->get_hw_state(encoder, &pipe)) {
                        crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                        encoder->base.crtc = &crtc->base;
                        encoder->get_config(encoder, crtc->config);
                } else {
                        encoder->base.crtc = NULL;
                }

                DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                              encoder->base.base.id,
                              encoder->base.name,
                              encoder->base.crtc ? "enabled" : "disabled",
                              pipe_name(pipe));
        }

        /* Rebuild the connector -> encoder links plus the crtc state's
         * connector/encoder masks from the connector hw state. */
        for_each_intel_connector(dev, connector) {
                if (connector->get_hw_state(connector)) {
                        connector->base.dpms = DRM_MODE_DPMS_ON;

                        encoder = connector->encoder;
                        connector->base.encoder = &encoder->base;

                        if (encoder->base.crtc &&
                            encoder->base.crtc->state->active) {
                                /*
                                 * This has to be done during hardware readout
                                 * because anything calling .crtc_disable may
                                 * rely on the connector_mask being accurate.
                                 */
                                encoder->base.crtc->state->connector_mask |=
                                        1 << drm_connector_index(&connector->base);
                                encoder->base.crtc->state->encoder_mask |=
                                        1 << drm_encoder_index(&encoder->base);
                        }

                } else {
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
                              connector->base.base.id,
                              connector->base.name,
                              connector->base.encoder ? "enabled" : "disabled");
        }

        /* Finally derive the modes and vblank timestamping constants for
         * active crtcs, now that the encoders have filled in the pipe
         * config above. */
        for_each_intel_crtc(dev, crtc) {
                crtc->base.hwmode = crtc->config->base.adjusted_mode;

                memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
                if (crtc->base.state->active) {
                        intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
                        intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
                        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

                        /*
                         * The initial mode needs to be set in order to keep
                         * the atomic core happy. It wants a valid mode if the
                         * crtc's enabled, so we do the above call.
                         *
                         * At this point some state updated by the connectors
                         * in their ->detect() callback has not run yet, so
                         * no recalculation can be done yet.
                         *
                         * Even if we could do a recalculation and modeset
                         * right now it would cause a double modeset if
                         * fbdev or userspace chooses a different initial mode.
                         *
                         * If that happens, someone indicated they wanted a
                         * mode change, which means it's safe to do a full
                         * recalculation.
                         */
                        crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

                        drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
                        update_scanline_offset(crtc);
                }
        }
}
15879
/* Scan out the current hw modeset state and sanitize it so the software
 * tracking matches what the hardware is actually doing: broken encoder
 * and crtc state is clamped off, unused shared DPLLs are disabled, and
 * watermark state is read out. */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        int i;

        intel_modeset_readout_hw_state(dev);

        /* HW state is read out, now we need to sanitize this mess. */
        for_each_intel_encoder(dev, encoder) {
                intel_sanitize_encoder(encoder);
        }

        for_each_pipe(dev_priv, pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                intel_sanitize_crtc(crtc);
                intel_dump_pipe_config(crtc, crtc->config,
                                       "[setup_hw_state]");
        }

        intel_modeset_update_connector_atomic_state(dev);

        /* Turn off any shared DPLL that is on but no longer used by any
         * active crtc after sanitizing. */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                if (!pll->on || pll->active)
                        continue;

                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

                pll->disable(dev_priv, pll);
                pll->on = false;
        }

        /* Read out the platform-specific watermark state. */
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                vlv_wm_get_hw_state(dev);
        else if (IS_GEN9(dev))
                skl_wm_get_hw_state(dev);
        else if (HAS_PCH_SPLIT(dev))
                ilk_wm_get_hw_state(dev);

        /* NOTE(review): no crtc is expected to report leftover power
         * domains at this point; WARN and release them if one does. */
        for_each_intel_crtc(dev, crtc) {
                unsigned long put_domains;

                put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }
        intel_display_set_init_power(dev_priv, false);

        intel_fbc_init_pipe_state(dev_priv);
}
15938
/*
 * intel_display_resume - restore the display hw state after resume
 *
 * Re-reads and sanitizes the current hardware state, re-disables the
 * legacy VGA plane, and then replays the atomic state saved in
 * dev_priv->modeset_restore_state (if any).
 */
void intel_display_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = dev_priv->modeset_restore_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
        bool setup = false;

        dev_priv->modeset_restore_state = NULL;

        /*
         * This is a cludge because with real atomic modeset mode_config.mutex
         * won't be taken. Unfortunately some probed state like
         * audio_codec_enable is still protected by mode_config.mutex, so lock
         * it here for now.
         */
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(&ctx, 0);

retry:
        ret = drm_modeset_lock_all_ctx(dev, &ctx);

        /* Run the hw state readout/sanitize only once, even if we go
         * through the deadlock-backoff loop below multiple times. */
        if (ret == 0 && !setup) {
                setup = true;

                intel_modeset_setup_hw_state(dev);
                i915_redisable_vga(dev);
        }

        if (ret == 0 && state) {
                struct drm_crtc_state *crtc_state;
                struct drm_crtc *crtc;
                int i;

                state->acquire_ctx = &ctx;

                for_each_crtc_in_state(state, crtc, crtc_state, i) {
                        /*
                         * Force recalculation even if we restore
                         * current state. With fast modeset this may not result
                         * in a modeset when the state is compatible.
                         */
                        crtc_state->mode_changed = true;
                }

                ret = drm_atomic_commit(state);
        }

        /* Standard w/w mutex deadlock handling: back off and retry. */
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        mutex_unlock(&dev->mode_config.mutex);

        /* On failure the saved state was not consumed by the commit;
         * free it here to avoid leaking it. */
        if (ret) {
                DRM_ERROR("Restoring old state failed with %i\n", ret);
                drm_atomic_state_free(state);
        }
}
16001
/*
 * intel_modeset_gem_init - finish modeset initialisation once GEM is up
 *
 * Enables GT power saving, initialises the display hardware and overlay,
 * pins and fences the framebuffers taken over from the BIOS/boot, and
 * finally registers the backlight.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        int ret;

        intel_init_gt_powersave(dev);

        intel_modeset_init_hw(dev);

        intel_setup_overlay(dev);

        /*
         * Make sure any fbs we allocated at startup are properly
         * pinned & fenced.  When we do the allocation it's too early
         * for this.
         */
        for_each_crtc(dev, c) {
                obj = intel_fb_obj(c->primary->fb);
                if (obj == NULL)
                        continue;

                mutex_lock(&dev->struct_mutex);
                ret = intel_pin_and_fence_fb_obj(c->primary,
                                                 c->primary->fb,
                                                 c->primary->state);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        /* Pinning failed: drop the boot fb from this crtc
                         * and its plane state, so the crtc comes up with
                         * no fb rather than a dangling reference. */
                        DRM_ERROR("failed to pin boot fb on pipe %d\n",
                                  to_intel_crtc(c)->pipe);
                        drm_framebuffer_unreference(c->primary->fb);
                        c->primary->fb = NULL;
                        c->primary->crtc = c->primary->state->crtc = NULL;
                        update_state_fb(c->primary);
                        c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
                }
        }

        intel_backlight_register(dev);
}
16042
16043void intel_connector_unregister(struct intel_connector *intel_connector)
16044{
16045        struct drm_connector *connector = &intel_connector->base;
16046
16047        intel_panel_destroy_backlight(connector);
16048        drm_connector_unregister(connector);
16049}
16050
/*
 * intel_modeset_cleanup - tear down all modeset state on driver unload
 *
 * The teardown order below is deliberate: interrupts and polling go
 * first, then work items are flushed, then connectors/encoders, and
 * only afterwards the shared infrastructure (overlay, powersave, gmbus).
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_connector *connector;

        intel_disable_gt_powersave(dev);

        intel_backlight_unregister(dev);

        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
         * Too much stuff here (turning of connectors, ...) would
         * experience fancy races otherwise.
         */
        intel_irq_uninstall(dev_priv);

        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
         * poll handlers. Hence disable polling after hpd handling is shut down.
         */
        drm_kms_helper_poll_fini(dev);

        intel_unregister_dsm_handler();

        intel_fbc_global_disable(dev_priv);

        /* flush any delayed tasks or pending work */
        flush_scheduled_work();

        /* destroy the backlight and sysfs files before encoders/connectors */
        for_each_intel_connector(dev, connector)
                connector->unregister(connector);

        drm_mode_config_cleanup(dev);

        intel_cleanup_overlay(dev);

        intel_cleanup_gt_powersave(dev);

        intel_teardown_gmbus(dev);
}
16092
16093/*
16094 * Return which encoder is currently attached for connector.
16095 */
16096struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
16097{
16098        return &intel_attached_encoder(connector)->base;
16099}
16100
16101void intel_connector_attach_encoder(struct intel_connector *connector,
16102                                    struct intel_encoder *encoder)
16103{
16104        connector->encoder = encoder;
16105        drm_mode_connector_attach_encoder(&connector->base,
16106                                          &encoder->base);
16107}
16108
16109/*
16110 * set vga decode state - true == enable VGA decode
16111 */
16112int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16113{
16114        struct drm_i915_private *dev_priv = dev->dev_private;
16115        unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16116        u16 gmch_ctrl;
16117
16118        if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16119                DRM_ERROR("failed to read control word\n");
16120                return -EIO;
16121        }
16122
16123        if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16124                return 0;
16125
16126        if (state)
16127                gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16128        else
16129                gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16130
16131        if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16132                DRM_ERROR("failed to write control word\n");
16133                return -EIO;
16134        }
16135
16136        return 0;
16137}
16138
/* Snapshot of display register state captured at error time; filled in
 * by intel_display_capture_error_state() and dumped into the error
 * state buffer by intel_display_print_error_state(). */
struct intel_display_error_state {

        /* HSW_PWR_WELL_DRIVER, captured on HSW/BDW only. */
        u32 power_well_driver;

        /* Number of valid entries in transcoder[] below. */
        int num_transcoders;

        /* Cursor plane registers, per pipe. */
        struct intel_cursor_error_state {
                u32 control;
                u32 position;
                u32 base;
                u32 size; /* NOTE(review): not written by the capture code in this file — confirm */
        } cursor[I915_MAX_PIPES];

        /* Pipe registers; power_domain_on records whether the pipe's
         * power domain was up, the rest is skipped when it was not. */
        struct intel_pipe_error_state {
                bool power_domain_on;
                u32 source;
                u32 stat;
        } pipe[I915_MAX_PIPES];

        /* Primary plane registers, per pipe; some fields are only
         * captured on a subset of gens (see the capture code). */
        struct intel_plane_error_state {
                u32 control;
                u32 stride;
                u32 size;
                u32 pos;
                u32 addr;
                u32 surface;
                u32 tile_offset;
        } plane[I915_MAX_PIPES];

        /* CPU transcoder timing registers; sized for pipes A-C plus eDP. */
        struct intel_transcoder_error_state {
                bool power_domain_on;
                enum transcoder cpu_transcoder;

                u32 conf;

                u32 htotal;
                u32 hblank;
                u32 hsync;
                u32 vtotal;
                u32 vblank;
                u32 vsync;
        } transcoder[4];
};
16182
/*
 * Capture the current display register state for inclusion in an error
 * dump. Returns a kzalloc'd snapshot (caller frees), or NULL when there
 * are no pipes or the allocation fails. Allocates with GFP_ATOMIC so it
 * can be called from atomic context.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_display_error_state *error;
        int transcoders[] = {
                TRANSCODER_A,
                TRANSCODER_B,
                TRANSCODER_C,
                TRANSCODER_EDP,
        };
        int i;

        if (INTEL_INFO(dev)->num_pipes == 0)
                return NULL;

        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;

        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

        for_each_pipe(dev_priv, i) {
                /* Skip register reads on pipes whose power domain is off;
                 * power_domain_on stays false so the printer skips them too. */
                error->pipe[i].power_domain_on =
                        __intel_display_power_is_enabled(dev_priv,
                                                         POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;

                error->cursor[i].control = I915_READ(CURCNTR(i));
                error->cursor[i].position = I915_READ(CURPOS(i));
                error->cursor[i].base = I915_READ(CURBASE(i));

                error->plane[i].control = I915_READ(DSPCNTR(i));
                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
                /* Gen-dependent plane registers. */
                if (INTEL_INFO(dev)->gen <= 3) {
                        error->plane[i].size = I915_READ(DSPSIZE(i));
                        error->plane[i].pos = I915_READ(DSPPOS(i));
                }
                if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
                        error->plane[i].addr = I915_READ(DSPADDR(i));
                if (INTEL_INFO(dev)->gen >= 4) {
                        error->plane[i].surface = I915_READ(DSPSURF(i));
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }

                error->pipe[i].source = I915_READ(PIPESRC(i));

                if (HAS_GMCH_DISPLAY(dev))
                        error->pipe[i].stat = I915_READ(PIPESTAT(i));
        }

        /* One transcoder per pipe, plus the eDP transcoder on DDI. */
        error->num_transcoders = INTEL_INFO(dev)->num_pipes;
        if (HAS_DDI(dev_priv->dev))
                error->num_transcoders++; /* Account for eDP. */

        for (i = 0; i < error->num_transcoders; i++) {
                enum transcoder cpu_transcoder = transcoders[i];

                /* As for pipes: don't touch registers of powered-down
                 * transcoders. */
                error->transcoder[i].power_domain_on =
                        __intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;

                error->transcoder[i].cpu_transcoder = cpu_transcoder;

                error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
                error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
                error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
                error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
                error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
                error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
                error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
        }

        return error;
}
16262
/* Shorthand for printing into an i915 error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16264
/*
 * Dump a previously captured display error state into the error state
 * buffer. A NULL @error is tolerated and simply ignored. The gen checks
 * mirror those in intel_display_capture_error_state() so only captured
 * fields are printed.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                                struct drm_device *dev,
                                struct intel_display_error_state *error)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        if (!error)
                return;

        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(dev_priv, i) {
                err_printf(m, "Pipe [%d]:\n", i);
                err_printf(m, "  Power: %s\n",
                           onoff(error->pipe[i].power_domain_on));
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
                err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

                err_printf(m, "Plane [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
                err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
                if (INTEL_INFO(dev)->gen <= 3) {
                        err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
                        err_printf(m, "  POS: %08x\n", error->plane[i].pos);
                }
                if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
                        err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
                if (INTEL_INFO(dev)->gen >= 4) {
                        err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
                        err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
                }

                err_printf(m, "Cursor [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
                err_printf(m, "  POS: %08x\n", error->cursor[i].position);
                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }

        for (i = 0; i < error->num_transcoders; i++) {
                err_printf(m, "CPU transcoder: %c\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
                err_printf(m, "  Power: %s\n",
                           onoff(error->transcoder[i].power_domain_on));
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
                err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
                err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
                err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
                err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
        }
}
16321