linux/drivers/gpu/drm/i915/intel_display.c
   1/*
   2 * Copyright © 2006-2007 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *      Eric Anholt <eric@anholt.net>
  25 */
  26
  27#include <linux/dmi.h>
  28#include <linux/module.h>
  29#include <linux/input.h>
  30#include <linux/i2c.h>
  31#include <linux/kernel.h>
  32#include <linux/slab.h>
  33#include <linux/vgaarb.h>
  34#include <drm/drm_edid.h>
  35#include <drm/drmP.h>
  36#include "intel_drv.h"
  37#include <drm/i915_drm.h>
  38#include "i915_drv.h"
  39#include "i915_trace.h"
  40#include <drm/drm_atomic.h>
  41#include <drm/drm_atomic_helper.h>
  42#include <drm/drm_dp_helper.h>
  43#include <drm/drm_crtc_helper.h>
  44#include <drm/drm_plane_helper.h>
  45#include <drm/drm_rect.h>
  46#include <linux/dma_remapping.h>
  47
  48/* Primary plane formats supported by all gen */
  49#define COMMON_PRIMARY_FORMATS \
  50        DRM_FORMAT_C8, \
  51        DRM_FORMAT_RGB565, \
  52        DRM_FORMAT_XRGB8888, \
  53        DRM_FORMAT_ARGB8888
  54
  55/* Primary plane formats for gen <= 3 */
  56static const uint32_t intel_primary_formats_gen2[] = {
  57        COMMON_PRIMARY_FORMATS,
  58        DRM_FORMAT_XRGB1555,
  59        DRM_FORMAT_ARGB1555,
  60};
  61
  62/* Primary plane formats for gen >= 4 */
  63static const uint32_t intel_primary_formats_gen4[] = {
   64        COMMON_PRIMARY_FORMATS,
  65        DRM_FORMAT_XBGR8888,
  66        DRM_FORMAT_ABGR8888,
  67        DRM_FORMAT_XRGB2101010,
  68        DRM_FORMAT_ARGB2101010,
  69        DRM_FORMAT_XBGR2101010,
  70        DRM_FORMAT_ABGR2101010,
  71};
  72
  73/* Cursor formats */
  74static const uint32_t intel_cursor_formats[] = {
  75        DRM_FORMAT_ARGB8888,
  76};
  77
  78static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
  79
  80static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  81                                struct intel_crtc_state *pipe_config);
  82static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  83                                   struct intel_crtc_state *pipe_config);
  84
  85static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
  86                          int x, int y, struct drm_framebuffer *old_fb,
  87                          struct drm_atomic_state *state);
  88static int intel_framebuffer_init(struct drm_device *dev,
  89                                  struct intel_framebuffer *ifb,
  90                                  struct drm_mode_fb_cmd2 *mode_cmd,
  91                                  struct drm_i915_gem_object *obj);
  92static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
  93static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
  94static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  95                                         struct intel_link_m_n *m_n,
  96                                         struct intel_link_m_n *m2_n2);
  97static void ironlake_set_pipeconf(struct drm_crtc *crtc);
  98static void haswell_set_pipeconf(struct drm_crtc *crtc);
  99static void intel_set_pipe_csc(struct drm_crtc *crtc);
 100static void vlv_prepare_pll(struct intel_crtc *crtc,
 101                            const struct intel_crtc_state *pipe_config);
 102static void chv_prepare_pll(struct intel_crtc *crtc,
 103                            const struct intel_crtc_state *pipe_config);
 104static void intel_begin_crtc_commit(struct drm_crtc *crtc);
 105static void intel_finish_crtc_commit(struct drm_crtc *crtc);
 106
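     /*
      * For DP MST connectors there is no fixed connector->encoder link;
      * the mst_port instead carries one internal encoder per pipe, so the
      * helper below picks the encoder matching the pipe in use.
      */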
 107static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
 108{
 109        if (!connector->mst_port)
 110                return connector->encoder;
 111        else
 112                return &connector->mst_port->mst_encoders[pipe]->base;
 113}
 114
 115typedef struct {
 116        int     min, max;
 117} intel_range_t;
 118
 119typedef struct {
 120        int     dot_limit;
 121        int     p2_slow, p2_fast;
 122} intel_p2_t;
 123
 124typedef struct intel_limit intel_limit_t;
 125struct intel_limit {
 126        intel_range_t   dot, vco, n, m, m1, m2, p, p1;
 127        intel_p2_t          p2;
 128};
 129
 130int
 131intel_pch_rawclk(struct drm_device *dev)
 132{
 133        struct drm_i915_private *dev_priv = dev->dev_private;
 134
 135        WARN_ON(!HAS_PCH_SPLIT(dev));
 136
 137        return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
 138}
 139
 140static inline u32 /* units of 100MHz */
 141intel_fdi_link_freq(struct drm_device *dev)
 142{
 143        if (IS_GEN5(dev)) {
 144                struct drm_i915_private *dev_priv = dev->dev_private;
 145                return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
 146        } else
 147                return 27;
 148}
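     /*
      * Illustrative reading of the above: the return value is in units of
      * 100 MHz, so the fixed value 27 used on non-Ironlake parts
      * corresponds to a 2.7 GHz FDI link clock.
      */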
 149
 150static const intel_limit_t intel_limits_i8xx_dac = {
 151        .dot = { .min = 25000, .max = 350000 },
 152        .vco = { .min = 908000, .max = 1512000 },
 153        .n = { .min = 2, .max = 16 },
 154        .m = { .min = 96, .max = 140 },
 155        .m1 = { .min = 18, .max = 26 },
 156        .m2 = { .min = 6, .max = 16 },
 157        .p = { .min = 4, .max = 128 },
 158        .p1 = { .min = 2, .max = 33 },
 159        .p2 = { .dot_limit = 165000,
 160                .p2_slow = 4, .p2_fast = 2 },
 161};
 162
 163static const intel_limit_t intel_limits_i8xx_dvo = {
 164        .dot = { .min = 25000, .max = 350000 },
 165        .vco = { .min = 908000, .max = 1512000 },
 166        .n = { .min = 2, .max = 16 },
 167        .m = { .min = 96, .max = 140 },
 168        .m1 = { .min = 18, .max = 26 },
 169        .m2 = { .min = 6, .max = 16 },
 170        .p = { .min = 4, .max = 128 },
 171        .p1 = { .min = 2, .max = 33 },
 172        .p2 = { .dot_limit = 165000,
 173                .p2_slow = 4, .p2_fast = 4 },
 174};
 175
 176static const intel_limit_t intel_limits_i8xx_lvds = {
 177        .dot = { .min = 25000, .max = 350000 },
 178        .vco = { .min = 908000, .max = 1512000 },
 179        .n = { .min = 2, .max = 16 },
 180        .m = { .min = 96, .max = 140 },
 181        .m1 = { .min = 18, .max = 26 },
 182        .m2 = { .min = 6, .max = 16 },
 183        .p = { .min = 4, .max = 128 },
 184        .p1 = { .min = 1, .max = 6 },
 185        .p2 = { .dot_limit = 165000,
 186                .p2_slow = 14, .p2_fast = 7 },
 187};
 188
 189static const intel_limit_t intel_limits_i9xx_sdvo = {
 190        .dot = { .min = 20000, .max = 400000 },
 191        .vco = { .min = 1400000, .max = 2800000 },
 192        .n = { .min = 1, .max = 6 },
 193        .m = { .min = 70, .max = 120 },
 194        .m1 = { .min = 8, .max = 18 },
 195        .m2 = { .min = 3, .max = 7 },
 196        .p = { .min = 5, .max = 80 },
 197        .p1 = { .min = 1, .max = 8 },
 198        .p2 = { .dot_limit = 200000,
 199                .p2_slow = 10, .p2_fast = 5 },
 200};
 201
 202static const intel_limit_t intel_limits_i9xx_lvds = {
 203        .dot = { .min = 20000, .max = 400000 },
 204        .vco = { .min = 1400000, .max = 2800000 },
 205        .n = { .min = 1, .max = 6 },
 206        .m = { .min = 70, .max = 120 },
 207        .m1 = { .min = 8, .max = 18 },
 208        .m2 = { .min = 3, .max = 7 },
 209        .p = { .min = 7, .max = 98 },
 210        .p1 = { .min = 1, .max = 8 },
 211        .p2 = { .dot_limit = 112000,
 212                .p2_slow = 14, .p2_fast = 7 },
 213};
 214
 215
 216static const intel_limit_t intel_limits_g4x_sdvo = {
 217        .dot = { .min = 25000, .max = 270000 },
 218        .vco = { .min = 1750000, .max = 3500000},
 219        .n = { .min = 1, .max = 4 },
 220        .m = { .min = 104, .max = 138 },
 221        .m1 = { .min = 17, .max = 23 },
 222        .m2 = { .min = 5, .max = 11 },
 223        .p = { .min = 10, .max = 30 },
 224        .p1 = { .min = 1, .max = 3},
 225        .p2 = { .dot_limit = 270000,
 226                .p2_slow = 10,
 227                .p2_fast = 10
 228        },
 229};
 230
 231static const intel_limit_t intel_limits_g4x_hdmi = {
 232        .dot = { .min = 22000, .max = 400000 },
 233        .vco = { .min = 1750000, .max = 3500000},
 234        .n = { .min = 1, .max = 4 },
 235        .m = { .min = 104, .max = 138 },
 236        .m1 = { .min = 16, .max = 23 },
 237        .m2 = { .min = 5, .max = 11 },
 238        .p = { .min = 5, .max = 80 },
 239        .p1 = { .min = 1, .max = 8},
 240        .p2 = { .dot_limit = 165000,
 241                .p2_slow = 10, .p2_fast = 5 },
 242};
 243
 244static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
 245        .dot = { .min = 20000, .max = 115000 },
 246        .vco = { .min = 1750000, .max = 3500000 },
 247        .n = { .min = 1, .max = 3 },
 248        .m = { .min = 104, .max = 138 },
 249        .m1 = { .min = 17, .max = 23 },
 250        .m2 = { .min = 5, .max = 11 },
 251        .p = { .min = 28, .max = 112 },
 252        .p1 = { .min = 2, .max = 8 },
 253        .p2 = { .dot_limit = 0,
 254                .p2_slow = 14, .p2_fast = 14
 255        },
 256};
 257
 258static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
 259        .dot = { .min = 80000, .max = 224000 },
 260        .vco = { .min = 1750000, .max = 3500000 },
 261        .n = { .min = 1, .max = 3 },
 262        .m = { .min = 104, .max = 138 },
 263        .m1 = { .min = 17, .max = 23 },
 264        .m2 = { .min = 5, .max = 11 },
 265        .p = { .min = 14, .max = 42 },
 266        .p1 = { .min = 2, .max = 6 },
 267        .p2 = { .dot_limit = 0,
 268                .p2_slow = 7, .p2_fast = 7
 269        },
 270};
 271
 272static const intel_limit_t intel_limits_pineview_sdvo = {
 273        .dot = { .min = 20000, .max = 400000},
 274        .vco = { .min = 1700000, .max = 3500000 },
 275        /* Pineview's Ncounter is a ring counter */
 276        .n = { .min = 3, .max = 6 },
 277        .m = { .min = 2, .max = 256 },
 278        /* Pineview only has one combined m divider, which we treat as m2. */
 279        .m1 = { .min = 0, .max = 0 },
 280        .m2 = { .min = 0, .max = 254 },
 281        .p = { .min = 5, .max = 80 },
 282        .p1 = { .min = 1, .max = 8 },
 283        .p2 = { .dot_limit = 200000,
 284                .p2_slow = 10, .p2_fast = 5 },
 285};
 286
 287static const intel_limit_t intel_limits_pineview_lvds = {
 288        .dot = { .min = 20000, .max = 400000 },
 289        .vco = { .min = 1700000, .max = 3500000 },
 290        .n = { .min = 3, .max = 6 },
 291        .m = { .min = 2, .max = 256 },
 292        .m1 = { .min = 0, .max = 0 },
 293        .m2 = { .min = 0, .max = 254 },
 294        .p = { .min = 7, .max = 112 },
 295        .p1 = { .min = 1, .max = 8 },
 296        .p2 = { .dot_limit = 112000,
 297                .p2_slow = 14, .p2_fast = 14 },
 298};
 299
 300/* Ironlake / Sandybridge
 301 *
 302 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 303 * the range value for them is (actual_value - 2).
 304 */
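     /*
      * Worked example: a table entry of .m1 = { .min = 12, .max = 22 }
      * below is expressed in register units, i.e. the actual divider range
      * is 14..24 once the implicit +2 from the clock equations is applied.
      */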
 305static const intel_limit_t intel_limits_ironlake_dac = {
 306        .dot = { .min = 25000, .max = 350000 },
 307        .vco = { .min = 1760000, .max = 3510000 },
 308        .n = { .min = 1, .max = 5 },
 309        .m = { .min = 79, .max = 127 },
 310        .m1 = { .min = 12, .max = 22 },
 311        .m2 = { .min = 5, .max = 9 },
 312        .p = { .min = 5, .max = 80 },
 313        .p1 = { .min = 1, .max = 8 },
 314        .p2 = { .dot_limit = 225000,
 315                .p2_slow = 10, .p2_fast = 5 },
 316};
 317
 318static const intel_limit_t intel_limits_ironlake_single_lvds = {
 319        .dot = { .min = 25000, .max = 350000 },
 320        .vco = { .min = 1760000, .max = 3510000 },
 321        .n = { .min = 1, .max = 3 },
 322        .m = { .min = 79, .max = 118 },
 323        .m1 = { .min = 12, .max = 22 },
 324        .m2 = { .min = 5, .max = 9 },
 325        .p = { .min = 28, .max = 112 },
 326        .p1 = { .min = 2, .max = 8 },
 327        .p2 = { .dot_limit = 225000,
 328                .p2_slow = 14, .p2_fast = 14 },
 329};
 330
 331static const intel_limit_t intel_limits_ironlake_dual_lvds = {
 332        .dot = { .min = 25000, .max = 350000 },
 333        .vco = { .min = 1760000, .max = 3510000 },
 334        .n = { .min = 1, .max = 3 },
 335        .m = { .min = 79, .max = 127 },
 336        .m1 = { .min = 12, .max = 22 },
 337        .m2 = { .min = 5, .max = 9 },
 338        .p = { .min = 14, .max = 56 },
 339        .p1 = { .min = 2, .max = 8 },
 340        .p2 = { .dot_limit = 225000,
 341                .p2_slow = 7, .p2_fast = 7 },
 342};
 343
  344/* LVDS 100MHz refclk limits. */
 345static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
 346        .dot = { .min = 25000, .max = 350000 },
 347        .vco = { .min = 1760000, .max = 3510000 },
 348        .n = { .min = 1, .max = 2 },
 349        .m = { .min = 79, .max = 126 },
 350        .m1 = { .min = 12, .max = 22 },
 351        .m2 = { .min = 5, .max = 9 },
 352        .p = { .min = 28, .max = 112 },
 353        .p1 = { .min = 2, .max = 8 },
 354        .p2 = { .dot_limit = 225000,
 355                .p2_slow = 14, .p2_fast = 14 },
 356};
 357
 358static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
 359        .dot = { .min = 25000, .max = 350000 },
 360        .vco = { .min = 1760000, .max = 3510000 },
 361        .n = { .min = 1, .max = 3 },
 362        .m = { .min = 79, .max = 126 },
 363        .m1 = { .min = 12, .max = 22 },
 364        .m2 = { .min = 5, .max = 9 },
 365        .p = { .min = 14, .max = 42 },
 366        .p1 = { .min = 2, .max = 6 },
 367        .p2 = { .dot_limit = 225000,
 368                .p2_slow = 7, .p2_fast = 7 },
 369};
 370
 371static const intel_limit_t intel_limits_vlv = {
 372         /*
 373          * These are the data rate limits (measured in fast clocks)
 374          * since those are the strictest limits we have. The fast
 375          * clock and actual rate limits are more relaxed, so checking
 376          * them would make no difference.
 377          */
 378        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
 379        .vco = { .min = 4000000, .max = 6000000 },
 380        .n = { .min = 1, .max = 7 },
 381        .m1 = { .min = 2, .max = 3 },
 382        .m2 = { .min = 11, .max = 156 },
 383        .p1 = { .min = 2, .max = 3 },
 384        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
 385};
 386
 387static const intel_limit_t intel_limits_chv = {
 388        /*
 389         * These are the data rate limits (measured in fast clocks)
 390         * since those are the strictest limits we have.  The fast
 391         * clock and actual rate limits are more relaxed, so checking
 392         * them would make no difference.
 393         */
 394        .dot = { .min = 25000 * 5, .max = 540000 * 5},
 395        .vco = { .min = 4800000, .max = 6480000 },
 396        .n = { .min = 1, .max = 1 },
 397        .m1 = { .min = 2, .max = 2 },
 398        .m2 = { .min = 24 << 22, .max = 175 << 22 },
 399        .p1 = { .min = 2, .max = 4 },
 400        .p2 = { .p2_slow = 1, .p2_fast = 14 },
 401};
 402
 403static void vlv_clock(int refclk, intel_clock_t *clock)
 404{
 405        clock->m = clock->m1 * clock->m2;
 406        clock->p = clock->p1 * clock->p2;
 407        if (WARN_ON(clock->n == 0 || clock->p == 0))
 408                return;
 409        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 410        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 411}
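     /*
      * Example (illustrative numbers only, assuming the usual 100 MHz
      * refclk, i.e. refclk = 100000 in kHz): m1 = 3, m2 = 90, n = 5,
      * p1 = 3, p2 = 4 gives m = 270, p = 12, vco = 5400000 kHz and
      * dot = 450000 kHz, all within the intel_limits_vlv ranges above
      * (note the dot value is in "fast clock" units, 5x the pixel clock).
      */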
 412
 413/**
 414 * Returns whether any output on the specified pipe is of the specified type
 415 */
 416bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
 417{
 418        struct drm_device *dev = crtc->base.dev;
 419        struct intel_encoder *encoder;
 420
 421        for_each_encoder_on_crtc(dev, &crtc->base, encoder)
 422                if (encoder->type == type)
 423                        return true;
 424
 425        return false;
 426}
 427
 428/**
 429 * Returns whether any output on the specified pipe will have the specified
 430 * type after a staged modeset is complete, i.e., the same as
 431 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 432 * encoder->crtc.
 433 */
 434static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
 435                                      int type)
 436{
 437        struct drm_atomic_state *state = crtc_state->base.state;
 438        struct drm_connector_state *connector_state;
 439        struct intel_encoder *encoder;
 440        int i, num_connectors = 0;
 441
 442        for (i = 0; i < state->num_connector; i++) {
 443                if (!state->connectors[i])
 444                        continue;
 445
 446                connector_state = state->connector_states[i];
 447                if (connector_state->crtc != crtc_state->base.crtc)
 448                        continue;
 449
 450                num_connectors++;
 451
 452                encoder = to_intel_encoder(connector_state->best_encoder);
 453                if (encoder->type == type)
 454                        return true;
 455        }
 456
 457        WARN_ON(num_connectors == 0);
 458
 459        return false;
 460}
 461
 462static const intel_limit_t *
 463intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
 464{
 465        struct drm_device *dev = crtc_state->base.crtc->dev;
 466        const intel_limit_t *limit;
 467
 468        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 469                if (intel_is_dual_link_lvds(dev)) {
 470                        if (refclk == 100000)
 471                                limit = &intel_limits_ironlake_dual_lvds_100m;
 472                        else
 473                                limit = &intel_limits_ironlake_dual_lvds;
 474                } else {
 475                        if (refclk == 100000)
 476                                limit = &intel_limits_ironlake_single_lvds_100m;
 477                        else
 478                                limit = &intel_limits_ironlake_single_lvds;
 479                }
 480        } else
 481                limit = &intel_limits_ironlake_dac;
 482
 483        return limit;
 484}
 485
 486static const intel_limit_t *
 487intel_g4x_limit(struct intel_crtc_state *crtc_state)
 488{
 489        struct drm_device *dev = crtc_state->base.crtc->dev;
 490        const intel_limit_t *limit;
 491
 492        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 493                if (intel_is_dual_link_lvds(dev))
 494                        limit = &intel_limits_g4x_dual_channel_lvds;
 495                else
 496                        limit = &intel_limits_g4x_single_channel_lvds;
 497        } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
 498                   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
 499                limit = &intel_limits_g4x_hdmi;
 500        } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
 501                limit = &intel_limits_g4x_sdvo;
 502        } else /* The option is for other outputs */
 503                limit = &intel_limits_i9xx_sdvo;
 504
 505        return limit;
 506}
 507
 508static const intel_limit_t *
 509intel_limit(struct intel_crtc_state *crtc_state, int refclk)
 510{
 511        struct drm_device *dev = crtc_state->base.crtc->dev;
 512        const intel_limit_t *limit;
 513
 514        if (HAS_PCH_SPLIT(dev))
 515                limit = intel_ironlake_limit(crtc_state, refclk);
 516        else if (IS_G4X(dev)) {
 517                limit = intel_g4x_limit(crtc_state);
 518        } else if (IS_PINEVIEW(dev)) {
 519                if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 520                        limit = &intel_limits_pineview_lvds;
 521                else
 522                        limit = &intel_limits_pineview_sdvo;
 523        } else if (IS_CHERRYVIEW(dev)) {
 524                limit = &intel_limits_chv;
 525        } else if (IS_VALLEYVIEW(dev)) {
 526                limit = &intel_limits_vlv;
 527        } else if (!IS_GEN2(dev)) {
 528                if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 529                        limit = &intel_limits_i9xx_lvds;
 530                else
 531                        limit = &intel_limits_i9xx_sdvo;
 532        } else {
 533                if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
 534                        limit = &intel_limits_i8xx_lvds;
 535                else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
 536                        limit = &intel_limits_i8xx_dvo;
 537                else
 538                        limit = &intel_limits_i8xx_dac;
 539        }
 540        return limit;
 541}
 542
 543/* m1 is reserved as 0 in Pineview, n is a ring counter */
 544static void pineview_clock(int refclk, intel_clock_t *clock)
 545{
 546        clock->m = clock->m2 + 2;
 547        clock->p = clock->p1 * clock->p2;
 548        if (WARN_ON(clock->n == 0 || clock->p == 0))
 549                return;
 550        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 551        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 552}
 553
 554static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
 555{
 556        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 557}
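     /*
      * Example: dpll->m1 = 10, dpll->m2 = 5 gives m = 5 * (10 + 2) + (5 + 2) = 67,
      * the effective feedback value fed into i9xx_clock() below.
      */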
 558
 559static void i9xx_clock(int refclk, intel_clock_t *clock)
 560{
 561        clock->m = i9xx_dpll_compute_m(clock);
 562        clock->p = clock->p1 * clock->p2;
 563        if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
 564                return;
 565        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
 566        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 567}
 568
 569static void chv_clock(int refclk, intel_clock_t *clock)
 570{
 571        clock->m = clock->m1 * clock->m2;
 572        clock->p = clock->p1 * clock->p2;
 573        if (WARN_ON(clock->n == 0 || clock->p == 0))
 574                return;
 575        clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
 576                        clock->n << 22);
 577        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 578}
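     /*
      * Note: on CHV m2 is kept in 22.22 fixed point (see the m2 limits in
      * intel_limits_chv, 24 << 22 .. 175 << 22), which is why the VCO
      * calculation above divides by (n << 22) to cancel the fraction.
      */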
 579
 580#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
 581/**
  582 * Returns whether the given set of divisors is valid for a given refclk with
 583 * the given connectors.
 584 */
 585
 586static bool intel_PLL_is_valid(struct drm_device *dev,
 587                               const intel_limit_t *limit,
 588                               const intel_clock_t *clock)
 589{
 590        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
 591                INTELPllInvalid("n out of range\n");
 592        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
 593                INTELPllInvalid("p1 out of range\n");
 594        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
 595                INTELPllInvalid("m2 out of range\n");
 596        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
 597                INTELPllInvalid("m1 out of range\n");
 598
 599        if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
 600                if (clock->m1 <= clock->m2)
 601                        INTELPllInvalid("m1 <= m2\n");
 602
 603        if (!IS_VALLEYVIEW(dev)) {
 604                if (clock->p < limit->p.min || limit->p.max < clock->p)
 605                        INTELPllInvalid("p out of range\n");
 606                if (clock->m < limit->m.min || limit->m.max < clock->m)
 607                        INTELPllInvalid("m out of range\n");
 608        }
 609
 610        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
 611                INTELPllInvalid("vco out of range\n");
 612        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
 613         * connector, etc., rather than just a single range.
 614         */
 615        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
 616                INTELPllInvalid("dot out of range\n");
 617
 618        return true;
 619}
 620
 621static bool
 622i9xx_find_best_dpll(const intel_limit_t *limit,
 623                    struct intel_crtc_state *crtc_state,
 624                    int target, int refclk, intel_clock_t *match_clock,
 625                    intel_clock_t *best_clock)
 626{
 627        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 628        struct drm_device *dev = crtc->base.dev;
 629        intel_clock_t clock;
 630        int err = target;
 631
 632        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 633                /*
 634                 * For LVDS just rely on its current settings for dual-channel.
 635                 * We haven't figured out how to reliably set up different
 636                 * single/dual channel state, if we even can.
 637                 */
 638                if (intel_is_dual_link_lvds(dev))
 639                        clock.p2 = limit->p2.p2_fast;
 640                else
 641                        clock.p2 = limit->p2.p2_slow;
 642        } else {
 643                if (target < limit->p2.dot_limit)
 644                        clock.p2 = limit->p2.p2_slow;
 645                else
 646                        clock.p2 = limit->p2.p2_fast;
 647        }
 648
 649        memset(best_clock, 0, sizeof(*best_clock));
 650
 651        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 652             clock.m1++) {
 653                for (clock.m2 = limit->m2.min;
 654                     clock.m2 <= limit->m2.max; clock.m2++) {
 655                        if (clock.m2 >= clock.m1)
 656                                break;
 657                        for (clock.n = limit->n.min;
 658                             clock.n <= limit->n.max; clock.n++) {
 659                                for (clock.p1 = limit->p1.min;
 660                                        clock.p1 <= limit->p1.max; clock.p1++) {
 661                                        int this_err;
 662
 663                                        i9xx_clock(refclk, &clock);
 664                                        if (!intel_PLL_is_valid(dev, limit,
 665                                                                &clock))
 666                                                continue;
 667                                        if (match_clock &&
 668                                            clock.p != match_clock->p)
 669                                                continue;
 670
 671                                        this_err = abs(clock.dot - target);
 672                                        if (this_err < err) {
 673                                                *best_clock = clock;
 674                                                err = this_err;
 675                                        }
 676                                }
 677                        }
 678                }
 679        }
 680
 681        return (err != target);
 682}
 683
 684static bool
 685pnv_find_best_dpll(const intel_limit_t *limit,
 686                   struct intel_crtc_state *crtc_state,
 687                   int target, int refclk, intel_clock_t *match_clock,
 688                   intel_clock_t *best_clock)
 689{
 690        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 691        struct drm_device *dev = crtc->base.dev;
 692        intel_clock_t clock;
 693        int err = target;
 694
 695        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 696                /*
 697                 * For LVDS just rely on its current settings for dual-channel.
 698                 * We haven't figured out how to reliably set up different
 699                 * single/dual channel state, if we even can.
 700                 */
 701                if (intel_is_dual_link_lvds(dev))
 702                        clock.p2 = limit->p2.p2_fast;
 703                else
 704                        clock.p2 = limit->p2.p2_slow;
 705        } else {
 706                if (target < limit->p2.dot_limit)
 707                        clock.p2 = limit->p2.p2_slow;
 708                else
 709                        clock.p2 = limit->p2.p2_fast;
 710        }
 711
 712        memset(best_clock, 0, sizeof(*best_clock));
 713
 714        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 715             clock.m1++) {
 716                for (clock.m2 = limit->m2.min;
 717                     clock.m2 <= limit->m2.max; clock.m2++) {
 718                        for (clock.n = limit->n.min;
 719                             clock.n <= limit->n.max; clock.n++) {
 720                                for (clock.p1 = limit->p1.min;
 721                                        clock.p1 <= limit->p1.max; clock.p1++) {
 722                                        int this_err;
 723
 724                                        pineview_clock(refclk, &clock);
 725                                        if (!intel_PLL_is_valid(dev, limit,
 726                                                                &clock))
 727                                                continue;
 728                                        if (match_clock &&
 729                                            clock.p != match_clock->p)
 730                                                continue;
 731
 732                                        this_err = abs(clock.dot - target);
 733                                        if (this_err < err) {
 734                                                *best_clock = clock;
 735                                                err = this_err;
 736                                        }
 737                                }
 738                        }
 739                }
 740        }
 741
 742        return (err != target);
 743}
 744
 745static bool
 746g4x_find_best_dpll(const intel_limit_t *limit,
 747                   struct intel_crtc_state *crtc_state,
 748                   int target, int refclk, intel_clock_t *match_clock,
 749                   intel_clock_t *best_clock)
 750{
 751        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 752        struct drm_device *dev = crtc->base.dev;
 753        intel_clock_t clock;
 754        int max_n;
 755        bool found;
 756        /* approximately equals target * 0.00585 */
 757        int err_most = (target >> 8) + (target >> 9);
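             /* i.e. (1/256 + 1/512) ~= 0.586%, the worst dotclock error accepted below */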
 758        found = false;
 759
 760        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 761                if (intel_is_dual_link_lvds(dev))
 762                        clock.p2 = limit->p2.p2_fast;
 763                else
 764                        clock.p2 = limit->p2.p2_slow;
 765        } else {
 766                if (target < limit->p2.dot_limit)
 767                        clock.p2 = limit->p2.p2_slow;
 768                else
 769                        clock.p2 = limit->p2.p2_fast;
 770        }
 771
 772        memset(best_clock, 0, sizeof(*best_clock));
 773        max_n = limit->n.max;
 774        /* based on hardware requirement, prefer smaller n to precision */
 775        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
  776                /* based on hardware requirement, prefer larger m1,m2 */
 777                for (clock.m1 = limit->m1.max;
 778                     clock.m1 >= limit->m1.min; clock.m1--) {
 779                        for (clock.m2 = limit->m2.max;
 780                             clock.m2 >= limit->m2.min; clock.m2--) {
 781                                for (clock.p1 = limit->p1.max;
 782                                     clock.p1 >= limit->p1.min; clock.p1--) {
 783                                        int this_err;
 784
 785                                        i9xx_clock(refclk, &clock);
 786                                        if (!intel_PLL_is_valid(dev, limit,
 787                                                                &clock))
 788                                                continue;
 789
 790                                        this_err = abs(clock.dot - target);
 791                                        if (this_err < err_most) {
 792                                                *best_clock = clock;
 793                                                err_most = this_err;
 794                                                max_n = clock.n;
 795                                                found = true;
 796                                        }
 797                                }
 798                        }
 799                }
 800        }
 801        return found;
 802}
 803
 804/*
 805 * Check if the calculated PLL configuration is more optimal compared to the
 806 * best configuration and error found so far. Return the calculated error.
 807 */
 808static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
 809                               const intel_clock_t *calculated_clock,
 810                               const intel_clock_t *best_clock,
 811                               unsigned int best_error_ppm,
 812                               unsigned int *error_ppm)
 813{
 814        /*
 815         * For CHV ignore the error and consider only the P value.
 816         * Prefer a bigger P value based on HW requirements.
 817         */
 818        if (IS_CHERRYVIEW(dev)) {
 819                *error_ppm = 0;
 820
 821                return calculated_clock->p > best_clock->p;
 822        }
 823
 824        if (WARN_ON_ONCE(!target_freq))
 825                return false;
 826
 827        *error_ppm = div_u64(1000000ULL *
 828                                abs(target_freq - calculated_clock->dot),
 829                             target_freq);
 830        /*
 831         * Prefer a better P value over a better (smaller) error if the error
 832         * is small. Ensure this preference for future configurations too by
 833         * setting the error to 0.
 834         */
 835        if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
 836                *error_ppm = 0;
 837
 838                return true;
 839        }
 840
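             /*
              * Otherwise require the new candidate to be at least 10 ppm
              * better than the best error seen so far before accepting it.
              */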
 841        return *error_ppm + 10 < best_error_ppm;
 842}
 843
 844static bool
 845vlv_find_best_dpll(const intel_limit_t *limit,
 846                   struct intel_crtc_state *crtc_state,
 847                   int target, int refclk, intel_clock_t *match_clock,
 848                   intel_clock_t *best_clock)
 849{
 850        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 851        struct drm_device *dev = crtc->base.dev;
 852        intel_clock_t clock;
 853        unsigned int bestppm = 1000000;
 854        /* min update 19.2 MHz */
 855        int max_n = min(limit->n.max, refclk / 19200);
 856        bool found = false;
 857
 858        target *= 5; /* fast clock */
 859
 860        memset(best_clock, 0, sizeof(*best_clock));
 861
 862        /* based on hardware requirement, prefer smaller n to precision */
 863        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
 864                for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
 865                        for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
 866                             clock.p2 -= clock.p2 > 10 ? 2 : 1) {
 867                                clock.p = clock.p1 * clock.p2;
 868                                /* based on hardware requirement, prefer bigger m1,m2 values */
 869                                for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
 870                                        unsigned int ppm;
 871
 872                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
 873                                                                     refclk * clock.m1);
 874
 875                                        vlv_clock(refclk, &clock);
 876
 877                                        if (!intel_PLL_is_valid(dev, limit,
 878                                                                &clock))
 879                                                continue;
 880
 881                                        if (!vlv_PLL_is_optimal(dev, target,
 882                                                                &clock,
 883                                                                best_clock,
 884                                                                bestppm, &ppm))
 885                                                continue;
 886
 887                                        *best_clock = clock;
 888                                        bestppm = ppm;
 889                                        found = true;
 890                                }
 891                        }
 892                }
 893        }
 894
 895        return found;
 896}
 897
 898static bool
 899chv_find_best_dpll(const intel_limit_t *limit,
 900                   struct intel_crtc_state *crtc_state,
 901                   int target, int refclk, intel_clock_t *match_clock,
 902                   intel_clock_t *best_clock)
 903{
 904        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 905        struct drm_device *dev = crtc->base.dev;
 906        unsigned int best_error_ppm;
 907        intel_clock_t clock;
 908        uint64_t m2;
  909        bool found = false;
 910
 911        memset(best_clock, 0, sizeof(*best_clock));
 912        best_error_ppm = 1000000;
 913
 914        /*
  915         * Based on the hardware doc, n is always set to 1 and m1 is always
  916         * set to 2.  If we ever need to support a 200MHz refclk, we need to
  917         * revisit this because n may no longer be 1.
 918         */
 919        clock.n = 1, clock.m1 = 2;
 920        target *= 5;    /* fast clock */
 921
 922        for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
 923                for (clock.p2 = limit->p2.p2_fast;
 924                                clock.p2 >= limit->p2.p2_slow;
 925                                clock.p2 -= clock.p2 > 10 ? 2 : 1) {
 926                        unsigned int error_ppm;
 927
 928                        clock.p = clock.p1 * clock.p2;
 929
 930                        m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
 931                                        clock.n) << 22, refclk * clock.m1);
 932
 933                        if (m2 > INT_MAX/clock.m1)
 934                                continue;
 935
 936                        clock.m2 = m2;
 937
 938                        chv_clock(refclk, &clock);
 939
 940                        if (!intel_PLL_is_valid(dev, limit, &clock))
 941                                continue;
 942
 943                        if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
 944                                                best_error_ppm, &error_ppm))
 945                                continue;
 946
 947                        *best_clock = clock;
 948                        best_error_ppm = error_ppm;
 949                        found = true;
 950                }
 951        }
 952
 953        return found;
 954}
 955
 956bool intel_crtc_active(struct drm_crtc *crtc)
 957{
 958        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 959
 960        /* Be paranoid as we can arrive here with only partial
 961         * state retrieved from the hardware during setup.
 962         *
 963         * We can ditch the adjusted_mode.crtc_clock check as soon
 964         * as Haswell has gained clock readout/fastboot support.
 965         *
 966         * We can ditch the crtc->primary->fb check as soon as we can
 967         * properly reconstruct framebuffers.
 968         *
 969         * FIXME: The intel_crtc->active here should be switched to
 970         * crtc->state->active once we have proper CRTC states wired up
 971         * for atomic.
 972         */
 973        return intel_crtc->active && crtc->primary->state->fb &&
 974                intel_crtc->config->base.adjusted_mode.crtc_clock;
 975}
 976
 977enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
 978                                             enum pipe pipe)
 979{
 980        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 981        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 982
 983        return intel_crtc->config->cpu_transcoder;
 984}
 985
 986static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 987{
 988        struct drm_i915_private *dev_priv = dev->dev_private;
 989        u32 reg = PIPEDSL(pipe);
 990        u32 line1, line2;
 991        u32 line_mask;
 992
 993        if (IS_GEN2(dev))
 994                line_mask = DSL_LINEMASK_GEN2;
 995        else
 996                line_mask = DSL_LINEMASK_GEN3;
 997
 998        line1 = I915_READ(reg) & line_mask;
 999        mdelay(5);
1000        line2 = I915_READ(reg) & line_mask;
1001
1002        return line1 == line2;
1003}
1004
1005/*
1006 * intel_wait_for_pipe_off - wait for pipe to turn off
1007 * @crtc: crtc whose pipe to wait for
1008 *
1009 * After disabling a pipe, we can't wait for vblank in the usual way,
1010 * spinning on the vblank interrupt status bit, since we won't actually
1011 * see an interrupt when the pipe is disabled.
1012 *
1013 * On Gen4 and above:
1014 *   wait for the pipe register state bit to turn off
1015 *
1016 * Otherwise:
1017 *   wait for the display line value to settle (it usually
1018 *   ends up stopping at the start of the next frame).
1019 *
1020 */
1021static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1022{
1023        struct drm_device *dev = crtc->base.dev;
1024        struct drm_i915_private *dev_priv = dev->dev_private;
1025        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1026        enum pipe pipe = crtc->pipe;
1027
1028        if (INTEL_INFO(dev)->gen >= 4) {
1029                int reg = PIPECONF(cpu_transcoder);
1030
1031                /* Wait for the Pipe State to go off */
1032                if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1033                             100))
1034                        WARN(1, "pipe_off wait timed out\n");
1035        } else {
1036                /* Wait for the display line to settle */
1037                if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1038                        WARN(1, "pipe_off wait timed out\n");
1039        }
1040}
1041
1042/*
1043 * ibx_digital_port_connected - is the specified port connected?
1044 * @dev_priv: i915 private structure
1045 * @port: the port to test
1046 *
1047 * Returns true if @port is connected, false otherwise.
1048 */
1049bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
1050                                struct intel_digital_port *port)
1051{
1052        u32 bit;
1053
1054        if (HAS_PCH_IBX(dev_priv->dev)) {
1055                switch (port->port) {
1056                case PORT_B:
1057                        bit = SDE_PORTB_HOTPLUG;
1058                        break;
1059                case PORT_C:
1060                        bit = SDE_PORTC_HOTPLUG;
1061                        break;
1062                case PORT_D:
1063                        bit = SDE_PORTD_HOTPLUG;
1064                        break;
1065                default:
1066                        return true;
1067                }
1068        } else {
1069                switch (port->port) {
1070                case PORT_B:
1071                        bit = SDE_PORTB_HOTPLUG_CPT;
1072                        break;
1073                case PORT_C:
1074                        bit = SDE_PORTC_HOTPLUG_CPT;
1075                        break;
1076                case PORT_D:
1077                        bit = SDE_PORTD_HOTPLUG_CPT;
1078                        break;
1079                default:
1080                        return true;
1081                }
1082        }
1083
1084        return I915_READ(SDEISR) & bit;
1085}
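     /*
      * The check above relies on the live hotplug status bits in SDEISR
      * (south display engine ISR); ports without a dedicated bit are
      * simply reported as connected.
      */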
1086
1087static const char *state_string(bool enabled)
1088{
1089        return enabled ? "on" : "off";
1090}
1091
1092/* Only for pre-ILK configs */
1093void assert_pll(struct drm_i915_private *dev_priv,
1094                enum pipe pipe, bool state)
1095{
1096        int reg;
1097        u32 val;
1098        bool cur_state;
1099
1100        reg = DPLL(pipe);
1101        val = I915_READ(reg);
1102        cur_state = !!(val & DPLL_VCO_ENABLE);
1103        I915_STATE_WARN(cur_state != state,
1104             "PLL state assertion failure (expected %s, current %s)\n",
1105             state_string(state), state_string(cur_state));
1106}
1107
1108/* XXX: the dsi pll is shared between MIPI DSI ports */
1109static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1110{
1111        u32 val;
1112        bool cur_state;
1113
1114        mutex_lock(&dev_priv->dpio_lock);
1115        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1116        mutex_unlock(&dev_priv->dpio_lock);
1117
1118        cur_state = val & DSI_PLL_VCO_EN;
1119        I915_STATE_WARN(cur_state != state,
1120             "DSI PLL state assertion failure (expected %s, current %s)\n",
1121             state_string(state), state_string(cur_state));
1122}
1123#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1124#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1125
1126struct intel_shared_dpll *
1127intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1128{
1129        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1130
1131        if (crtc->config->shared_dpll < 0)
1132                return NULL;
1133
1134        return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1135}
1136
1137/* For ILK+ */
1138void assert_shared_dpll(struct drm_i915_private *dev_priv,
1139                        struct intel_shared_dpll *pll,
1140                        bool state)
1141{
1142        bool cur_state;
1143        struct intel_dpll_hw_state hw_state;
1144
  1145        if (WARN(!pll,
1146                  "asserting DPLL %s with no DPLL\n", state_string(state)))
1147                return;
1148
1149        cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1150        I915_STATE_WARN(cur_state != state,
1151             "%s assertion failure (expected %s, current %s)\n",
1152             pll->name, state_string(state), state_string(cur_state));
1153}
1154
1155static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1156                          enum pipe pipe, bool state)
1157{
1158        int reg;
1159        u32 val;
1160        bool cur_state;
1161        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1162                                                                      pipe);
1163
1164        if (HAS_DDI(dev_priv->dev)) {
1165                /* DDI does not have a specific FDI_TX register */
1166                reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1167                val = I915_READ(reg);
1168                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1169        } else {
1170                reg = FDI_TX_CTL(pipe);
1171                val = I915_READ(reg);
1172                cur_state = !!(val & FDI_TX_ENABLE);
1173        }
1174        I915_STATE_WARN(cur_state != state,
1175             "FDI TX state assertion failure (expected %s, current %s)\n",
1176             state_string(state), state_string(cur_state));
1177}
1178#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1179#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1180
1181static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1182                          enum pipe pipe, bool state)
1183{
1184        int reg;
1185        u32 val;
1186        bool cur_state;
1187
1188        reg = FDI_RX_CTL(pipe);
1189        val = I915_READ(reg);
1190        cur_state = !!(val & FDI_RX_ENABLE);
1191        I915_STATE_WARN(cur_state != state,
1192             "FDI RX state assertion failure (expected %s, current %s)\n",
1193             state_string(state), state_string(cur_state));
1194}
1195#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1196#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1197
1198static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1199                                      enum pipe pipe)
1200{
1201        int reg;
1202        u32 val;
1203
1204        /* ILK FDI PLL is always enabled */
1205        if (INTEL_INFO(dev_priv->dev)->gen == 5)
1206                return;
1207
1208        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1209        if (HAS_DDI(dev_priv->dev))
1210                return;
1211
1212        reg = FDI_TX_CTL(pipe);
1213        val = I915_READ(reg);
1214        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1215}
1216
1217void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1218                       enum pipe pipe, bool state)
1219{
1220        int reg;
1221        u32 val;
1222        bool cur_state;
1223
1224        reg = FDI_RX_CTL(pipe);
1225        val = I915_READ(reg);
1226        cur_state = !!(val & FDI_RX_PLL_ENABLE);
1227        I915_STATE_WARN(cur_state != state,
1228             "FDI RX PLL assertion failure (expected %s, current %s)\n",
1229             state_string(state), state_string(cur_state));
1230}
1231
1232void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1233                           enum pipe pipe)
1234{
1235        struct drm_device *dev = dev_priv->dev;
1236        int pp_reg;
1237        u32 val;
1238        enum pipe panel_pipe = PIPE_A;
1239        bool locked = true;
1240
1241        if (WARN_ON(HAS_DDI(dev)))
1242                return;
1243
1244        if (HAS_PCH_SPLIT(dev)) {
1245                u32 port_sel;
1246
1247                pp_reg = PCH_PP_CONTROL;
1248                port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1249
1250                if (port_sel == PANEL_PORT_SELECT_LVDS &&
1251                    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1252                        panel_pipe = PIPE_B;
1253                /* XXX: else fix for eDP */
1254        } else if (IS_VALLEYVIEW(dev)) {
1255                /* presumably write lock depends on pipe, not port select */
1256                pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1257                panel_pipe = pipe;
1258        } else {
1259                pp_reg = PP_CONTROL;
1260                if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1261                        panel_pipe = PIPE_B;
1262        }
1263
1264        val = I915_READ(pp_reg);
1265        if (!(val & PANEL_POWER_ON) ||
1266            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1267                locked = false;
1268
1269        I915_STATE_WARN(panel_pipe == pipe && locked,
1270             "panel assertion failure, pipe %c regs locked\n",
1271             pipe_name(pipe));
1272}
1273
1274static void assert_cursor(struct drm_i915_private *dev_priv,
1275                          enum pipe pipe, bool state)
1276{
1277        struct drm_device *dev = dev_priv->dev;
1278        bool cur_state;
1279
1280        if (IS_845G(dev) || IS_I865G(dev))
1281                cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1282        else
1283                cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1284
1285        I915_STATE_WARN(cur_state != state,
1286             "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1287             pipe_name(pipe), state_string(state), state_string(cur_state));
1288}
1289#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1290#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1291
1292void assert_pipe(struct drm_i915_private *dev_priv,
1293                 enum pipe pipe, bool state)
1294{
1295        int reg;
1296        u32 val;
1297        bool cur_state;
1298        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1299                                                                      pipe);
1300
  1301        /* if we need the pipe quirk it must always be on */
1302        if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1303            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1304                state = true;
1305
1306        if (!intel_display_power_is_enabled(dev_priv,
1307                                POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1308                cur_state = false;
1309        } else {
1310                reg = PIPECONF(cpu_transcoder);
1311                val = I915_READ(reg);
1312                cur_state = !!(val & PIPECONF_ENABLE);
1313        }
1314
1315        I915_STATE_WARN(cur_state != state,
1316             "pipe %c assertion failure (expected %s, current %s)\n",
1317             pipe_name(pipe), state_string(state), state_string(cur_state));
1318}
1319
1320static void assert_plane(struct drm_i915_private *dev_priv,
1321                         enum plane plane, bool state)
1322{
1323        int reg;
1324        u32 val;
1325        bool cur_state;
1326
1327        reg = DSPCNTR(plane);
1328        val = I915_READ(reg);
1329        cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1330        I915_STATE_WARN(cur_state != state,
1331             "plane %c assertion failure (expected %s, current %s)\n",
1332             plane_name(plane), state_string(state), state_string(cur_state));
1333}
1334
1335#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1336#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1337
1338static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1339                                   enum pipe pipe)
1340{
1341        struct drm_device *dev = dev_priv->dev;
1342        int reg, i;
1343        u32 val;
1344        int cur_pipe;
1345
1346        /* Primary planes are fixed to pipes on gen4+ */
1347        if (INTEL_INFO(dev)->gen >= 4) {
1348                reg = DSPCNTR(pipe);
1349                val = I915_READ(reg);
1350                I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
  1351                     "plane %c assertion failure, should be disabled but is not\n",
1352                     plane_name(pipe));
1353                return;
1354        }
1355
1356        /* Need to check both planes against the pipe */
1357        for_each_pipe(dev_priv, i) {
1358                reg = DSPCNTR(i);
1359                val = I915_READ(reg);
1360                cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1361                        DISPPLANE_SEL_PIPE_SHIFT;
1362                I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1363                     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1364                     plane_name(i), pipe_name(pipe));
1365        }
1366}
1367
1368static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1369                                    enum pipe pipe)
1370{
1371        struct drm_device *dev = dev_priv->dev;
1372        int reg, sprite;
1373        u32 val;
1374
1375        if (INTEL_INFO(dev)->gen >= 9) {
1376                for_each_sprite(dev_priv, pipe, sprite) {
1377                        val = I915_READ(PLANE_CTL(pipe, sprite));
1378                        I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1379                             "plane %d assertion failure, should be off on pipe %c but is still active\n",
1380                             sprite, pipe_name(pipe));
1381                }
1382        } else if (IS_VALLEYVIEW(dev)) {
1383                for_each_sprite(dev_priv, pipe, sprite) {
1384                        reg = SPCNTR(pipe, sprite);
1385                        val = I915_READ(reg);
1386                        I915_STATE_WARN(val & SP_ENABLE,
1387                             "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1388                             sprite_name(pipe, sprite), pipe_name(pipe));
1389                }
1390        } else if (INTEL_INFO(dev)->gen >= 7) {
1391                reg = SPRCTL(pipe);
1392                val = I915_READ(reg);
1393                I915_STATE_WARN(val & SPRITE_ENABLE,
1394                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1395                     plane_name(pipe), pipe_name(pipe));
1396        } else if (INTEL_INFO(dev)->gen >= 5) {
1397                reg = DVSCNTR(pipe);
1398                val = I915_READ(reg);
1399                I915_STATE_WARN(val & DVS_ENABLE,
1400                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1401                     plane_name(pipe), pipe_name(pipe));
1402        }
1403}
1404
1405static void assert_vblank_disabled(struct drm_crtc *crtc)
1406{
1407        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1408                drm_crtc_vblank_put(crtc);
1409}
1410
1411static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1412{
1413        u32 val;
1414        bool enabled;
1415
1416        I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1417
1418        val = I915_READ(PCH_DREF_CONTROL);
1419        enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1420                            DREF_SUPERSPREAD_SOURCE_MASK));
1421        I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1422}
1423
1424static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1425                                           enum pipe pipe)
1426{
1427        int reg;
1428        u32 val;
1429        bool enabled;
1430
1431        reg = PCH_TRANSCONF(pipe);
1432        val = I915_READ(reg);
1433        enabled = !!(val & TRANS_ENABLE);
1434        I915_STATE_WARN(enabled,
1435             "transcoder assertion failed, should be off on pipe %c but is still active\n",
1436             pipe_name(pipe));
1437}
1438
1439static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1440                            enum pipe pipe, u32 port_sel, u32 val)
1441{
1442        if ((val & DP_PORT_EN) == 0)
1443                return false;
1444
1445        if (HAS_PCH_CPT(dev_priv->dev)) {
1446                u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1447                u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1448                if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1449                        return false;
1450        } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1451                if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1452                        return false;
1453        } else {
1454                if ((val & DP_PIPE_MASK) != (pipe << 30))
1455                        return false;
1456        }
1457        return true;
1458}
1459
1460static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1461                              enum pipe pipe, u32 val)
1462{
1463        if ((val & SDVO_ENABLE) == 0)
1464                return false;
1465
1466        if (HAS_PCH_CPT(dev_priv->dev)) {
1467                if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1468                        return false;
1469        } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1470                if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1471                        return false;
1472        } else {
1473                if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1474                        return false;
1475        }
1476        return true;
1477}
1478
1479static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1480                              enum pipe pipe, u32 val)
1481{
1482        if ((val & LVDS_PORT_EN) == 0)
1483                return false;
1484
1485        if (HAS_PCH_CPT(dev_priv->dev)) {
1486                if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1487                        return false;
1488        } else {
1489                if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1490                        return false;
1491        }
1492        return true;
1493}
1494
1495static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1496                              enum pipe pipe, u32 val)
1497{
1498        if ((val & ADPA_DAC_ENABLE) == 0)
1499                return false;
1500        if (HAS_PCH_CPT(dev_priv->dev)) {
1501                if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1502                        return false;
1503        } else {
1504                if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1505                        return false;
1506        }
1507        return true;
1508}
1509
1510static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1511                                   enum pipe pipe, int reg, u32 port_sel)
1512{
1513        u32 val = I915_READ(reg);
1514        I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1515             "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1516             reg, pipe_name(pipe));
1517
1518        I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1519             && (val & DP_PIPEB_SELECT),
1520             "IBX PCH dp port still using transcoder B\n");
1521}
1522
1523static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1524                                     enum pipe pipe, int reg)
1525{
1526        u32 val = I915_READ(reg);
1527        I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1528             "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1529             reg, pipe_name(pipe));
1530
1531        I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1532             && (val & SDVO_PIPE_B_SELECT),
1533             "IBX PCH hdmi port still using transcoder B\n");
1534}
1535
1536static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1537                                      enum pipe pipe)
1538{
1539        int reg;
1540        u32 val;
1541
1542        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1543        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1544        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1545
1546        reg = PCH_ADPA;
1547        val = I915_READ(reg);
1548        I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1549             "PCH VGA enabled on transcoder %c, should be disabled\n",
1550             pipe_name(pipe));
1551
1552        reg = PCH_LVDS;
1553        val = I915_READ(reg);
1554        I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1555             "PCH LVDS enabled on transcoder %c, should be disabled\n",
1556             pipe_name(pipe));
1557
1558        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1559        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1560        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1561}
1562
1563static void intel_init_dpio(struct drm_device *dev)
1564{
1565        struct drm_i915_private *dev_priv = dev->dev_private;
1566
1567        if (!IS_VALLEYVIEW(dev))
1568                return;
1569
1570        /*
1571         * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1572         * CHV x1 PHY (DP/HDMI D)
1573         * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1574         */
1575        if (IS_CHERRYVIEW(dev)) {
1576                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1577                DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1578        } else {
1579                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1580        }
1581}
1582
1583static void vlv_enable_pll(struct intel_crtc *crtc,
1584                           const struct intel_crtc_state *pipe_config)
1585{
1586        struct drm_device *dev = crtc->base.dev;
1587        struct drm_i915_private *dev_priv = dev->dev_private;
1588        int reg = DPLL(crtc->pipe);
1589        u32 dpll = pipe_config->dpll_hw_state.dpll;
1590
1591        assert_pipe_disabled(dev_priv, crtc->pipe);
1592
1593        /* No really, not for ILK+ */
1594        BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1595
1596        /* PLL is protected by panel, make sure we can write it */
1597        if (IS_MOBILE(dev_priv->dev))
1598                assert_panel_unlocked(dev_priv, crtc->pipe);
1599
1600        I915_WRITE(reg, dpll);
1601        POSTING_READ(reg);
1602        udelay(150);
1603
1604        if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1605                DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1606
1607        I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
1608        POSTING_READ(DPLL_MD(crtc->pipe));
1609
1610        /* We do this three times for luck */
1611        I915_WRITE(reg, dpll);
1612        POSTING_READ(reg);
1613        udelay(150); /* wait for warmup */
1614        I915_WRITE(reg, dpll);
1615        POSTING_READ(reg);
1616        udelay(150); /* wait for warmup */
1617        I915_WRITE(reg, dpll);
1618        POSTING_READ(reg);
1619        udelay(150); /* wait for warmup */
1620}
1621
1622static void chv_enable_pll(struct intel_crtc *crtc,
1623                           const struct intel_crtc_state *pipe_config)
1624{
1625        struct drm_device *dev = crtc->base.dev;
1626        struct drm_i915_private *dev_priv = dev->dev_private;
1627        int pipe = crtc->pipe;
1628        enum dpio_channel port = vlv_pipe_to_channel(pipe);
1629        u32 tmp;
1630
1631        assert_pipe_disabled(dev_priv, crtc->pipe);
1632
1633        BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1634
1635        mutex_lock(&dev_priv->dpio_lock);
1636
1637        /* Re-enable the 10-bit clock to the display controller */
1638        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1639        tmp |= DPIO_DCLKP_EN;
1640        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1641
1642        /*
1643         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1644         */
1645        udelay(1);
1646
1647        /* Enable PLL */
1648        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1649
1650        /* Check PLL is locked */
1651        if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1652                DRM_ERROR("PLL %d failed to lock\n", pipe);
1653
1654        /* not sure when this should be written */
1655        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1656        POSTING_READ(DPLL_MD(pipe));
1657
1658        mutex_unlock(&dev_priv->dpio_lock);
1659}
1660
1661static int intel_num_dvo_pipes(struct drm_device *dev)
1662{
1663        struct intel_crtc *crtc;
1664        int count = 0;
1665
1666        for_each_intel_crtc(dev, crtc)
1667                count += crtc->active &&
1668                        intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1669
1670        return count;
1671}
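/*
 * Note: this count drives the i830 DVO 2x clock handling below:
 * i9xx_enable_pll() sets DPLL_DVO_2X_MODE on both PLLs while any DVO
 * pipe is active, and i9xx_disable_pll() clears it on both PLLs again
 * once the last DVO pipe is being shut down.
 */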
1672
1673static void i9xx_enable_pll(struct intel_crtc *crtc)
1674{
1675        struct drm_device *dev = crtc->base.dev;
1676        struct drm_i915_private *dev_priv = dev->dev_private;
1677        int reg = DPLL(crtc->pipe);
1678        u32 dpll = crtc->config->dpll_hw_state.dpll;
1679
1680        assert_pipe_disabled(dev_priv, crtc->pipe);
1681
1682        /* No really, not for ILK+ */
1683        BUG_ON(INTEL_INFO(dev)->gen >= 5);
1684
1685        /* PLL is protected by panel, make sure we can write it */
1686        if (IS_MOBILE(dev) && !IS_I830(dev))
1687                assert_panel_unlocked(dev_priv, crtc->pipe);
1688
1689        /* Enable DVO 2x clock on both PLLs if necessary */
1690        if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1691                /*
1692                 * It appears to be important that we don't enable this
1693                 * for the current pipe before otherwise configuring the
1694                 * PLL. No idea how this should be handled if multiple
1695                 * DVO outputs are enabled simultaneously.
1696                 */
1697                dpll |= DPLL_DVO_2X_MODE;
1698                I915_WRITE(DPLL(!crtc->pipe),
1699                           I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1700        }
1701
1702        /* Wait for the clocks to stabilize. */
1703        POSTING_READ(reg);
1704        udelay(150);
1705
1706        if (INTEL_INFO(dev)->gen >= 4) {
1707                I915_WRITE(DPLL_MD(crtc->pipe),
1708                           crtc->config->dpll_hw_state.dpll_md);
1709        } else {
1710                /* The pixel multiplier can only be updated once the
1711                 * DPLL is enabled and the clocks are stable.
1712                 *
1713                 * So write it again.
1714                 */
1715                I915_WRITE(reg, dpll);
1716        }
1717
1718        /* We do this three times for luck */
1719        I915_WRITE(reg, dpll);
1720        POSTING_READ(reg);
1721        udelay(150); /* wait for warmup */
1722        I915_WRITE(reg, dpll);
1723        POSTING_READ(reg);
1724        udelay(150); /* wait for warmup */
1725        I915_WRITE(reg, dpll);
1726        POSTING_READ(reg);
1727        udelay(150); /* wait for warmup */
1728}
1729
1730/**
1731 * i9xx_disable_pll - disable a PLL
1732 * @crtc: crtc whose PLL is to be disabled
1733 *
1734 * Disable the PLL for the pipe driven by @crtc, making sure the pipe is
1735 * off first.
1736 *
1737 * Note!  This is for pre-ILK only.
1738 */
1739static void i9xx_disable_pll(struct intel_crtc *crtc)
1740{
1741        struct drm_device *dev = crtc->base.dev;
1742        struct drm_i915_private *dev_priv = dev->dev_private;
1743        enum pipe pipe = crtc->pipe;
1744
1745        /* Disable DVO 2x clock on both PLLs if necessary */
1746        if (IS_I830(dev) &&
1747            intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1748            intel_num_dvo_pipes(dev) == 1) {
1749                I915_WRITE(DPLL(PIPE_B),
1750                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1751                I915_WRITE(DPLL(PIPE_A),
1752                           I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1753        }
1754
1755        /* Don't disable the pipe or its PLL if a pipe quirk needs them kept on */
1756        if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1757            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1758                return;
1759
1760        /* Make sure the pipe isn't still relying on us */
1761        assert_pipe_disabled(dev_priv, pipe);
1762
1763        I915_WRITE(DPLL(pipe), 0);
1764        POSTING_READ(DPLL(pipe));
1765}
1766
1767static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1768{
1769        u32 val = 0;
1770
1771        /* Make sure the pipe isn't still relying on us */
1772        assert_pipe_disabled(dev_priv, pipe);
1773
1774        /*
1775         * Leave integrated clock source and reference clock enabled for pipe B.
1776         * The latter is needed for VGA hotplug / manual detection.
1777         */
1778        if (pipe == PIPE_B)
1779                val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1780        I915_WRITE(DPLL(pipe), val);
1781        POSTING_READ(DPLL(pipe));
1782
1783}
1784
1785static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1786{
1787        enum dpio_channel port = vlv_pipe_to_channel(pipe);
1788        u32 val;
1789
1790        /* Make sure the pipe isn't still relying on us */
1791        assert_pipe_disabled(dev_priv, pipe);
1792
1793        /* Set PLL en = 0 */
1794        val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
1795        if (pipe != PIPE_A)
1796                val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1797        I915_WRITE(DPLL(pipe), val);
1798        POSTING_READ(DPLL(pipe));
1799
1800        mutex_lock(&dev_priv->dpio_lock);
1801
1802        /* Disable 10bit clock to display controller */
1803        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1804        val &= ~DPIO_DCLKP_EN;
1805        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1806
1807        /* disable left/right clock distribution */
1808        if (pipe != PIPE_B) {
1809                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1810                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1811                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1812        } else {
1813                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1814                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1815                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1816        }
1817
1818        mutex_unlock(&dev_priv->dpio_lock);
1819}
1820
1821void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1822                struct intel_digital_port *dport)
1823{
1824        u32 port_mask;
1825        int dpll_reg;
1826
1827        switch (dport->port) {
1828        case PORT_B:
1829                port_mask = DPLL_PORTB_READY_MASK;
1830                dpll_reg = DPLL(0);
1831                break;
1832        case PORT_C:
1833                port_mask = DPLL_PORTC_READY_MASK;
1834                dpll_reg = DPLL(0);
1835                break;
1836        case PORT_D:
1837                port_mask = DPLL_PORTD_READY_MASK;
1838                dpll_reg = DPIO_PHY_STATUS;
1839                break;
1840        default:
1841                BUG();
1842        }
1843
1844        if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1845                WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1846                     port_name(dport->port), I915_READ(dpll_reg));
1847}
1848
1849static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1850{
1851        struct drm_device *dev = crtc->base.dev;
1852        struct drm_i915_private *dev_priv = dev->dev_private;
1853        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1854
1855        if (WARN_ON(pll == NULL))
1856                return;
1857
1858        WARN_ON(!pll->config.crtc_mask);
1859        if (pll->active == 0) {
1860                DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1861                WARN_ON(pll->on);
1862                assert_shared_dpll_disabled(dev_priv, pll);
1863
1864                pll->mode_set(dev_priv, pll);
1865        }
1866}
1867
1868/**
1869 * intel_enable_shared_dpll - enable the shared DPLL used by a crtc
1870 * @crtc: crtc whose shared DPLL is to be enabled
1871 *
1872 * The shared DPLL (the PCH PLL on PCH platforms) needs to be
1873 * enabled before the PCH transcoder is enabled, since it drives
1874 * the transcoder clock.
1875 */
1876static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1877{
1878        struct drm_device *dev = crtc->base.dev;
1879        struct drm_i915_private *dev_priv = dev->dev_private;
1880        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1881
1882        if (WARN_ON(pll == NULL))
1883                return;
1884
1885        if (WARN_ON(pll->config.crtc_mask == 0))
1886                return;
1887
1888        DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1889                      pll->name, pll->active, pll->on,
1890                      crtc->base.base.id);
1891
1892        if (pll->active++) {
1893                WARN_ON(!pll->on);
1894                assert_shared_dpll_enabled(dev_priv, pll);
1895                return;
1896        }
1897        WARN_ON(pll->on);
1898
1899        intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1900
1901        DRM_DEBUG_KMS("enabling %s\n", pll->name);
1902        pll->enable(dev_priv, pll);
1903        pll->on = true;
1904}
1905
1906static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1907{
1908        struct drm_device *dev = crtc->base.dev;
1909        struct drm_i915_private *dev_priv = dev->dev_private;
1910        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1911
1912        /* PCH only available on ILK+ */
1913        BUG_ON(INTEL_INFO(dev)->gen < 5);
1914        if (WARN_ON(pll == NULL))
1915                return;
1916
1917        if (WARN_ON(pll->config.crtc_mask == 0))
1918                return;
1919
1920        DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1921                      pll->name, pll->active, pll->on,
1922                      crtc->base.base.id);
1923
1924        if (WARN_ON(pll->active == 0)) {
1925                assert_shared_dpll_disabled(dev_priv, pll);
1926                return;
1927        }
1928
1929        assert_shared_dpll_enabled(dev_priv, pll);
1930        WARN_ON(!pll->on);
1931        if (--pll->active)
1932                return;
1933
1934        DRM_DEBUG_KMS("disabling %s\n", pll->name);
1935        pll->disable(dev_priv, pll);
1936        pll->on = false;
1937
1938        intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1939}
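/*
 * Note on the refcounting above: pll->active counts the crtcs currently
 * using the shared DPLL. intel_enable_shared_dpll() only programs the
 * hardware on the 0 -> 1 transition, intel_disable_shared_dpll() only
 * turns it off again when the count drops back to 0, and the
 * POWER_DOMAIN_PLLS reference is taken/released at those same points.
 */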
1940
1941static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1942                                           enum pipe pipe)
1943{
1944        struct drm_device *dev = dev_priv->dev;
1945        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1946        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1947        uint32_t reg, val, pipeconf_val;
1948
1949        /* PCH only available on ILK+ */
1950        BUG_ON(!HAS_PCH_SPLIT(dev));
1951
1952        /* Make sure PCH DPLL is enabled */
1953        assert_shared_dpll_enabled(dev_priv,
1954                                   intel_crtc_to_shared_dpll(intel_crtc));
1955
1956        /* FDI must be feeding us bits for PCH ports */
1957        assert_fdi_tx_enabled(dev_priv, pipe);
1958        assert_fdi_rx_enabled(dev_priv, pipe);
1959
1960        if (HAS_PCH_CPT(dev)) {
1961                /* Workaround: Set the timing override bit before enabling the
1962                 * pch transcoder. */
1963                reg = TRANS_CHICKEN2(pipe);
1964                val = I915_READ(reg);
1965                val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1966                I915_WRITE(reg, val);
1967        }
1968
1969        reg = PCH_TRANSCONF(pipe);
1970        val = I915_READ(reg);
1971        pipeconf_val = I915_READ(PIPECONF(pipe));
1972
1973        if (HAS_PCH_IBX(dev_priv->dev)) {
1974                /*
1975                 * Make the BPC in the transcoder consistent with
1976                 * that in the pipeconf register.
1977                 */
1978                val &= ~PIPECONF_BPC_MASK;
1979                val |= pipeconf_val & PIPECONF_BPC_MASK;
1980        }
1981
1982        val &= ~TRANS_INTERLACE_MASK;
1983        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1984                if (HAS_PCH_IBX(dev_priv->dev) &&
1985                    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1986                        val |= TRANS_LEGACY_INTERLACED_ILK;
1987                else
1988                        val |= TRANS_INTERLACED;
1989        } else
1990                val |= TRANS_PROGRESSIVE;
1991
1992        I915_WRITE(reg, val | TRANS_ENABLE);
1993        if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1994                DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1995}
1996
1997static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1998                                      enum transcoder cpu_transcoder)
1999{
2000        u32 val, pipeconf_val;
2001
2002        /* PCH only available on ILK+ */
2003        BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
2004
2005        /* FDI must be feeding us bits for PCH ports */
2006        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
2007        assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
2008
2009        /* Workaround: set timing override bit. */
2010        val = I915_READ(_TRANSA_CHICKEN2);
2011        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
2012        I915_WRITE(_TRANSA_CHICKEN2, val);
2013
2014        val = TRANS_ENABLE;
2015        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
2016
2017        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
2018            PIPECONF_INTERLACED_ILK)
2019                val |= TRANS_INTERLACED;
2020        else
2021                val |= TRANS_PROGRESSIVE;
2022
2023        I915_WRITE(LPT_TRANSCONF, val);
2024        if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
2025                DRM_ERROR("Failed to enable PCH transcoder\n");
2026}
2027
2028static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2029                                            enum pipe pipe)
2030{
2031        struct drm_device *dev = dev_priv->dev;
2032        uint32_t reg, val;
2033
2034        /* FDI relies on the transcoder */
2035        assert_fdi_tx_disabled(dev_priv, pipe);
2036        assert_fdi_rx_disabled(dev_priv, pipe);
2037
2038        /* Ports must be off as well */
2039        assert_pch_ports_disabled(dev_priv, pipe);
2040
2041        reg = PCH_TRANSCONF(pipe);
2042        val = I915_READ(reg);
2043        val &= ~TRANS_ENABLE;
2044        I915_WRITE(reg, val);
2045        /* wait for PCH transcoder off, transcoder state */
2046        if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2047                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
2048
2049        if (!HAS_PCH_IBX(dev)) {
2050                /* Workaround: Clear the timing override chicken bit again. */
2051                reg = TRANS_CHICKEN2(pipe);
2052                val = I915_READ(reg);
2053                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2054                I915_WRITE(reg, val);
2055        }
2056}
2057
2058static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
2059{
2060        u32 val;
2061
2062        val = I915_READ(LPT_TRANSCONF);
2063        val &= ~TRANS_ENABLE;
2064        I915_WRITE(LPT_TRANSCONF, val);
2065        /* wait for PCH transcoder off, transcoder state */
2066        if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
2067                DRM_ERROR("Failed to disable PCH transcoder\n");
2068
2069        /* Workaround: clear timing override bit. */
2070        val = I915_READ(_TRANSA_CHICKEN2);
2071        val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2072        I915_WRITE(_TRANSA_CHICKEN2, val);
2073}
2074
2075/**
2076 * intel_enable_pipe - enable a pipe, asserting requirements
2077 * @crtc: crtc responsible for the pipe
2078 *
2079 * Enable @crtc's pipe, making sure that various hardware specific requirements
2080 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2081 */
2082static void intel_enable_pipe(struct intel_crtc *crtc)
2083{
2084        struct drm_device *dev = crtc->base.dev;
2085        struct drm_i915_private *dev_priv = dev->dev_private;
2086        enum pipe pipe = crtc->pipe;
2087        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2088                                                                      pipe);
2089        enum pipe pch_transcoder;
2090        int reg;
2091        u32 val;
2092
2093        assert_planes_disabled(dev_priv, pipe);
2094        assert_cursor_disabled(dev_priv, pipe);
2095        assert_sprites_disabled(dev_priv, pipe);
2096
2097        if (HAS_PCH_LPT(dev_priv->dev))
2098                pch_transcoder = TRANSCODER_A;
2099        else
2100                pch_transcoder = pipe;
2101
2102        /*
2103         * A pipe without a PLL won't actually be able to drive bits from
2104         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2105         * need the check.
2106         */
2107        if (!HAS_PCH_SPLIT(dev_priv->dev)) {
2108                if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
2109                        assert_dsi_pll_enabled(dev_priv);
2110                else
2111                        assert_pll_enabled(dev_priv, pipe);
2112        } else {
2113                if (crtc->config->has_pch_encoder) {
2114                        /* if driving the PCH, we need FDI enabled */
2115                        assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2116                        assert_fdi_tx_pll_enabled(dev_priv,
2117                                                  (enum pipe) cpu_transcoder);
2118                }
2119                /* FIXME: assert CPU port conditions for SNB+ */
2120        }
2121
2122        reg = PIPECONF(cpu_transcoder);
2123        val = I915_READ(reg);
2124        if (val & PIPECONF_ENABLE) {
2125                WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2126                          (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2127                return;
2128        }
2129
2130        I915_WRITE(reg, val | PIPECONF_ENABLE);
2131        POSTING_READ(reg);
2132}
2133
2134/**
2135 * intel_disable_pipe - disable a pipe, asserting requirements
2136 * @crtc: crtc whose pipe is to be disabled
2137 *
2138 * Disable the pipe of @crtc, making sure that various hardware
2139 * specific requirements are met, if applicable, e.g. plane
2140 * disabled, panel fitter off, etc.
2141 *
2142 * Will wait until the pipe has shut down before returning.
2143 */
2144static void intel_disable_pipe(struct intel_crtc *crtc)
2145{
2146        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2147        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2148        enum pipe pipe = crtc->pipe;
2149        int reg;
2150        u32 val;
2151
2152        /*
2153         * Make sure planes won't keep trying to pump pixels to us,
2154         * or we might hang the display.
2155         */
2156        assert_planes_disabled(dev_priv, pipe);
2157        assert_cursor_disabled(dev_priv, pipe);
2158        assert_sprites_disabled(dev_priv, pipe);
2159
2160        reg = PIPECONF(cpu_transcoder);
2161        val = I915_READ(reg);
2162        if ((val & PIPECONF_ENABLE) == 0)
2163                return;
2164
2165        /*
2166         * Double wide has implications for planes
2167         * so best keep it disabled when not needed.
2168         */
2169        if (crtc->config->double_wide)
2170                val &= ~PIPECONF_DOUBLE_WIDE;
2171
2172        /* Don't disable the pipe or its PLL if a pipe quirk needs them kept on */
2173        if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2174            !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2175                val &= ~PIPECONF_ENABLE;
2176
2177        I915_WRITE(reg, val);
2178        if ((val & PIPECONF_ENABLE) == 0)
2179                intel_wait_for_pipe_off(crtc);
2180}
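/*
 * Note: when QUIRK_PIPEA_FORCE/QUIRK_PIPEB_FORCE applies, PIPECONF_ENABLE
 * is deliberately left set above, so intel_wait_for_pipe_off() is skipped
 * and the pipe keeps running.
 */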
2181
2182/*
2183 * Plane regs are double buffered, going from enabled->disabled needs a
2184 * trigger in order to latch.  The display address reg provides this.
2185 */
2186void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2187                               enum plane plane)
2188{
2189        struct drm_device *dev = dev_priv->dev;
2190        u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
2191
2192        I915_WRITE(reg, I915_READ(reg));
2193        POSTING_READ(reg);
2194}
2195
2196/**
2197 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2198 * @plane:  plane to be enabled
2199 * @crtc: crtc for the plane
2200 *
2201 * Enable @plane on @crtc, making sure that the pipe is running first.
2202 */
2203static void intel_enable_primary_hw_plane(struct drm_plane *plane,
2204                                          struct drm_crtc *crtc)
2205{
2206        struct drm_device *dev = plane->dev;
2207        struct drm_i915_private *dev_priv = dev->dev_private;
2208        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2209
2210        /* If the pipe isn't enabled, we can't pump pixels and may hang */
2211        assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2212
2213        if (intel_crtc->primary_enabled)
2214                return;
2215
2216        intel_crtc->primary_enabled = true;
2217
2218        dev_priv->display.update_primary_plane(crtc, plane->fb,
2219                                               crtc->x, crtc->y);
2220
2221        /*
2222         * BDW signals flip done immediately if the plane
2223         * is disabled, even if the plane enable is already
2224         * armed to occur at the next vblank :(
2225         */
2226        if (IS_BROADWELL(dev))
2227                intel_wait_for_vblank(dev, intel_crtc->pipe);
2228}
2229
2230/**
2231 * intel_disable_primary_hw_plane - disable the primary hardware plane
2232 * @plane: plane to be disabled
2233 * @crtc: crtc for the plane
2234 *
2235 * Disable @plane on @crtc, making sure that the pipe is running first.
2236 */
2237static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2238                                           struct drm_crtc *crtc)
2239{
2240        struct drm_device *dev = plane->dev;
2241        struct drm_i915_private *dev_priv = dev->dev_private;
2242        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2243
2244        if (WARN_ON(!intel_crtc->active))
2245                return;
2246
2247        if (!intel_crtc->primary_enabled)
2248                return;
2249
2250        intel_crtc->primary_enabled = false;
2251
2252        dev_priv->display.update_primary_plane(crtc, plane->fb,
2253                                               crtc->x, crtc->y);
2254}
2255
2256static bool need_vtd_wa(struct drm_device *dev)
2257{
2258#ifdef CONFIG_INTEL_IOMMU
2259        if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2260                return true;
2261#endif
2262        return false;
2263}
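/*
 * need_vtd_wa() reports whether VT-d is actively remapping GPU accesses
 * (gen6+ with intel_iommu_gfx_mapped); intel_pin_and_fence_fb_obj() below
 * uses it to raise the scanout alignment to 256 KiB as part of the VT-d
 * workaround.
 */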
2264
2265unsigned int
2266intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
2267                  uint64_t fb_format_modifier)
2268{
2269        unsigned int tile_height;
2270        uint32_t pixel_bytes;
2271
2272        switch (fb_format_modifier) {
2273        case DRM_FORMAT_MOD_NONE:
2274                tile_height = 1;
2275                break;
2276        case I915_FORMAT_MOD_X_TILED:
2277                tile_height = IS_GEN2(dev) ? 16 : 8;
2278                break;
2279        case I915_FORMAT_MOD_Y_TILED:
2280                tile_height = 32;
2281                break;
2282        case I915_FORMAT_MOD_Yf_TILED:
2283                pixel_bytes = drm_format_plane_cpp(pixel_format, 0);
2284                switch (pixel_bytes) {
2285                default:
2286                case 1:
2287                        tile_height = 64;
2288                        break;
2289                case 2:
2290                case 4:
2291                        tile_height = 32;
2292                        break;
2293                case 8:
2294                        tile_height = 16;
2295                        break;
2296                case 16:
2297                        WARN_ONCE(1,
2298                                  "128-bit pixels are not supported for display!");
2299                        tile_height = 16;
2300                        break;
2301                }
2302                break;
2303        default:
2304                MISSING_CASE(fb_format_modifier);
2305                tile_height = 1;
2306                break;
2307        }
2308
2309        return tile_height;
2310}
2311
2312unsigned int
2313intel_fb_align_height(struct drm_device *dev, unsigned int height,
2314                      uint32_t pixel_format, uint64_t fb_format_modifier)
2315{
2316        return ALIGN(height, intel_tile_height(dev, pixel_format,
2317                                               fb_format_modifier));
2318}
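/*
 * Illustrative example for the two helpers above: with an X-tiled
 * framebuffer on gen3+ the tile height is 8 rows, so a 1080-row buffer
 * stays at ALIGN(1080, 8) = 1080, whereas a Y-tiled buffer (tile height
 * 32) is padded to ALIGN(1080, 32) = 1088 rows.
 */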
2319
2320static int
2321intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2322                        const struct drm_plane_state *plane_state)
2323{
2324        struct intel_rotation_info *info = &view->rotation_info;
2325
2326        *view = i915_ggtt_view_normal;
2327
2328        if (!plane_state)
2329                return 0;
2330
2331        if (!intel_rotation_90_or_270(plane_state->rotation))
2332                return 0;
2333
2334        *view = i915_ggtt_view_rotated;
2335
2336        info->height = fb->height;
2337        info->pixel_format = fb->pixel_format;
2338        info->pitch = fb->pitches[0];
2339        info->fb_modifier = fb->modifier[0];
2340
2341        if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED ||
2342              info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) {
2343                DRM_DEBUG_KMS(
2344                              "Y or Yf tiling is needed for 90/270 rotation!\n");
2345                return -EINVAL;
2346        }
2347
2348        return 0;
2349}
2350
2351int
2352intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2353                           struct drm_framebuffer *fb,
2354                           const struct drm_plane_state *plane_state,
2355                           struct intel_engine_cs *pipelined)
2356{
2357        struct drm_device *dev = fb->dev;
2358        struct drm_i915_private *dev_priv = dev->dev_private;
2359        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2360        struct i915_ggtt_view view;
2361        u32 alignment;
2362        int ret;
2363
2364        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2365
2366        switch (fb->modifier[0]) {
2367        case DRM_FORMAT_MOD_NONE:
2368                if (INTEL_INFO(dev)->gen >= 9)
2369                        alignment = 256 * 1024;
2370                else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2371                        alignment = 128 * 1024;
2372                else if (INTEL_INFO(dev)->gen >= 4)
2373                        alignment = 4 * 1024;
2374                else
2375                        alignment = 64 * 1024;
2376                break;
2377        case I915_FORMAT_MOD_X_TILED:
2378                if (INTEL_INFO(dev)->gen >= 9)
2379                        alignment = 256 * 1024;
2380                else {
2381                        /* pin() will align the object as required by fence */
2382                        alignment = 0;
2383                }
2384                break;
2385        case I915_FORMAT_MOD_Y_TILED:
2386        case I915_FORMAT_MOD_Yf_TILED:
2387                if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
2388                          "Y tiling bo slipped through, driver bug!\n"))
2389                        return -EINVAL;
2390                alignment = 1 * 1024 * 1024;
2391                break;
2392        default:
2393                MISSING_CASE(fb->modifier[0]);
2394                return -EINVAL;
2395        }
2396
2397        ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
2398        if (ret)
2399                return ret;
2400
2401        /* Note that the w/a also requires 64 PTE of padding following the
2402         * bo. We currently fill all unused PTE with the shadow page and so
2403         * we should always have valid PTE following the scanout preventing
2404         * the VT-d warning.
2405         */
2406        if (need_vtd_wa(dev) && alignment < 256 * 1024)
2407                alignment = 256 * 1024;
2408
2409        /*
2410         * Global gtt pte registers are special registers which actually forward
2411         * writes to a chunk of system memory. Which means that there is no risk
2412         * that the register values disappear as soon as we call
2413         * intel_runtime_pm_put(), so it is correct to wrap only the
2414         * pin/unpin/fence and not more.
2415         */
2416        intel_runtime_pm_get(dev_priv);
2417
2418        dev_priv->mm.interruptible = false;
2419        ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
2420                                                   &view);
2421        if (ret)
2422                goto err_interruptible;
2423
2424        /* Install a fence for tiled scan-out. Pre-i965 always needs a
2425         * fence, whereas 965+ only requires a fence if using
2426         * framebuffer compression.  For simplicity, we always install
2427         * a fence as the cost is not that onerous.
2428         */
2429        ret = i915_gem_object_get_fence(obj);
2430        if (ret)
2431                goto err_unpin;
2432
2433        i915_gem_object_pin_fence(obj);
2434
2435        dev_priv->mm.interruptible = true;
2436        intel_runtime_pm_put(dev_priv);
2437        return 0;
2438
2439err_unpin:
2440        i915_gem_object_unpin_from_display_plane(obj, &view);
2441err_interruptible:
2442        dev_priv->mm.interruptible = true;
2443        intel_runtime_pm_put(dev_priv);
2444        return ret;
2445}
2446
2447static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2448                               const struct drm_plane_state *plane_state)
2449{
2450        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2451        struct i915_ggtt_view view;
2452        int ret;
2453
2454        WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2455
2456        ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
2457        WARN_ONCE(ret, "Couldn't get view from plane state!");
2458
2459        i915_gem_object_unpin_fence(obj);
2460        i915_gem_object_unpin_from_display_plane(obj, &view);
2461}
2462
2463/* Computes the linear offset to the base tile and adjusts x, y. Bytes per
2464 * pixel is assumed to be a power-of-two. */
2465unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2466                                             unsigned int tiling_mode,
2467                                             unsigned int cpp,
2468                                             unsigned int pitch)
2469{
2470        if (tiling_mode != I915_TILING_NONE) {
2471                unsigned int tile_rows, tiles;
2472
2473                tile_rows = *y / 8;
2474                *y %= 8;
2475
2476                tiles = *x / (512/cpp);
2477                *x %= 512/cpp;
2478
2479                return tile_rows * pitch * 8 + tiles * 4096;
2480        } else {
2481                unsigned int offset;
2482
2483                offset = *y * pitch + *x * cpp;
2484                *y = 0;
2485                *x = (offset & 4095) / cpp;
2486                return offset & -4096;
2487        }
2488}
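/*
 * Worked example with illustrative numbers for the helper above: for an
 * X-tiled XRGB8888 buffer (cpp = 4, so a tile is 512/4 = 128 pixels wide
 * and 8 rows tall) with pitch = 8192 bytes and (x, y) = (300, 20):
 *
 *   tile_rows = 20 / 8    = 2, y becomes 4
 *   tiles     = 300 / 128 = 2, x becomes 44
 *   offset    = 2 * 8192 * 8 + 2 * 4096 = 139264 bytes
 *
 * The returned base is thus 4 KiB (one tile) aligned and the adjusted
 * (x, y) stays within a single tile; in the linear case the base is
 * simply rounded down to 4 KiB and the remainder folded back into x.
 */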
2489
2490static int i9xx_format_to_fourcc(int format)
2491{
2492        switch (format) {
2493        case DISPPLANE_8BPP:
2494                return DRM_FORMAT_C8;
2495        case DISPPLANE_BGRX555:
2496                return DRM_FORMAT_XRGB1555;
2497        case DISPPLANE_BGRX565:
2498                return DRM_FORMAT_RGB565;
2499        default:
2500        case DISPPLANE_BGRX888:
2501                return DRM_FORMAT_XRGB8888;
2502        case DISPPLANE_RGBX888:
2503                return DRM_FORMAT_XBGR8888;
2504        case DISPPLANE_BGRX101010:
2505                return DRM_FORMAT_XRGB2101010;
2506        case DISPPLANE_RGBX101010:
2507                return DRM_FORMAT_XBGR2101010;
2508        }
2509}
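/*
 * Note: the default: label above shares the DISPPLANE_BGRX888 case, so an
 * unrecognised plane format field decodes to DRM_FORMAT_XRGB8888.
 */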
2510
2511static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2512{
2513        switch (format) {
2514        case PLANE_CTL_FORMAT_RGB_565:
2515                return DRM_FORMAT_RGB565;
2516        default:
2517        case PLANE_CTL_FORMAT_XRGB_8888:
2518                if (rgb_order) {
2519                        if (alpha)
2520                                return DRM_FORMAT_ABGR8888;
2521                        else
2522                                return DRM_FORMAT_XBGR8888;
2523                } else {
2524                        if (alpha)
2525                                return DRM_FORMAT_ARGB8888;
2526                        else
2527                                return DRM_FORMAT_XRGB8888;
2528                }
2529        case PLANE_CTL_FORMAT_XRGB_2101010:
2530                if (rgb_order)
2531                        return DRM_FORMAT_XBGR2101010;
2532                else
2533                        return DRM_FORMAT_XRGB2101010;
2534        }
2535}
2536
2537static bool
2538intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2539                              struct intel_initial_plane_config *plane_config)
2540{
2541        struct drm_device *dev = crtc->base.dev;
2542        struct drm_i915_gem_object *obj = NULL;
2543        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2544        struct drm_framebuffer *fb = &plane_config->fb->base;
2545        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2546        u32 size_aligned = round_up(plane_config->base + plane_config->size,
2547                                    PAGE_SIZE);
2548
2549        size_aligned -= base_aligned;
2550
2551        if (plane_config->size == 0)
2552                return false;
2553
2554        obj = i915_gem_object_create_stolen_for_preallocated(dev,
2555                                                             base_aligned,
2556                                                             base_aligned,
2557                                                             size_aligned);
2558        if (!obj)
2559                return false;
2560
2561        obj->tiling_mode = plane_config->tiling;
2562        if (obj->tiling_mode == I915_TILING_X)
2563                obj->stride = fb->pitches[0];
2564
2565        mode_cmd.pixel_format = fb->pixel_format;
2566        mode_cmd.width = fb->width;
2567        mode_cmd.height = fb->height;
2568        mode_cmd.pitches[0] = fb->pitches[0];
2569        mode_cmd.modifier[0] = fb->modifier[0];
2570        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2571
2572        mutex_lock(&dev->struct_mutex);
2573        if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2574                                   &mode_cmd, obj)) {
2575                DRM_DEBUG_KMS("intel fb init failed\n");
2576                goto out_unref_obj;
2577        }
2578        mutex_unlock(&dev->struct_mutex);
2579
2580        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2581        return true;
2582
2583out_unref_obj:
2584        drm_gem_object_unreference(&obj->base);
2585        mutex_unlock(&dev->struct_mutex);
2586        return false;
2587}
2588
2589/* Update plane->state->fb to match plane->fb after driver-internal updates */
2590static void
2591update_state_fb(struct drm_plane *plane)
2592{
2593        if (plane->fb == plane->state->fb)
2594                return;
2595
2596        if (plane->state->fb)
2597                drm_framebuffer_unreference(plane->state->fb);
2598        plane->state->fb = plane->fb;
2599        if (plane->state->fb)
2600                drm_framebuffer_reference(plane->state->fb);
2601}
2602
2603static void
2604intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2605                             struct intel_initial_plane_config *plane_config)
2606{
2607        struct drm_device *dev = intel_crtc->base.dev;
2608        struct drm_i915_private *dev_priv = dev->dev_private;
2609        struct drm_crtc *c;
2610        struct intel_crtc *i;
2611        struct drm_i915_gem_object *obj;
2612        struct drm_plane *primary = intel_crtc->base.primary;
2613        struct drm_framebuffer *fb;
2614
2615        if (!plane_config->fb)
2616                return;
2617
2618        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2619                fb = &plane_config->fb->base;
2620                goto valid_fb;
2621        }
2622
2623        kfree(plane_config->fb);
2624
2625        /*
2626         * Failed to alloc the obj, check to see if we should share
2627         * an fb with another CRTC instead
2628         */
2629        for_each_crtc(dev, c) {
2630                i = to_intel_crtc(c);
2631
2632                if (c == &intel_crtc->base)
2633                        continue;
2634
2635                if (!i->active)
2636                        continue;
2637
2638                fb = c->primary->fb;
2639                if (!fb)
2640                        continue;
2641
2642                obj = intel_fb_obj(fb);
2643                if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2644                        drm_framebuffer_reference(fb);
2645                        goto valid_fb;
2646                }
2647        }
2648
2649        return;
2650
2651valid_fb:
2652        obj = intel_fb_obj(fb);
2653        if (obj->tiling_mode != I915_TILING_NONE)
2654                dev_priv->preserve_bios_swizzle = true;
2655
2656        primary->fb = fb;
2657        primary->state->crtc = &intel_crtc->base;
2658        primary->crtc = &intel_crtc->base;
2659        update_state_fb(primary);
2660        obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2661}
2662
2663static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2664                                      struct drm_framebuffer *fb,
2665                                      int x, int y)
2666{
2667        struct drm_device *dev = crtc->dev;
2668        struct drm_i915_private *dev_priv = dev->dev_private;
2669        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2670        struct drm_i915_gem_object *obj;
2671        int plane = intel_crtc->plane;
2672        unsigned long linear_offset;
2673        u32 dspcntr;
2674        u32 reg = DSPCNTR(plane);
2675        int pixel_size;
2676
2677        if (!intel_crtc->primary_enabled) {
2678                I915_WRITE(reg, 0);
2679                if (INTEL_INFO(dev)->gen >= 4)
2680                        I915_WRITE(DSPSURF(plane), 0);
2681                else
2682                        I915_WRITE(DSPADDR(plane), 0);
2683                POSTING_READ(reg);
2684                return;
2685        }
2686
2687        obj = intel_fb_obj(fb);
2688        if (WARN_ON(obj == NULL))
2689                return;
2690
2691        pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2692
2693        dspcntr = DISPPLANE_GAMMA_ENABLE;
2694
2695        dspcntr |= DISPLAY_PLANE_ENABLE;
2696
2697        if (INTEL_INFO(dev)->gen < 4) {
2698                if (intel_crtc->pipe == PIPE_B)
2699                        dspcntr |= DISPPLANE_SEL_PIPE_B;
2700
2701                /* pipesrc and dspsize control the size that is scaled from,
2702                 * which should always be the user's requested size.
2703                 */
2704                I915_WRITE(DSPSIZE(plane),
2705                           ((intel_crtc->config->pipe_src_h - 1) << 16) |
2706                           (intel_crtc->config->pipe_src_w - 1));
2707                I915_WRITE(DSPPOS(plane), 0);
2708        } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2709                I915_WRITE(PRIMSIZE(plane),
2710                           ((intel_crtc->config->pipe_src_h - 1) << 16) |
2711                           (intel_crtc->config->pipe_src_w - 1));
2712                I915_WRITE(PRIMPOS(plane), 0);
2713                I915_WRITE(PRIMCNSTALPHA(plane), 0);
2714        }
2715
2716        switch (fb->pixel_format) {
2717        case DRM_FORMAT_C8:
2718                dspcntr |= DISPPLANE_8BPP;
2719                break;
2720        case DRM_FORMAT_XRGB1555:
2721        case DRM_FORMAT_ARGB1555:
2722                dspcntr |= DISPPLANE_BGRX555;
2723                break;
2724        case DRM_FORMAT_RGB565:
2725                dspcntr |= DISPPLANE_BGRX565;
2726                break;
2727        case DRM_FORMAT_XRGB8888:
2728        case DRM_FORMAT_ARGB8888:
2729                dspcntr |= DISPPLANE_BGRX888;
2730                break;
2731        case DRM_FORMAT_XBGR8888:
2732        case DRM_FORMAT_ABGR8888:
2733                dspcntr |= DISPPLANE_RGBX888;
2734                break;
2735        case DRM_FORMAT_XRGB2101010:
2736        case DRM_FORMAT_ARGB2101010:
2737                dspcntr |= DISPPLANE_BGRX101010;
2738                break;
2739        case DRM_FORMAT_XBGR2101010:
2740        case DRM_FORMAT_ABGR2101010:
2741                dspcntr |= DISPPLANE_RGBX101010;
2742                break;
2743        default:
2744                BUG();
2745        }
2746
2747        if (INTEL_INFO(dev)->gen >= 4 &&
2748            obj->tiling_mode != I915_TILING_NONE)
2749                dspcntr |= DISPPLANE_TILED;
2750
2751        if (IS_G4X(dev))
2752                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2753
2754        linear_offset = y * fb->pitches[0] + x * pixel_size;
2755
2756        if (INTEL_INFO(dev)->gen >= 4) {
2757                intel_crtc->dspaddr_offset =
2758                        intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2759                                                       pixel_size,
2760                                                       fb->pitches[0]);
2761                linear_offset -= intel_crtc->dspaddr_offset;
2762        } else {
2763                intel_crtc->dspaddr_offset = linear_offset;
2764        }
2765
2766        if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
2767                dspcntr |= DISPPLANE_ROTATE_180;
2768
2769                x += (intel_crtc->config->pipe_src_w - 1);
2770                y += (intel_crtc->config->pipe_src_h - 1);
2771
2772                /* Find the last pixel of the last line of the
2773                 * displayed data and add it to linear_offset. */
2774                linear_offset +=
2775                        (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2776                        (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2777        }
2778
2779        I915_WRITE(reg, dspcntr);
2780
2781        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2782        if (INTEL_INFO(dev)->gen >= 4) {
2783                I915_WRITE(DSPSURF(plane),
2784                           i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2785                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2786                I915_WRITE(DSPLINOFF(plane), linear_offset);
2787        } else
2788                I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2789        POSTING_READ(reg);
2790}
2791
2792static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2793                                          struct drm_framebuffer *fb,
2794                                          int x, int y)
2795{
2796        struct drm_device *dev = crtc->dev;
2797        struct drm_i915_private *dev_priv = dev->dev_private;
2798        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2799        struct drm_i915_gem_object *obj;
2800        int plane = intel_crtc->plane;
2801        unsigned long linear_offset;
2802        u32 dspcntr;
2803        u32 reg = DSPCNTR(plane);
2804        int pixel_size;
2805
2806        if (!intel_crtc->primary_enabled) {
2807                I915_WRITE(reg, 0);
2808                I915_WRITE(DSPSURF(plane), 0);
2809                POSTING_READ(reg);
2810                return;
2811        }
2812
2813        obj = intel_fb_obj(fb);
2814        if (WARN_ON(obj == NULL))
2815                return;
2816
2817        pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2818
2819        dspcntr = DISPPLANE_GAMMA_ENABLE;
2820
2821        dspcntr |= DISPLAY_PLANE_ENABLE;
2822
2823        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2824                dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2825
2826        switch (fb->pixel_format) {
2827        case DRM_FORMAT_C8:
2828                dspcntr |= DISPPLANE_8BPP;
2829                break;
2830        case DRM_FORMAT_RGB565:
2831                dspcntr |= DISPPLANE_BGRX565;
2832                break;
2833        case DRM_FORMAT_XRGB8888:
2834        case DRM_FORMAT_ARGB8888:
2835                dspcntr |= DISPPLANE_BGRX888;
2836                break;
2837        case DRM_FORMAT_XBGR8888:
2838        case DRM_FORMAT_ABGR8888:
2839                dspcntr |= DISPPLANE_RGBX888;
2840                break;
2841        case DRM_FORMAT_XRGB2101010:
2842        case DRM_FORMAT_ARGB2101010:
2843                dspcntr |= DISPPLANE_BGRX101010;
2844                break;
2845        case DRM_FORMAT_XBGR2101010:
2846        case DRM_FORMAT_ABGR2101010:
2847                dspcntr |= DISPPLANE_RGBX101010;
2848                break;
2849        default:
2850                BUG();
2851        }
2852
2853        if (obj->tiling_mode != I915_TILING_NONE)
2854                dspcntr |= DISPPLANE_TILED;
2855
2856        if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2857                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2858
2859        linear_offset = y * fb->pitches[0] + x * pixel_size;
2860        intel_crtc->dspaddr_offset =
2861                intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2862                                               pixel_size,
2863                                               fb->pitches[0]);
2864        linear_offset -= intel_crtc->dspaddr_offset;
2865        if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
2866                dspcntr |= DISPPLANE_ROTATE_180;
2867
2868                if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2869                        x += (intel_crtc->config->pipe_src_w - 1);
2870                        y += (intel_crtc->config->pipe_src_h - 1);
2871
2872                        /* Find the last pixel of the last line of the display
2873                         * data and add it to linear_offset. */
2874                        linear_offset +=
2875                                (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2876                                (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2877                }
2878        }
2879
2880        I915_WRITE(reg, dspcntr);
2881
2882        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2883        I915_WRITE(DSPSURF(plane),
2884                   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2885        if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2886                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2887        } else {
2888                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2889                I915_WRITE(DSPLINOFF(plane), linear_offset);
2890        }
2891        POSTING_READ(reg);
2892}
2893
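    /*
     * Return the unit, in bytes, in which the plane stride register is
     * expressed for the given fb modifier and pixel format (i.e. the value
     * fb->pitches[0] is divided by before being written to PLANE_STRIDE).
     */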
2894u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2895                              uint32_t pixel_format)
2896{
2897        u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2898
2899        /*
2900         * The stride is either expressed in chunks of 64 bytes for
2901         * linear buffers or in number of tiles for tiled
2902         * buffers.
2903         */
2904        switch (fb_modifier) {
2905        case DRM_FORMAT_MOD_NONE:
2906                return 64;
2907        case I915_FORMAT_MOD_X_TILED:
2908                if (INTEL_INFO(dev)->gen == 2)
2909                        return 128;
2910                return 512;
2911        case I915_FORMAT_MOD_Y_TILED:
2912                /* No need to check for old gens and Y tiling since this is
2913                 * about the display engine and those will be blocked before
2914                 * we get here.
2915                 */
2916                return 128;
2917        case I915_FORMAT_MOD_Yf_TILED:
2918                if (bits_per_pixel == 8)
2919                        return 64;
2920                else
2921                        return 128;
2922        default:
2923                MISSING_CASE(fb_modifier);
2924                return 64;
2925        }
2926}
2927
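    /*
     * Return the GGTT offset of the object backing the plane, using the
     * rotated GGTT view when the plane state requests a 90/270 degree
     * rotation.
     */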
2928unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
2929                                     struct drm_i915_gem_object *obj)
2930{
2931        const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
2932
2933        if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
2934                view = &i915_ggtt_view_rotated;
2935
2936        return i915_gem_obj_ggtt_offset_view(obj, view);
2937}
2938
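    /*
     * Program the SKL+ primary plane (PLANE_CTL, PLANE_OFFSET, PLANE_SIZE,
     * PLANE_STRIDE, PLANE_SURF), or clear PLANE_CTL/PLANE_SURF when the
     * primary plane is disabled.
     */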
2939static void skylake_update_primary_plane(struct drm_crtc *crtc,
2940                                         struct drm_framebuffer *fb,
2941                                         int x, int y)
2942{
2943        struct drm_device *dev = crtc->dev;
2944        struct drm_i915_private *dev_priv = dev->dev_private;
2945        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2946        struct drm_i915_gem_object *obj;
2947        int pipe = intel_crtc->pipe;
2948        u32 plane_ctl, stride_div;
2949        unsigned long surf_addr;
2950
2951        if (!intel_crtc->primary_enabled) {
2952                I915_WRITE(PLANE_CTL(pipe, 0), 0);
2953                I915_WRITE(PLANE_SURF(pipe, 0), 0);
2954                POSTING_READ(PLANE_CTL(pipe, 0));
2955                return;
2956        }
2957
2958        plane_ctl = PLANE_CTL_ENABLE |
2959                    PLANE_CTL_PIPE_GAMMA_ENABLE |
2960                    PLANE_CTL_PIPE_CSC_ENABLE;
2961
2962        switch (fb->pixel_format) {
2963        case DRM_FORMAT_RGB565:
2964                plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
2965                break;
2966        case DRM_FORMAT_XRGB8888:
2967                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2968                break;
2969        case DRM_FORMAT_ARGB8888:
2970                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2971                plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2972                break;
2973        case DRM_FORMAT_XBGR8888:
2974                plane_ctl |= PLANE_CTL_ORDER_RGBX;
2975                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2976                break;
2977        case DRM_FORMAT_ABGR8888:
2978                plane_ctl |= PLANE_CTL_ORDER_RGBX;
2979                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2980                plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2981                break;
2982        case DRM_FORMAT_XRGB2101010:
2983                plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2984                break;
2985        case DRM_FORMAT_XBGR2101010:
2986                plane_ctl |= PLANE_CTL_ORDER_RGBX;
2987                plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2988                break;
2989        default:
2990                BUG();
2991        }
2992
2993        switch (fb->modifier[0]) {
2994        case DRM_FORMAT_MOD_NONE:
2995                break;
2996        case I915_FORMAT_MOD_X_TILED:
2997                plane_ctl |= PLANE_CTL_TILED_X;
2998                break;
2999        case I915_FORMAT_MOD_Y_TILED:
3000                plane_ctl |= PLANE_CTL_TILED_Y;
3001                break;
3002        case I915_FORMAT_MOD_Yf_TILED:
3003                plane_ctl |= PLANE_CTL_TILED_YF;
3004                break;
3005        default:
3006                MISSING_CASE(fb->modifier[0]);
3007        }
3008
3009        plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3010        if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
3011                plane_ctl |= PLANE_CTL_ROTATE_180;
3012
3013        obj = intel_fb_obj(fb);
3014        stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
3015                                               fb->pixel_format);
3016        surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj);
3017
3018        I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3019        I915_WRITE(PLANE_POS(pipe, 0), 0);
3020        I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
3021        I915_WRITE(PLANE_SIZE(pipe, 0),
3022                   (intel_crtc->config->pipe_src_h - 1) << 16 |
3023                   (intel_crtc->config->pipe_src_w - 1));
3024        I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
3025        I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3026
3027        POSTING_READ(PLANE_SURF(pipe, 0));
3028}
3029
3030/* Assume fb object is pinned & idle & fenced and just update base pointers */
3031static int
3032intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3033                           int x, int y, enum mode_set_atomic state)
3034{
3035        struct drm_device *dev = crtc->dev;
3036        struct drm_i915_private *dev_priv = dev->dev_private;
3037
3038        if (dev_priv->display.disable_fbc)
3039                dev_priv->display.disable_fbc(dev);
3040
3041        dev_priv->display.update_primary_plane(crtc, fb, x, y);
3042
3043        return 0;
3044}
3045
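    /* Complete any page flip still pending on each CRTC's plane. */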
3046static void intel_complete_page_flips(struct drm_device *dev)
3047{
3048        struct drm_crtc *crtc;
3049
3050        for_each_crtc(dev, crtc) {
3051                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3052                enum plane plane = intel_crtc->plane;
3053
3054                intel_prepare_page_flip(dev, plane);
3055                intel_finish_page_flip_plane(dev, plane);
3056        }
3057}
3058
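    /*
     * Re-program the primary plane of every active CRTC from its current
     * framebuffer, e.g. to restore the scanout address after a GPU reset.
     */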
3059static void intel_update_primary_planes(struct drm_device *dev)
3060{
3061        struct drm_i915_private *dev_priv = dev->dev_private;
3062        struct drm_crtc *crtc;
3063
3064        for_each_crtc(dev, crtc) {
3065                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3066
3067                drm_modeset_lock(&crtc->mutex, NULL);
3068                /*
3069                 * FIXME: Once we have proper support for primary planes (and
3070                 * disabling them without disabling the entire crtc) allow again
3071                 * a NULL crtc->primary->fb.
3072                 */
3073                if (intel_crtc->active && crtc->primary->fb)
3074                        dev_priv->display.update_primary_plane(crtc,
3075                                                               crtc->primary->fb,
3076                                                               crtc->x,
3077                                                               crtc->y);
3078                drm_modeset_unlock(&crtc->mutex);
3079        }
3080}
3081
3082void intel_prepare_reset(struct drm_device *dev)
3083{
3084        struct drm_i915_private *dev_priv = to_i915(dev);
3085        struct intel_crtc *crtc;
3086
3087        /* no reset support for gen2 */
3088        if (IS_GEN2(dev))
3089                return;
3090
3091        /* reset doesn't touch the display */
3092        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3093                return;
3094
3095        drm_modeset_lock_all(dev);
3096
3097        /*
3098         * Disabling the crtcs gracefully seems nicer. Also the
3099         * g33 docs say we should at least disable all the planes.
3100         */
3101        for_each_intel_crtc(dev, crtc) {
3102                if (crtc->active)
3103                        dev_priv->display.crtc_disable(&crtc->base);
3104        }
3105}
3106
3107void intel_finish_reset(struct drm_device *dev)
3108{
3109        struct drm_i915_private *dev_priv = to_i915(dev);
3110
3111        /*
3112         * Flips in the rings will be nuked by the reset,
3113         * so complete all pending flips so that user space
3114         * will get its events and not get stuck.
3115         */
3116        intel_complete_page_flips(dev);
3117
3118        /* no reset support for gen2 */
3119        if (IS_GEN2(dev))
3120                return;
3121
3122        /* reset doesn't touch the display */
3123        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
3124                /*
3125                 * Flips in the rings have been nuked by the reset,
3126                 * so update the base address of all primary
3127         * planes to the last fb to make sure we're
3128                 * showing the correct fb after a reset.
3129                 */
3130                intel_update_primary_planes(dev);
3131                return;
3132        }
3133
3134        /*
3135         * The display has been reset as well,
3136         * so need a full re-initialization.
3137         */
3138        intel_runtime_pm_disable_interrupts(dev_priv);
3139        intel_runtime_pm_enable_interrupts(dev_priv);
3140
3141        intel_modeset_init_hw(dev);
3142
3143        spin_lock_irq(&dev_priv->irq_lock);
3144        if (dev_priv->display.hpd_irq_setup)
3145                dev_priv->display.hpd_irq_setup(dev);
3146        spin_unlock_irq(&dev_priv->irq_lock);
3147
3148        intel_modeset_setup_hw_state(dev, true);
3149
3150        intel_hpd_init(dev_priv);
3151
3152        drm_modeset_unlock_all(dev);
3153}
3154
3155static int
3156intel_finish_fb(struct drm_framebuffer *old_fb)
3157{
3158        struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
3159        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3160        bool was_interruptible = dev_priv->mm.interruptible;
3161        int ret;
3162
3163        /* Big Hammer, we also need to ensure that any pending
3164         * MI_WAIT_FOR_EVENT inside a user batch buffer on the
3165         * current scanout is retired before unpinning the old
3166         * framebuffer.
3167         *
3168         * This should only fail upon a hung GPU, in which case we
3169         * can safely continue.
3170         */
3171        dev_priv->mm.interruptible = false;
3172        ret = i915_gem_object_finish_gpu(obj);
3173        dev_priv->mm.interruptible = was_interruptible;
3174
3175        return ret;
3176}
3177
3178static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3179{
3180        struct drm_device *dev = crtc->dev;
3181        struct drm_i915_private *dev_priv = dev->dev_private;
3182        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3183        bool pending;
3184
3185        if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3186            intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3187                return false;
3188
3189        spin_lock_irq(&dev->event_lock);
3190        pending = to_intel_crtc(crtc)->unpin_work != NULL;
3191        spin_unlock_irq(&dev->event_lock);
3192
3193        return pending;
3194}
3195
3196static void intel_update_pipe_size(struct intel_crtc *crtc)
3197{
3198        struct drm_device *dev = crtc->base.dev;
3199        struct drm_i915_private *dev_priv = dev->dev_private;
3200        const struct drm_display_mode *adjusted_mode;
3201
3202        if (!i915.fastboot)
3203                return;
3204
3205        /*
3206         * Update pipe size and adjust fitter if needed: the reason for this is
3207         * that in compute_mode_changes we check the native mode (not the pfit
3208         * mode) to see if we can flip rather than do a full mode set. In the
3209         * fastboot case, we'll flip, but if we don't update the pipesrc and
3210         * pfit state, we'll end up with a big fb scanned out into the wrong
3211         * sized surface.
3212         *
3213         * To fix this properly, we need to hoist the checks up into
3214         * compute_mode_changes (or above), check the actual pfit state and
3215         * whether the platform allows pfit disable with pipe active, and only
3216         * then update the pipesrc and pfit state, even on the flip path.
3217         */
3218
3219        adjusted_mode = &crtc->config->base.adjusted_mode;
3220
3221        I915_WRITE(PIPESRC(crtc->pipe),
3222                   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
3223                   (adjusted_mode->crtc_vdisplay - 1));
3224        if (!crtc->config->pch_pfit.enabled &&
3225            (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
3226             intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3227                I915_WRITE(PF_CTL(crtc->pipe), 0);
3228                I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
3229                I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
3230        }
3231        crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
3232        crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
3233}
3234
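    /* Switch the FDI TX/RX link from a training pattern to the normal pattern. */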
3235static void intel_fdi_normal_train(struct drm_crtc *crtc)
3236{
3237        struct drm_device *dev = crtc->dev;
3238        struct drm_i915_private *dev_priv = dev->dev_private;
3239        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3240        int pipe = intel_crtc->pipe;
3241        u32 reg, temp;
3242
3243        /* enable normal train */
3244        reg = FDI_TX_CTL(pipe);
3245        temp = I915_READ(reg);
3246        if (IS_IVYBRIDGE(dev)) {
3247                temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3248                temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3249        } else {
3250                temp &= ~FDI_LINK_TRAIN_NONE;
3251                temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3252        }
3253        I915_WRITE(reg, temp);
3254
3255        reg = FDI_RX_CTL(pipe);
3256        temp = I915_READ(reg);
3257        if (HAS_PCH_CPT(dev)) {
3258                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3259                temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3260        } else {
3261                temp &= ~FDI_LINK_TRAIN_NONE;
3262                temp |= FDI_LINK_TRAIN_NONE;
3263        }
3264        I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3265
3266        /* wait one idle pattern time */
3267        POSTING_READ(reg);
3268        udelay(1000);
3269
3270        /* IVB wants error correction enabled */
3271        if (IS_IVYBRIDGE(dev))
3272                I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3273                           FDI_FE_ERRC_ENABLE);
3274}
3275
3276/* The FDI link training functions for ILK/Ibexpeak. */
3277static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3278{
3279        struct drm_device *dev = crtc->dev;
3280        struct drm_i915_private *dev_priv = dev->dev_private;
3281        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3282        int pipe = intel_crtc->pipe;
3283        u32 reg, temp, tries;
3284
3285        /* FDI needs bits from pipe first */
3286        assert_pipe_enabled(dev_priv, pipe);
3287
3288        /* Train 1: unmask FDI RX interrupt symbol_lock and bit_lock bits
3289           for the training result */
3290        reg = FDI_RX_IMR(pipe);
3291        temp = I915_READ(reg);
3292        temp &= ~FDI_RX_SYMBOL_LOCK;
3293        temp &= ~FDI_RX_BIT_LOCK;
3294        I915_WRITE(reg, temp);
3295        I915_READ(reg);
3296        udelay(150);
3297
3298        /* enable CPU FDI TX and PCH FDI RX */
3299        reg = FDI_TX_CTL(pipe);
3300        temp = I915_READ(reg);
3301        temp &= ~FDI_DP_PORT_WIDTH_MASK;
3302        temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3303        temp &= ~FDI_LINK_TRAIN_NONE;
3304        temp |= FDI_LINK_TRAIN_PATTERN_1;
3305        I915_WRITE(reg, temp | FDI_TX_ENABLE);
3306
3307        reg = FDI_RX_CTL(pipe);
3308        temp = I915_READ(reg);
3309        temp &= ~FDI_LINK_TRAIN_NONE;
3310        temp |= FDI_LINK_TRAIN_PATTERN_1;
3311        I915_WRITE(reg, temp | FDI_RX_ENABLE);
3312
3313        POSTING_READ(reg);
3314        udelay(150);
3315
3316        /* Ironlake workaround, enable clock pointer after FDI enable */
3317        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3318        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3319                   FDI_RX_PHASE_SYNC_POINTER_EN);
3320
3321        reg = FDI_RX_IIR(pipe);
3322        for (tries = 0; tries < 5; tries++) {
3323                temp = I915_READ(reg);
3324                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3325
3326                if ((temp & FDI_RX_BIT_LOCK)) {
3327                        DRM_DEBUG_KMS("FDI train 1 done.\n");
3328                        I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3329                        break;
3330                }
3331        }
3332        if (tries == 5)
3333                DRM_ERROR("FDI train 1 fail!\n");
3334
3335        /* Train 2 */
3336        reg = FDI_TX_CTL(pipe);
3337        temp = I915_READ(reg);
3338        temp &= ~FDI_LINK_TRAIN_NONE;
3339        temp |= FDI_LINK_TRAIN_PATTERN_2;
3340        I915_WRITE(reg, temp);
3341
3342        reg = FDI_RX_CTL(pipe);
3343        temp = I915_READ(reg);
3344        temp &= ~FDI_LINK_TRAIN_NONE;
3345        temp |= FDI_LINK_TRAIN_PATTERN_2;
3346        I915_WRITE(reg, temp);
3347
3348        POSTING_READ(reg);
3349        udelay(150);
3350
3351        reg = FDI_RX_IIR(pipe);
3352        for (tries = 0; tries < 5; tries++) {
3353                temp = I915_READ(reg);
3354                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3355
3356                if (temp & FDI_RX_SYMBOL_LOCK) {
3357                        I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3358                        DRM_DEBUG_KMS("FDI train 2 done.\n");
3359                        break;
3360                }
3361        }
3362        if (tries == 5)
3363                DRM_ERROR("FDI train 2 fail!\n");
3364
3365        DRM_DEBUG_KMS("FDI train done\n");
3366
3367}
3368
3369static const int snb_b_fdi_train_param[] = {
3370        FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3371        FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3372        FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3373        FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3374};
3375
3376/* The FDI link training functions for SNB/Cougarpoint. */
3377static void gen6_fdi_link_train(struct drm_crtc *crtc)
3378{
3379        struct drm_device *dev = crtc->dev;
3380        struct drm_i915_private *dev_priv = dev->dev_private;
3381        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3382        int pipe = intel_crtc->pipe;
3383        u32 reg, temp, i, retry;
3384
3385        /* Train 1: unmask FDI RX interrupt symbol_lock and bit_lock bits
3386           for the training result */
3387        reg = FDI_RX_IMR(pipe);
3388        temp = I915_READ(reg);
3389        temp &= ~FDI_RX_SYMBOL_LOCK;
3390        temp &= ~FDI_RX_BIT_LOCK;
3391        I915_WRITE(reg, temp);
3392
3393        POSTING_READ(reg);
3394        udelay(150);
3395
3396        /* enable CPU FDI TX and PCH FDI RX */
3397        reg = FDI_TX_CTL(pipe);
3398        temp = I915_READ(reg);
3399        temp &= ~FDI_DP_PORT_WIDTH_MASK;
3400        temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3401        temp &= ~FDI_LINK_TRAIN_NONE;
3402        temp |= FDI_LINK_TRAIN_PATTERN_1;
3403        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3404        /* SNB-B */
3405        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3406        I915_WRITE(reg, temp | FDI_TX_ENABLE);
3407
3408        I915_WRITE(FDI_RX_MISC(pipe),
3409                   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3410
3411        reg = FDI_RX_CTL(pipe);
3412        temp = I915_READ(reg);
3413        if (HAS_PCH_CPT(dev)) {
3414                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3415                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3416        } else {
3417                temp &= ~FDI_LINK_TRAIN_NONE;
3418                temp |= FDI_LINK_TRAIN_PATTERN_1;
3419        }
3420        I915_WRITE(reg, temp | FDI_RX_ENABLE);
3421
3422        POSTING_READ(reg);
3423        udelay(150);
3424
3425        for (i = 0; i < 4; i++) {
3426                reg = FDI_TX_CTL(pipe);
3427                temp = I915_READ(reg);
3428                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3429                temp |= snb_b_fdi_train_param[i];
3430                I915_WRITE(reg, temp);
3431
3432                POSTING_READ(reg);
3433                udelay(500);
3434
3435                for (retry = 0; retry < 5; retry++) {
3436                        reg = FDI_RX_IIR(pipe);
3437                        temp = I915_READ(reg);
3438                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3439                        if (temp & FDI_RX_BIT_LOCK) {
3440                                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3441                                DRM_DEBUG_KMS("FDI train 1 done.\n");
3442                                break;
3443                        }
3444                        udelay(50);
3445                }
3446                if (retry < 5)
3447                        break;
3448        }
3449        if (i == 4)
3450                DRM_ERROR("FDI train 1 fail!\n");
3451
3452        /* Train 2 */
3453        reg = FDI_TX_CTL(pipe);
3454        temp = I915_READ(reg);
3455        temp &= ~FDI_LINK_TRAIN_NONE;
3456        temp |= FDI_LINK_TRAIN_PATTERN_2;
3457        if (IS_GEN6(dev)) {
3458                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3459                /* SNB-B */
3460                temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3461        }
3462        I915_WRITE(reg, temp);
3463
3464        reg = FDI_RX_CTL(pipe);
3465        temp = I915_READ(reg);
3466        if (HAS_PCH_CPT(dev)) {
3467                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3468                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3469        } else {
3470                temp &= ~FDI_LINK_TRAIN_NONE;
3471                temp |= FDI_LINK_TRAIN_PATTERN_2;
3472        }
3473        I915_WRITE(reg, temp);
3474
3475        POSTING_READ(reg);
3476        udelay(150);
3477
3478        for (i = 0; i < 4; i++) {
3479                reg = FDI_TX_CTL(pipe);
3480                temp = I915_READ(reg);
3481                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3482                temp |= snb_b_fdi_train_param[i];
3483                I915_WRITE(reg, temp);
3484
3485                POSTING_READ(reg);
3486                udelay(500);
3487
3488                for (retry = 0; retry < 5; retry++) {
3489                        reg = FDI_RX_IIR(pipe);
3490                        temp = I915_READ(reg);
3491                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3492                        if (temp & FDI_RX_SYMBOL_LOCK) {
3493                                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3494                                DRM_DEBUG_KMS("FDI train 2 done.\n");
3495                                break;
3496                        }
3497                        udelay(50);
3498                }
3499                if (retry < 5)
3500                        break;
3501        }
3502        if (i == 4)
3503                DRM_ERROR("FDI train 2 fail!\n");
3504
3505        DRM_DEBUG_KMS("FDI train done.\n");
3506}
3507
3508/* Manual link training for Ivy Bridge A0 parts */
3509static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3510{
3511        struct drm_device *dev = crtc->dev;
3512        struct drm_i915_private *dev_priv = dev->dev_private;
3513        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3514        int pipe = intel_crtc->pipe;
3515        u32 reg, temp, i, j;
3516
3517        /* Train 1: unmask FDI RX interrupt symbol_lock and bit_lock bits
3518           for the training result */
3519        reg = FDI_RX_IMR(pipe);
3520        temp = I915_READ(reg);
3521        temp &= ~FDI_RX_SYMBOL_LOCK;
3522        temp &= ~FDI_RX_BIT_LOCK;
3523        I915_WRITE(reg, temp);
3524
3525        POSTING_READ(reg);
3526        udelay(150);
3527
3528        DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3529                      I915_READ(FDI_RX_IIR(pipe)));
3530
3531        /* Try each vswing and preemphasis setting twice before moving on */
3532        for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3533                /* disable first in case we need to retry */
3534                reg = FDI_TX_CTL(pipe);
3535                temp = I915_READ(reg);
3536                temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3537                temp &= ~FDI_TX_ENABLE;
3538                I915_WRITE(reg, temp);
3539
3540                reg = FDI_RX_CTL(pipe);
3541                temp = I915_READ(reg);
3542                temp &= ~FDI_LINK_TRAIN_AUTO;
3543                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3544                temp &= ~FDI_RX_ENABLE;
3545                I915_WRITE(reg, temp);
3546
3547                /* enable CPU FDI TX and PCH FDI RX */
3548                reg = FDI_TX_CTL(pipe);
3549                temp = I915_READ(reg);
3550                temp &= ~FDI_DP_PORT_WIDTH_MASK;
3551                temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3552                temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3553                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3554                temp |= snb_b_fdi_train_param[j/2];
3555                temp |= FDI_COMPOSITE_SYNC;
3556                I915_WRITE(reg, temp | FDI_TX_ENABLE);
3557
3558                I915_WRITE(FDI_RX_MISC(pipe),
3559                           FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3560
3561                reg = FDI_RX_CTL(pipe);
3562                temp = I915_READ(reg);
3563                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3564                temp |= FDI_COMPOSITE_SYNC;
3565                I915_WRITE(reg, temp | FDI_RX_ENABLE);
3566
3567                POSTING_READ(reg);
3568                udelay(1); /* should be 0.5us */
3569
3570                for (i = 0; i < 4; i++) {
3571                        reg = FDI_RX_IIR(pipe);
3572                        temp = I915_READ(reg);
3573                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3574
3575                        if (temp & FDI_RX_BIT_LOCK ||
3576                            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3577                                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3578                                DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3579                                              i);
3580                                break;
3581                        }
3582                        udelay(1); /* should be 0.5us */
3583                }
3584                if (i == 4) {
3585                        DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3586                        continue;
3587                }
3588
3589                /* Train 2 */
3590                reg = FDI_TX_CTL(pipe);
3591                temp = I915_READ(reg);
3592                temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3593                temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3594                I915_WRITE(reg, temp);
3595
3596                reg = FDI_RX_CTL(pipe);
3597                temp = I915_READ(reg);
3598                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3599                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3600                I915_WRITE(reg, temp);
3601
3602                POSTING_READ(reg);
3603                udelay(2); /* should be 1.5us */
3604
3605                for (i = 0; i < 4; i++) {
3606                        reg = FDI_RX_IIR(pipe);
3607                        temp = I915_READ(reg);
3608                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3609
3610                        if (temp & FDI_RX_SYMBOL_LOCK ||
3611                            (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3612                                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3613                                DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3614                                              i);
3615                                goto train_done;
3616                        }
3617                        udelay(2); /* should be 1.5us */
3618                }
3619                if (i == 4)
3620                        DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3621        }
3622
3623train_done:
3624        DRM_DEBUG_KMS("FDI train done.\n");
3625}
3626
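    /*
     * Enable the PCH FDI RX PLL, switch the RX side from Rawclk to PCDclk,
     * and make sure the CPU FDI TX PLL is enabled.
     */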
3627static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3628{
3629        struct drm_device *dev = intel_crtc->base.dev;
3630        struct drm_i915_private *dev_priv = dev->dev_private;
3631        int pipe = intel_crtc->pipe;
3632        u32 reg, temp;
3633
3635        /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3636        reg = FDI_RX_CTL(pipe);
3637        temp = I915_READ(reg);
3638        temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3639        temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3640        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3641        I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3642
3643        POSTING_READ(reg);
3644        udelay(200);
3645
3646        /* Switch from Rawclk to PCDclk */
3647        temp = I915_READ(reg);
3648        I915_WRITE(reg, temp | FDI_PCDCLK);
3649
3650        POSTING_READ(reg);
3651        udelay(200);
3652
3653        /* Enable CPU FDI TX PLL, always on for Ironlake */
3654        reg = FDI_TX_CTL(pipe);
3655        temp = I915_READ(reg);
3656        if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3657                I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3658
3659                POSTING_READ(reg);
3660                udelay(100);
3661        }
3662}
3663
3664static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3665{
3666        struct drm_device *dev = intel_crtc->base.dev;
3667        struct drm_i915_private *dev_priv = dev->dev_private;
3668        int pipe = intel_crtc->pipe;
3669        u32 reg, temp;
3670
3671        /* Switch from PCDclk to Rawclk */
3672        reg = FDI_RX_CTL(pipe);
3673        temp = I915_READ(reg);
3674        I915_WRITE(reg, temp & ~FDI_PCDCLK);
3675
3676        /* Disable CPU FDI TX PLL */
3677        reg = FDI_TX_CTL(pipe);
3678        temp = I915_READ(reg);
3679        I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3680
3681        POSTING_READ(reg);
3682        udelay(100);
3683
3684        reg = FDI_RX_CTL(pipe);
3685        temp = I915_READ(reg);
3686        I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3687
3688        /* Wait for the clocks to turn off. */
3689        POSTING_READ(reg);
3690        udelay(100);
3691}
3692
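    /*
     * Disable CPU FDI TX and PCH FDI RX and leave the link configured for
     * training pattern 1.
     */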
3693static void ironlake_fdi_disable(struct drm_crtc *crtc)
3694{
3695        struct drm_device *dev = crtc->dev;
3696        struct drm_i915_private *dev_priv = dev->dev_private;
3697        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3698        int pipe = intel_crtc->pipe;
3699        u32 reg, temp;
3700
3701        /* disable CPU FDI tx and PCH FDI rx */
3702        reg = FDI_TX_CTL(pipe);
3703        temp = I915_READ(reg);
3704        I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3705        POSTING_READ(reg);
3706
3707        reg = FDI_RX_CTL(pipe);
3708        temp = I915_READ(reg);
3709        temp &= ~(0x7 << 16);
3710        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3711        I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3712
3713        POSTING_READ(reg);
3714        udelay(100);
3715
3716        /* Ironlake workaround, disable clock pointer after downing FDI */
3717        if (HAS_PCH_IBX(dev))
3718                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3719
3720        /* still set train pattern 1 */
3721        reg = FDI_TX_CTL(pipe);
3722        temp = I915_READ(reg);
3723        temp &= ~FDI_LINK_TRAIN_NONE;
3724        temp |= FDI_LINK_TRAIN_PATTERN_1;
3725        I915_WRITE(reg, temp);
3726
3727        reg = FDI_RX_CTL(pipe);
3728        temp = I915_READ(reg);
3729        if (HAS_PCH_CPT(dev)) {
3730                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3731                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3732        } else {
3733                temp &= ~FDI_LINK_TRAIN_NONE;
3734                temp |= FDI_LINK_TRAIN_PATTERN_1;
3735        }
3736        /* BPC in FDI rx is consistent with that in PIPECONF */
3737        temp &= ~(0x07 << 16);
3738        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3739        I915_WRITE(reg, temp);
3740
3741        POSTING_READ(reg);
3742        udelay(100);
3743}
3744
3745bool intel_has_pending_fb_unpin(struct drm_device *dev)
3746{
3747        struct intel_crtc *crtc;
3748
3749        /* Note that we don't need to be called with mode_config.lock here
3750         * as our list of CRTC objects is static for the lifetime of the
3751         * device and so cannot disappear as we iterate. Similarly, we can
3752         * happily treat the predicates as racy, atomic checks as userspace
3753         * cannot claim and pin a new fb without at least acquiring the
3754         * struct_mutex and so serialising with us.
3755         */
3756        for_each_intel_crtc(dev, crtc) {
3757                if (atomic_read(&crtc->unpin_work_count) == 0)
3758                        continue;
3759
3760                if (crtc->unpin_work)
3761                        intel_wait_for_vblank(dev, crtc->pipe);
3762
3763                return true;
3764        }
3765
3766        return false;
3767}
3768
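    /*
     * Finish a page flip: clear unpin_work, deliver the vblank event to
     * userspace, drop the vblank reference and queue the unpin work.
     */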
3769static void page_flip_completed(struct intel_crtc *intel_crtc)
3770{
3771        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3772        struct intel_unpin_work *work = intel_crtc->unpin_work;
3773
3774        /* ensure that the unpin work is consistent wrt ->pending. */
3775        smp_rmb();
3776        intel_crtc->unpin_work = NULL;
3777
3778        if (work->event)
3779                drm_send_vblank_event(intel_crtc->base.dev,
3780                                      intel_crtc->pipe,
3781                                      work->event);
3782
3783        drm_crtc_vblank_put(&intel_crtc->base);
3784
3785        wake_up_all(&dev_priv->pending_flip_queue);
3786        queue_work(dev_priv->wq, &work->work);
3787
3788        trace_i915_flip_complete(intel_crtc->plane,
3789                                 work->pending_flip_obj);
3790}
3791
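    /*
     * Wait for any pending page flip on @crtc to complete (removing it as
     * stuck after a 60 second timeout) and for rendering to the current
     * framebuffer to finish.
     */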
3792void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3793{
3794        struct drm_device *dev = crtc->dev;
3795        struct drm_i915_private *dev_priv = dev->dev_private;
3796
3797        WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3798        if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3799                                       !intel_crtc_has_pending_flip(crtc),
3800                                       60*HZ) == 0)) {
3801                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3802
3803                spin_lock_irq(&dev->event_lock);
3804                if (intel_crtc->unpin_work) {
3805                        WARN_ONCE(1, "Removing stuck page flip\n");
3806                        page_flip_completed(intel_crtc);
3807                }
3808                spin_unlock_irq(&dev->event_lock);
3809        }
3810
3811        if (crtc->primary->fb) {
3812                mutex_lock(&dev->struct_mutex);
3813                intel_finish_fb(crtc->primary->fb);
3814                mutex_unlock(&dev->struct_mutex);
3815        }
3816}
3817
3818/* Program iCLKIP clock to the desired frequency */
3819static void lpt_program_iclkip(struct drm_crtc *crtc)
3820{
3821        struct drm_device *dev = crtc->dev;
3822        struct drm_i915_private *dev_priv = dev->dev_private;
3823        int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3824        u32 divsel, phaseinc, auxdiv, phasedir = 0;
3825        u32 temp;
3826
3827        mutex_lock(&dev_priv->dpio_lock);
3828
3829        /* It is necessary to ungate the pixclk gate prior to programming
3830         * the divisors, and gate it back when it is done.
3831         */
3832        I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3833
3834        /* Disable SSCCTL */
3835        intel_sbi_write(dev_priv, SBI_SSCCTL6,
3836                        intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3837                                SBI_SSCCTL_DISABLE,
3838                        SBI_ICLK);
3839
3840        /* 20MHz is a corner case which is out of range for the 7-bit divisor */
3841        if (clock == 20000) {
3842                auxdiv = 1;
3843                divsel = 0x41;
3844                phaseinc = 0x20;
3845        } else {
3846                /* The iCLK virtual clock root frequency is in MHz,
3847                 * but the adjusted_mode->crtc_clock is in kHz. To get the
3848                 * divisors, it is necessary to divide one by the other, so we
3849                 * convert the virtual clock precision to kHz here for higher
3850                 * precision.
3851                 */
3852                u32 iclk_virtual_root_freq = 172800 * 1000;
3853                u32 iclk_pi_range = 64;
3854                u32 desired_divisor, msb_divisor_value, pi_value;
3855
3856                desired_divisor = (iclk_virtual_root_freq / clock);
3857                msb_divisor_value = desired_divisor / iclk_pi_range;
3858                pi_value = desired_divisor % iclk_pi_range;
3859
3860                auxdiv = 0;
3861                divsel = msb_divisor_value - 2;
3862                phaseinc = pi_value;
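                    /*
                     * Worked example, assuming clock = 108000 kHz:
                     * desired_divisor = 172800000 / 108000 = 1600, so
                     * msb_divisor_value = 25, pi_value = 0, giving
                     * divsel = 23, phaseinc = 0, auxdiv = 0.
                     */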
3863        }
3864
3865        /* This should not happen with any sane values */
3866        WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3867                ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3868        WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3869                ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3870
3871        DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3872                        clock,
3873                        auxdiv,
3874                        divsel,
3875                        phasedir,
3876                        phaseinc);
3877
3878        /* Program SSCDIVINTPHASE6 */
3879        temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3880        temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3881        temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3882        temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3883        temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3884        temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3885        temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3886        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3887
3888        /* Program SSCAUXDIV */
3889        temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3890        temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3891        temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3892        intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3893
3894        /* Enable modulator and associated divider */
3895        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3896        temp &= ~SBI_SSCCTL_DISABLE;
3897        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3898
3899        /* Wait for initialization time */
3900        udelay(24);
3901
3902        I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3903
3904        mutex_unlock(&dev_priv->dpio_lock);
3905}
3906
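    /* Copy the CPU transcoder timings to the PCH transcoder registers. */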
3907static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3908                                                enum pipe pch_transcoder)
3909{
3910        struct drm_device *dev = crtc->base.dev;
3911        struct drm_i915_private *dev_priv = dev->dev_private;
3912        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
3913
3914        I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3915                   I915_READ(HTOTAL(cpu_transcoder)));
3916        I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3917                   I915_READ(HBLANK(cpu_transcoder)));
3918        I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3919                   I915_READ(HSYNC(cpu_transcoder)));
3920
3921        I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3922                   I915_READ(VTOTAL(cpu_transcoder)));
3923        I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3924                   I915_READ(VBLANK(cpu_transcoder)));
3925        I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3926                   I915_READ(VSYNC(cpu_transcoder)));
3927        I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3928                   I915_READ(VSYNCSHIFT(cpu_transcoder)));
3929}
3930
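    /*
     * Update FDI_BC_BIFURCATION_SELECT in SOUTH_CHICKEN1; FDI RX must be
     * disabled on pipes B and C while the setting is changed.
     */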
3931static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
3932{
3933        struct drm_i915_private *dev_priv = dev->dev_private;
3934        uint32_t temp;
3935
3936        temp = I915_READ(SOUTH_CHICKEN1);
3937        if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
3938                return;
3939
3940        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3941        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3942
3943        temp &= ~FDI_BC_BIFURCATION_SELECT;
3944        if (enable)
3945                temp |= FDI_BC_BIFURCATION_SELECT;
3946
3947        DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
3948        I915_WRITE(SOUTH_CHICKEN1, temp);
3949        POSTING_READ(SOUTH_CHICKEN1);
3950}
3951
3952static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3953{
3954        struct drm_device *dev = intel_crtc->base.dev;
3955
3956        switch (intel_crtc->pipe) {
3957        case PIPE_A:
3958                break;
3959        case PIPE_B:
3960                if (intel_crtc->config->fdi_lanes > 2)
3961                        cpt_set_fdi_bc_bifurcation(dev, false);
3962                else
3963                        cpt_set_fdi_bc_bifurcation(dev, true);
3964
3965                break;
3966        case PIPE_C:
3967                cpt_set_fdi_bc_bifurcation(dev, true);
3968
3969                break;
3970        default:
3971                BUG();
3972        }
3973}
3974
3975/*
3976 * Enable PCH resources required for PCH ports:
3977 *   - PCH PLLs
3978 *   - FDI training & RX/TX
3979 *   - update transcoder timings
3980 *   - DP transcoding bits
3981 *   - transcoder
3982 */
3983static void ironlake_pch_enable(struct drm_crtc *crtc)
3984{
3985        struct drm_device *dev = crtc->dev;
3986        struct drm_i915_private *dev_priv = dev->dev_private;
3987        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3988        int pipe = intel_crtc->pipe;
3989        u32 reg, temp;
3990
3991        assert_pch_transcoder_disabled(dev_priv, pipe);
3992
3993        if (IS_IVYBRIDGE(dev))
3994                ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3995
3996        /* Write the TU size bits before fdi link training, so that error
3997         * detection works. */
3998        I915_WRITE(FDI_RX_TUSIZE1(pipe),
3999                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4000
4001        /* For PCH output, training FDI link */
4002        dev_priv->display.fdi_link_train(crtc);
4003
4004        /* We need to program the right clock selection before writing the pixel
4005         * multiplier into the DPLL. */
4006        if (HAS_PCH_CPT(dev)) {
4007                u32 sel;
4008
4009                temp = I915_READ(PCH_DPLL_SEL);
4010                temp |= TRANS_DPLL_ENABLE(pipe);
4011                sel = TRANS_DPLLB_SEL(pipe);
4012                if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
4013                        temp |= sel;
4014                else
4015                        temp &= ~sel;
4016                I915_WRITE(PCH_DPLL_SEL, temp);
4017        }
4018
4019        /* XXX: PCH PLLs can be enabled any time before we enable the PCH
4020         * transcoder, and we actually should do this to not upset any PCH
4021         * transcoder that already uses the clock when we share it.
4022         *
4023         * Note that enable_shared_dpll tries to do the right thing, but
4024         * get_shared_dpll unconditionally resets the pll - we need that to have
4025         * the right LVDS enable sequence. */
4026        intel_enable_shared_dpll(intel_crtc);
4027
4028        /* set transcoder timing, panel must allow it */
4029        assert_panel_unlocked(dev_priv, pipe);
4030        ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4031
4032        intel_fdi_normal_train(crtc);
4033
4034        /* For PCH DP, enable TRANS_DP_CTL */
4035        if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4036                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4037                reg = TRANS_DP_CTL(pipe);
4038                temp = I915_READ(reg);
4039                temp &= ~(TRANS_DP_PORT_SEL_MASK |
4040                          TRANS_DP_SYNC_MASK |
4041                          TRANS_DP_BPC_MASK);
4042                temp |= (TRANS_DP_OUTPUT_ENABLE |
4043                         TRANS_DP_ENH_FRAMING);
4044                temp |= bpc << 9; /* same format but at 11:9 */
4045
4046                if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
4047                        temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4048                if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
4049                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4050
4051                switch (intel_trans_dp_port_sel(crtc)) {
4052                case PCH_DP_B:
4053                        temp |= TRANS_DP_PORT_SEL_B;
4054                        break;
4055                case PCH_DP_C:
4056                        temp |= TRANS_DP_PORT_SEL_C;
4057                        break;
4058                case PCH_DP_D:
4059                        temp |= TRANS_DP_PORT_SEL_D;
4060                        break;
4061                default:
4062                        BUG();
4063                }
4064
4065                I915_WRITE(reg, temp);
4066        }
4067
4068        ironlake_enable_pch_transcoder(dev_priv, pipe);
4069}
4070
4071static void lpt_pch_enable(struct drm_crtc *crtc)
4072{
4073        struct drm_device *dev = crtc->dev;
4074        struct drm_i915_private *dev_priv = dev->dev_private;
4075        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4076        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4077
4078        assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4079
4080        lpt_program_iclkip(crtc);
4081
4082        /* Set transcoder timing. */
4083        ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4084
4085        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4086}
4087
4088void intel_put_shared_dpll(struct intel_crtc *crtc)
4089{
4090        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
4091
4092        if (pll == NULL)
4093                return;
4094
4095        if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
4096                WARN(1, "bad %s crtc mask\n", pll->name);
4097                return;
4098        }
4099
4100        pll->config.crtc_mask &= ~(1 << crtc->pipe);
4101        if (pll->config.crtc_mask == 0) {
4102                WARN_ON(pll->on);
4103                WARN_ON(pll->active);
4104        }
4105
4106        crtc->config->shared_dpll = DPLL_ID_PRIVATE;
4107}
4108
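    /*
     * Find a shared DPLL for the CRTC: IBX has a fixed per-pipe mapping;
     * otherwise reuse a DPLL whose hw state matches, or fall back to an
     * unused one. Returns NULL if nothing suitable is available.
     */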
4109struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4110                                                struct intel_crtc_state *crtc_state)
4111{
4112        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
4113        struct intel_shared_dpll *pll;
4114        enum intel_dpll_id i;
4115
4116        if (HAS_PCH_IBX(dev_priv->dev)) {
4117                /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4118                i = (enum intel_dpll_id) crtc->pipe;
4119                pll = &dev_priv->shared_dplls[i];
4120
4121                DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4122                              crtc->base.base.id, pll->name);
4123
4124                WARN_ON(pll->new_config->crtc_mask);
4125
4126                goto found;
4127        }
4128
4129        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4130                pll = &dev_priv->shared_dplls[i];
4131
4132                /* Only want to check enabled timings first */
4133                if (pll->new_config->crtc_mask == 0)
4134                        continue;
4135
4136                if (memcmp(&crtc_state->dpll_hw_state,
4137                           &pll->new_config->hw_state,
4138                           sizeof(pll->new_config->hw_state)) == 0) {
4139                        DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %d)\n",
4140                                      crtc->base.base.id, pll->name,
4141                                      pll->new_config->crtc_mask,
4142                                      pll->active);
4143                        goto found;
4144                }
4145        }
4146
4147        /* Ok no matching timings, maybe there's a free one? */
4148        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4149                pll = &dev_priv->shared_dplls[i];
4150                if (pll->new_config->crtc_mask == 0) {
4151                        DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4152                                      crtc->base.base.id, pll->name);
4153                        goto found;
4154                }
4155        }
4156
4157        return NULL;
4158
4159found:
4160        if (pll->new_config->crtc_mask == 0)
4161                pll->new_config->hw_state = crtc_state->dpll_hw_state;
4162
4163        crtc_state->shared_dpll = i;
4164        DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4165                         pipe_name(crtc->pipe));
4166
4167        pll->new_config->crtc_mask |= 1 << crtc->pipe;
4168
4169        return pll;
4170}
4171
4172/**
4173 * intel_shared_dpll_start_config - start a new PLL staged config
4174 * @dev_priv: i915 device private
4175 * @clear_pipes: mask of pipes that will have their PLLs freed
4176 *
4177 * Starts a new PLL staged config, copying the current config but
4178 * releasing the references of pipes specified in clear_pipes.
4179 */
4180static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
4181                                          unsigned clear_pipes)
4182{
4183        struct intel_shared_dpll *pll;
4184        enum intel_dpll_id i;
4185
4186        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4187                pll = &dev_priv->shared_dplls[i];
4188
4189                pll->new_config = kmemdup(&pll->config, sizeof pll->config,
4190                                          GFP_KERNEL);
4191                if (!pll->new_config)
4192                        goto cleanup;
4193
4194                pll->new_config->crtc_mask &= ~clear_pipes;
4195        }
4196
4197        return 0;
4198
4199cleanup:
4200        while (--i >= 0) {
4201                pll = &dev_priv->shared_dplls[i];
4202                kfree(pll->new_config);
4203                pll->new_config = NULL;
4204        }
4205
4206        return -ENOMEM;
4207}
4208
4209static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
4210{
4211        struct intel_shared_dpll *pll;
4212        enum intel_dpll_id i;
4213
4214        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4215                pll = &dev_priv->shared_dplls[i];
4216
4217                WARN_ON(pll->new_config == &pll->config);
4218
4219                pll->config = *pll->new_config;
4220                kfree(pll->new_config);
4221                pll->new_config = NULL;
4222        }
4223}
4224
4225static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
4226{
4227        struct intel_shared_dpll *pll;
4228        enum intel_dpll_id i;
4229
4230        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4231                pll = &dev_priv->shared_dplls[i];
4232
4233                WARN_ON(pll->new_config == &pll->config);
4234
4235                kfree(pll->new_config);
4236                pll->new_config = NULL;
4237        }
4238}
4239
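    /*
     * Verify that the pipe is actually running after a mode set by waiting
     * for the scanline counter (PIPEDSL) to advance.
     */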
4240static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4241{
4242        struct drm_i915_private *dev_priv = dev->dev_private;
4243        int dslreg = PIPEDSL(pipe);
4244        u32 temp;
4245
4246        temp = I915_READ(dslreg);
4247        udelay(500);
4248        if (wait_for(I915_READ(dslreg) != temp, 5)) {
4249                if (wait_for(I915_READ(dslreg) != temp, 5))
4250                        DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4251        }
4252}
4253
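    /* Enable the pipe scaler (PS_CTL) when the pipe config requests panel fitting. */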
4254static void skylake_pfit_enable(struct intel_crtc *crtc)
4255{
4256        struct drm_device *dev = crtc->base.dev;
4257        struct drm_i915_private *dev_priv = dev->dev_private;
4258        int pipe = crtc->pipe;
4259
4260        if (crtc->config->pch_pfit.enabled) {
4261                I915_WRITE(PS_CTL(pipe), PS_ENABLE);
4262                I915_WRITE(PS_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4263                I915_WRITE(PS_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4264        }
4265}
4266
4267static void ironlake_pfit_enable(struct intel_crtc *crtc)
4268{
4269        struct drm_device *dev = crtc->base.dev;
4270        struct drm_i915_private *dev_priv = dev->dev_private;
4271        int pipe = crtc->pipe;
4272
4273        if (crtc->config->pch_pfit.enabled) {
4274                /* Force use of hard-coded filter coefficients
4275                 * as some pre-programmed values are broken,
4276                 * e.g. x201.
4277                 */
4278                if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4279                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4280                                                 PF_PIPE_SEL_IVB(pipe));
4281                else
4282                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4283                I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4284                I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4285        }
4286}
4287
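    /* Restore every sprite plane on this CRTC's pipe from its saved state. */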
4288static void intel_enable_sprite_planes(struct drm_crtc *crtc)
4289{
4290        struct drm_device *dev = crtc->dev;
4291        enum pipe pipe = to_intel_crtc(crtc)->pipe;
4292        struct drm_plane *plane;
4293        struct intel_plane *intel_plane;
4294
4295        drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4296                intel_plane = to_intel_plane(plane);
4297                if (intel_plane->pipe == pipe)
4298                        intel_plane_restore(&intel_plane->base);
4299        }
4300}
4301
4302/*
4303 * Disable a plane internally without actually modifying the plane's state.
4304 * This will allow us to easily restore the plane later by just reprogramming
4305 * its state.
4306 */
4307static void disable_plane_internal(struct drm_plane *plane)
4308{
4309        struct intel_plane *intel_plane = to_intel_plane(plane);
4310        struct drm_plane_state *state =
4311                plane->funcs->atomic_duplicate_state(plane);
4312        struct intel_plane_state *intel_state = to_intel_plane_state(state);
4313
4314        intel_state->visible = false;
4315        intel_plane->commit_plane(plane, intel_state);
4316
4317        intel_plane_destroy_state(plane, state);
4318}
4319
4320static void intel_disable_sprite_planes(struct drm_crtc *crtc)
4321{
4322        struct drm_device *dev = crtc->dev;
4323        enum pipe pipe = to_intel_crtc(crtc)->pipe;
4324        struct drm_plane *plane;
4325        struct intel_plane *intel_plane;
4326
4327        drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4328                intel_plane = to_intel_plane(plane);
4329                if (plane->fb && intel_plane->pipe == pipe)
4330                        disable_plane_internal(plane);
4331        }
4332}
4333
4334void hsw_enable_ips(struct intel_crtc *crtc)
4335{
4336        struct drm_device *dev = crtc->base.dev;
4337        struct drm_i915_private *dev_priv = dev->dev_private;
4338
4339        if (!crtc->config->ips_enabled)
4340                return;
4341
4342        /* We can only enable IPS after we enable a plane and wait for a vblank */
4343        intel_wait_for_vblank(dev, crtc->pipe);
4344
4345        assert_plane_enabled(dev_priv, crtc->plane);
4346        if (IS_BROADWELL(dev)) {
4347                mutex_lock(&dev_priv->rps.hw_lock);
4348                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4349                mutex_unlock(&dev_priv->rps.hw_lock);
4350                /* Quoting Art Runyan: "it's not safe to expect any particular
4351                 * value in IPS_CTL bit 31 after enabling IPS through the
4352                 * mailbox." Moreover, the mailbox may return a bogus state,
4353                 * so we need to just enable it and continue on.
4354                 */
4355        } else {
4356                I915_WRITE(IPS_CTL, IPS_ENABLE);
4357                /* The bit only becomes 1 in the next vblank, so this wait here
4358                 * is essentially intel_wait_for_vblank. If we don't have this
4359                 * and don't wait for vblanks until the end of crtc_enable, then
4360                 * the HW state readout code will complain that the expected
4361                 * IPS_CTL value is not the one we read. */
4362                if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4363                        DRM_ERROR("Timed out waiting for IPS enable\n");
4364        }
4365}
4366
4367void hsw_disable_ips(struct intel_crtc *crtc)
4368{
4369        struct drm_device *dev = crtc->base.dev;
4370        struct drm_i915_private *dev_priv = dev->dev_private;
4371
4372        if (!crtc->config->ips_enabled)
4373                return;
4374
4375        assert_plane_enabled(dev_priv, crtc->plane);
4376        if (IS_BROADWELL(dev)) {
4377                mutex_lock(&dev_priv->rps.hw_lock);
4378                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4379                mutex_unlock(&dev_priv->rps.hw_lock);
4380                /* wait for pcode to finish disabling IPS, which may take up to 42ms */
4381                if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4382                        DRM_ERROR("Timed out waiting for IPS disable\n");
4383        } else {
4384                I915_WRITE(IPS_CTL, 0);
4385                POSTING_READ(IPS_CTL);
4386        }
4387
4388        /* We need to wait for a vblank before we can disable the plane. */
4389        intel_wait_for_vblank(dev, crtc->pipe);
4390}
4391
4392/** Loads the palette/gamma unit for the CRTC with the prepared values */
4393static void intel_crtc_load_lut(struct drm_crtc *crtc)
4394{
4395        struct drm_device *dev = crtc->dev;
4396        struct drm_i915_private *dev_priv = dev->dev_private;
4397        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4398        enum pipe pipe = intel_crtc->pipe;
4399        int palreg = PALETTE(pipe);
4400        int i;
4401        bool reenable_ips = false;
4402
4403        /* The clocks have to be on to load the palette. */
4404        if (!crtc->state->enable || !intel_crtc->active)
4405                return;
4406
4407        if (!HAS_PCH_SPLIT(dev_priv->dev)) {
4408                if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
4409                        assert_dsi_pll_enabled(dev_priv);
4410                else
4411                        assert_pll_enabled(dev_priv, pipe);
4412        }
4413
4414        /* use legacy palette for Ironlake */
4415        if (!HAS_GMCH_DISPLAY(dev))
4416                palreg = LGC_PALETTE(pipe);
4417
4418        /* Workaround: Do not read or write the pipe palette/gamma data while
4419         * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
4420         */
4421        if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
4422            ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
4423             GAMMA_MODE_MODE_SPLIT)) {
4424                hsw_disable_ips(intel_crtc);
4425                reenable_ips = true;
4426        }
4427
4428        for (i = 0; i < 256; i++) {
4429                I915_WRITE(palreg + 4 * i,
4430                           (intel_crtc->lut_r[i] << 16) |
4431                           (intel_crtc->lut_g[i] << 8) |
4432                           intel_crtc->lut_b[i]);
4433        }
4434
4435        if (reenable_ips)
4436                hsw_enable_ips(intel_crtc);
4437}
4438
4439static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
4440{
4441        if (!enable && intel_crtc->overlay) {
4442                struct drm_device *dev = intel_crtc->base.dev;
4443                struct drm_i915_private *dev_priv = dev->dev_private;
4444
4445                mutex_lock(&dev->struct_mutex);
4446                dev_priv->mm.interruptible = false;
4447                (void) intel_overlay_switch_off(intel_crtc->overlay);
4448                dev_priv->mm.interruptible = true;
4449                mutex_unlock(&dev->struct_mutex);
4450        }
4451
4452        /* Let userspace switch the overlay on again. In most cases userspace
4453         * has to recompute where to put it anyway.
4454         */
4455}
4456
4457static void intel_crtc_enable_planes(struct drm_crtc *crtc)
4458{
4459        struct drm_device *dev = crtc->dev;
4460        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4461        int pipe = intel_crtc->pipe;
4462
4463        intel_enable_primary_hw_plane(crtc->primary, crtc);
4464        intel_enable_sprite_planes(crtc);
4465        intel_crtc_update_cursor(crtc, true);
4466        intel_crtc_dpms_overlay(intel_crtc, true);
4467
4468        hsw_enable_ips(intel_crtc);
4469
4470        mutex_lock(&dev->struct_mutex);
4471        intel_fbc_update(dev);
4472        mutex_unlock(&dev->struct_mutex);
4473
4474        /*
4475         * FIXME: Once we grow proper nuclear flip support out of this we need
4476         * to compute the mask of flip planes precisely. For the time being
4477         * consider this a flip from a NULL plane.
4478         */
4479        intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4480}
4481
4482static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4483{
4484        struct drm_device *dev = crtc->dev;
4485        struct drm_i915_private *dev_priv = dev->dev_private;
4486        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4487        int pipe = intel_crtc->pipe;
4488
4489        intel_crtc_wait_for_pending_flips(crtc);
4490
4491        if (dev_priv->fbc.crtc == intel_crtc)
4492                intel_fbc_disable(dev);
4493
4494        hsw_disable_ips(intel_crtc);
4495
4496        intel_crtc_dpms_overlay(intel_crtc, false);
4497        intel_crtc_update_cursor(crtc, false);
4498        intel_disable_sprite_planes(crtc);
4499        intel_disable_primary_hw_plane(crtc->primary, crtc);
4500
4501        /*
4502         * FIXME: Once we grow proper nuclear flip support out of this we need
4503         * to compute the mask of flip planes precisely. For the time being
4504         * consider this a flip to a NULL plane.
4505         */
4506        intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4507}
4508
4509static void ironlake_crtc_enable(struct drm_crtc *crtc)
4510{
4511        struct drm_device *dev = crtc->dev;
4512        struct drm_i915_private *dev_priv = dev->dev_private;
4513        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4514        struct intel_encoder *encoder;
4515        int pipe = intel_crtc->pipe;
4516
4517        WARN_ON(!crtc->state->enable);
4518
4519        if (intel_crtc->active)
4520                return;
4521
4522        if (intel_crtc->config->has_pch_encoder)
4523                intel_prepare_shared_dpll(intel_crtc);
4524
4525        if (intel_crtc->config->has_dp_encoder)
4526                intel_dp_set_m_n(intel_crtc, M1_N1);
4527
4528        intel_set_pipe_timings(intel_crtc);
4529
4530        if (intel_crtc->config->has_pch_encoder) {
4531                intel_cpu_transcoder_set_m_n(intel_crtc,
4532                                     &intel_crtc->config->fdi_m_n, NULL);
4533        }
4534
4535        ironlake_set_pipeconf(crtc);
4536
4537        intel_crtc->active = true;
4538
4539        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4540        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4541
4542        for_each_encoder_on_crtc(dev, crtc, encoder)
4543                if (encoder->pre_enable)
4544                        encoder->pre_enable(encoder);
4545
4546        if (intel_crtc->config->has_pch_encoder) {
4547                /* Note: FDI PLL enabling _must_ be done before we enable the
4548                 * cpu pipes, hence this is separate from all the other fdi/pch
4549                 * enabling. */
4550                ironlake_fdi_pll_enable(intel_crtc);
4551        } else {
4552                assert_fdi_tx_disabled(dev_priv, pipe);
4553                assert_fdi_rx_disabled(dev_priv, pipe);
4554        }
4555
4556        ironlake_pfit_enable(intel_crtc);
4557
4558        /*
4559         * On ILK+ the LUT must be loaded before the pipe is running, but with
4560         * clocks enabled
4561         */
4562        intel_crtc_load_lut(crtc);
4563
4564        intel_update_watermarks(crtc);
4565        intel_enable_pipe(intel_crtc);
4566
4567        if (intel_crtc->config->has_pch_encoder)
4568                ironlake_pch_enable(crtc);
4569
4570        assert_vblank_disabled(crtc);
4571        drm_crtc_vblank_on(crtc);
4572
4573        for_each_encoder_on_crtc(dev, crtc, encoder)
4574                encoder->enable(encoder);
4575
4576        if (HAS_PCH_CPT(dev))
4577                cpt_verify_modeset(dev, intel_crtc->pipe);
4578
4579        intel_crtc_enable_planes(crtc);
4580}
4581
4582/* IPS only exists on ULT machines and is tied to pipe A. */
4583static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4584{
4585        return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4586}
4587
4588/*
4589 * This implements the workaround described in the "notes" section of the mode
4590 * set sequence documentation. When going from no pipes or single pipe to
4591 * multiple pipes, and planes are enabled after the pipe, we need to wait at
4592 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4593 */
4594static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4595{
4596        struct drm_device *dev = crtc->base.dev;
4597        struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4598
4599        /* We want to get the other_active_crtc only if there's only 1 other
4600         * active crtc. */
4601        for_each_intel_crtc(dev, crtc_it) {
4602                if (!crtc_it->active || crtc_it == crtc)
4603                        continue;
4604
4605                if (other_active_crtc)
4606                        return;
4607
4608                other_active_crtc = crtc_it;
4609        }
4610        if (!other_active_crtc)
4611                return;
4612
4613        intel_wait_for_vblank(dev, other_active_crtc->pipe);
4614        intel_wait_for_vblank(dev, other_active_crtc->pipe);
4615}
4616
4617static void haswell_crtc_enable(struct drm_crtc *crtc)
4618{
4619        struct drm_device *dev = crtc->dev;
4620        struct drm_i915_private *dev_priv = dev->dev_private;
4621        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4622        struct intel_encoder *encoder;
4623        int pipe = intel_crtc->pipe;
4624
4625        WARN_ON(!crtc->state->enable);
4626
4627        if (intel_crtc->active)
4628                return;
4629
4630        if (intel_crtc_to_shared_dpll(intel_crtc))
4631                intel_enable_shared_dpll(intel_crtc);
4632
4633        if (intel_crtc->config->has_dp_encoder)
4634                intel_dp_set_m_n(intel_crtc, M1_N1);
4635
4636        intel_set_pipe_timings(intel_crtc);
4637
4638        if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
4639                I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
4640                           intel_crtc->config->pixel_multiplier - 1);
4641        }
4642
4643        if (intel_crtc->config->has_pch_encoder) {
4644                intel_cpu_transcoder_set_m_n(intel_crtc,
4645                                     &intel_crtc->config->fdi_m_n, NULL);
4646        }
4647
4648        haswell_set_pipeconf(crtc);
4649
4650        intel_set_pipe_csc(crtc);
4651
4652        intel_crtc->active = true;
4653
4654        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4655        for_each_encoder_on_crtc(dev, crtc, encoder)
4656                if (encoder->pre_enable)
4657                        encoder->pre_enable(encoder);
4658
4659        if (intel_crtc->config->has_pch_encoder) {
4660                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4661                                                      true);
4662                dev_priv->display.fdi_link_train(crtc);
4663        }
4664
4665        intel_ddi_enable_pipe_clock(intel_crtc);
4666
4667        if (IS_SKYLAKE(dev))
4668                skylake_pfit_enable(intel_crtc);
4669        else
4670                ironlake_pfit_enable(intel_crtc);
4671
4672        /*
4673         * On ILK+ the LUT must be loaded before the pipe is running, but with
4674         * clocks enabled
4675         */
4676        intel_crtc_load_lut(crtc);
4677
4678        intel_ddi_set_pipe_settings(crtc);
4679        intel_ddi_enable_transcoder_func(crtc);
4680
4681        intel_update_watermarks(crtc);
4682        intel_enable_pipe(intel_crtc);
4683
4684        if (intel_crtc->config->has_pch_encoder)
4685                lpt_pch_enable(crtc);
4686
4687        if (intel_crtc->config->dp_encoder_is_mst)
4688                intel_ddi_set_vc_payload_alloc(crtc, true);
4689
4690        assert_vblank_disabled(crtc);
4691        drm_crtc_vblank_on(crtc);
4692
4693        for_each_encoder_on_crtc(dev, crtc, encoder) {
4694                encoder->enable(encoder);
4695                intel_opregion_notify_encoder(encoder, true);
4696        }
4697
4698        /* If we change the relative order between pipe/planes enabling, we need
4699         * to change the workaround. */
4700        haswell_mode_set_planes_workaround(intel_crtc);
4701        intel_crtc_enable_planes(crtc);
4702}
4703
4704static void skylake_pfit_disable(struct intel_crtc *crtc)
4705{
4706        struct drm_device *dev = crtc->base.dev;
4707        struct drm_i915_private *dev_priv = dev->dev_private;
4708        int pipe = crtc->pipe;
4709
4710        /* To avoid upsetting the power well on haswell, only disable the pfit if
4711         * it's in use. The hw state code will make sure we get this right. */
4712        if (crtc->config->pch_pfit.enabled) {
4713                I915_WRITE(PS_CTL(pipe), 0);
4714                I915_WRITE(PS_WIN_POS(pipe), 0);
4715                I915_WRITE(PS_WIN_SZ(pipe), 0);
4716        }
4717}
4718
4719static void ironlake_pfit_disable(struct intel_crtc *crtc)
4720{
4721        struct drm_device *dev = crtc->base.dev;
4722        struct drm_i915_private *dev_priv = dev->dev_private;
4723        int pipe = crtc->pipe;
4724
4725        /* To avoid upsetting the power well on haswell, only disable the pfit if
4726         * it's in use. The hw state code will make sure we get this right. */
4727        if (crtc->config->pch_pfit.enabled) {
4728                I915_WRITE(PF_CTL(pipe), 0);
4729                I915_WRITE(PF_WIN_POS(pipe), 0);
4730                I915_WRITE(PF_WIN_SZ(pipe), 0);
4731        }
4732}
4733
4734static void ironlake_crtc_disable(struct drm_crtc *crtc)
4735{
4736        struct drm_device *dev = crtc->dev;
4737        struct drm_i915_private *dev_priv = dev->dev_private;
4738        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4739        struct intel_encoder *encoder;
4740        int pipe = intel_crtc->pipe;
4741        u32 reg, temp;
4742
4743        if (!intel_crtc->active)
4744                return;
4745
4746        intel_crtc_disable_planes(crtc);
4747
4748        for_each_encoder_on_crtc(dev, crtc, encoder)
4749                encoder->disable(encoder);
4750
4751        drm_crtc_vblank_off(crtc);
4752        assert_vblank_disabled(crtc);
4753
4754        if (intel_crtc->config->has_pch_encoder)
4755                intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4756
4757        intel_disable_pipe(intel_crtc);
4758
4759        ironlake_pfit_disable(intel_crtc);
4760
4761        for_each_encoder_on_crtc(dev, crtc, encoder)
4762                if (encoder->post_disable)
4763                        encoder->post_disable(encoder);
4764
4765        if (intel_crtc->config->has_pch_encoder) {
4766                ironlake_fdi_disable(crtc);
4767
4768                ironlake_disable_pch_transcoder(dev_priv, pipe);
4769
4770                if (HAS_PCH_CPT(dev)) {
4771                        /* disable TRANS_DP_CTL */
4772                        reg = TRANS_DP_CTL(pipe);
4773                        temp = I915_READ(reg);
4774                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4775                                  TRANS_DP_PORT_SEL_MASK);
4776                        temp |= TRANS_DP_PORT_SEL_NONE;
4777                        I915_WRITE(reg, temp);
4778
4779                        /* disable DPLL_SEL */
4780                        temp = I915_READ(PCH_DPLL_SEL);
4781                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
4782                        I915_WRITE(PCH_DPLL_SEL, temp);
4783                }
4784
4785                /* disable PCH DPLL */
4786                intel_disable_shared_dpll(intel_crtc);
4787
4788                ironlake_fdi_pll_disable(intel_crtc);
4789        }
4790
4791        intel_crtc->active = false;
4792        intel_update_watermarks(crtc);
4793
4794        mutex_lock(&dev->struct_mutex);
4795        intel_fbc_update(dev);
4796        mutex_unlock(&dev->struct_mutex);
4797}
4798
4799static void haswell_crtc_disable(struct drm_crtc *crtc)
4800{
4801        struct drm_device *dev = crtc->dev;
4802        struct drm_i915_private *dev_priv = dev->dev_private;
4803        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4804        struct intel_encoder *encoder;
4805        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4806
4807        if (!intel_crtc->active)
4808                return;
4809
4810        intel_crtc_disable_planes(crtc);
4811
4812        for_each_encoder_on_crtc(dev, crtc, encoder) {
4813                intel_opregion_notify_encoder(encoder, false);
4814                encoder->disable(encoder);
4815        }
4816
4817        drm_crtc_vblank_off(crtc);
4818        assert_vblank_disabled(crtc);
4819
4820        if (intel_crtc->config->has_pch_encoder)
4821                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4822                                                      false);
4823        intel_disable_pipe(intel_crtc);
4824
4825        if (intel_crtc->config->dp_encoder_is_mst)
4826                intel_ddi_set_vc_payload_alloc(crtc, false);
4827
4828        intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4829
4830        if (IS_SKYLAKE(dev))
4831                skylake_pfit_disable(intel_crtc);
4832        else
4833                ironlake_pfit_disable(intel_crtc);
4834
4835        intel_ddi_disable_pipe_clock(intel_crtc);
4836
4837        if (intel_crtc->config->has_pch_encoder) {
4838                lpt_disable_pch_transcoder(dev_priv);
4839                intel_ddi_fdi_disable(crtc);
4840        }
4841
4842        for_each_encoder_on_crtc(dev, crtc, encoder)
4843                if (encoder->post_disable)
4844                        encoder->post_disable(encoder);
4845
4846        intel_crtc->active = false;
4847        intel_update_watermarks(crtc);
4848
4849        mutex_lock(&dev->struct_mutex);
4850        intel_fbc_update(dev);
4851        mutex_unlock(&dev->struct_mutex);
4852
4853        if (intel_crtc_to_shared_dpll(intel_crtc))
4854                intel_disable_shared_dpll(intel_crtc);
4855}
4856
4857static void ironlake_crtc_off(struct drm_crtc *crtc)
4858{
4859        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4860        intel_put_shared_dpll(intel_crtc);
4861}
4862
4863
4864static void i9xx_pfit_enable(struct intel_crtc *crtc)
4865{
4866        struct drm_device *dev = crtc->base.dev;
4867        struct drm_i915_private *dev_priv = dev->dev_private;
4868        struct intel_crtc_state *pipe_config = crtc->config;
4869
4870        if (!pipe_config->gmch_pfit.control)
4871                return;
4872
4873        /*
4874         * The panel fitter should only be adjusted whilst the pipe is disabled,
4875         * according to the register description and the PRM.
4876         */
4877        WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
4878        assert_pipe_disabled(dev_priv, crtc->pipe);
4879
4880        I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
4881        I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
4882
4883        /* Border color in case we don't scale up to the full screen. Black by
4884         * default, change to something else for debugging. */
4885        I915_WRITE(BCLRPAT(crtc->pipe), 0);
4886}
4887
4888static enum intel_display_power_domain port_to_power_domain(enum port port)
4889{
4890        switch (port) {
4891        case PORT_A:
4892                return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4893        case PORT_B:
4894                return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4895        case PORT_C:
4896                return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4897        case PORT_D:
4898                return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4899        default:
4900                WARN_ON_ONCE(1);
4901                return POWER_DOMAIN_PORT_OTHER;
4902        }
4903}
4904
4905#define for_each_power_domain(domain, mask)                             \
4906        for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
4907                if ((1 << (domain)) & (mask))
4908
4909enum intel_display_power_domain
4910intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4911{
4912        struct drm_device *dev = intel_encoder->base.dev;
4913        struct intel_digital_port *intel_dig_port;
4914
4915        switch (intel_encoder->type) {
4916        case INTEL_OUTPUT_UNKNOWN:
4917                /* Only DDI platforms should ever use this output type */
4918                WARN_ON_ONCE(!HAS_DDI(dev));
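                    /* fall through: "unknown" DDI ports are treated like DP/HDMI/eDP below */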
4919        case INTEL_OUTPUT_DISPLAYPORT:
4920        case INTEL_OUTPUT_HDMI:
4921        case INTEL_OUTPUT_EDP:
4922                intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4923                return port_to_power_domain(intel_dig_port->port);
4924        case INTEL_OUTPUT_DP_MST:
4925                intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
4926                return port_to_power_domain(intel_dig_port->port);
4927        case INTEL_OUTPUT_ANALOG:
4928                return POWER_DOMAIN_PORT_CRT;
4929        case INTEL_OUTPUT_DSI:
4930                return POWER_DOMAIN_PORT_DSI;
4931        default:
4932                return POWER_DOMAIN_PORT_OTHER;
4933        }
4934}
4935
4936static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4937{
4938        struct drm_device *dev = crtc->dev;
4939        struct intel_encoder *intel_encoder;
4940        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4941        enum pipe pipe = intel_crtc->pipe;
4942        unsigned long mask;
4943        enum transcoder transcoder;
4944
4945        transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4946
4947        mask = BIT(POWER_DOMAIN_PIPE(pipe));
4948        mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4949        if (intel_crtc->config->pch_pfit.enabled ||
4950            intel_crtc->config->pch_pfit.force_thru)
4951                mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4952
4953        for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4954                mask |= BIT(intel_display_port_power_domain(intel_encoder));
4955
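            /*
             * Illustrative example: a pipe driving a single DDI encoder with
             * the PCH panel fitter in use ends up with the pipe, transcoder,
             * panel fitter and DDI port power-domain bits set in the mask.
             */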
4956        return mask;
4957}
4958
4959static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
4960{
4961        struct drm_device *dev = state->dev;
4962        struct drm_i915_private *dev_priv = dev->dev_private;
4963        unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
4964        struct intel_crtc *crtc;
4965
4966        /*
4967         * First get all needed power domains, then put all unneeded, to avoid
4968         * any unnecessary toggling of the power wells.
4969         */
4970        for_each_intel_crtc(dev, crtc) {
4971                enum intel_display_power_domain domain;
4972
4973                if (!crtc->base.state->enable)
4974                        continue;
4975
4976                pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
4977
4978                for_each_power_domain(domain, pipe_domains[crtc->pipe])
4979                        intel_display_power_get(dev_priv, domain);
4980        }
4981
4982        if (dev_priv->display.modeset_global_resources)
4983                dev_priv->display.modeset_global_resources(state);
4984
4985        for_each_intel_crtc(dev, crtc) {
4986                enum intel_display_power_domain domain;
4987
4988                for_each_power_domain(domain, crtc->enabled_power_domains)
4989                        intel_display_power_put(dev_priv, domain);
4990
4991                crtc->enabled_power_domains = pipe_domains[crtc->pipe];
4992        }
4993
4994        intel_display_set_init_power(dev_priv, false);
4995}
4996
4997/* returns HPLL frequency in kHz */
4998static int valleyview_get_vco(struct drm_i915_private *dev_priv)
4999{
5000        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
5001
5002        /* Obtain SKU information */
5003        mutex_lock(&dev_priv->dpio_lock);
5004        hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
5005                CCK_FUSE_HPLL_FREQ_MASK;
5006        mutex_unlock(&dev_priv->dpio_lock);
5007
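            /*
             * Illustrative example (assumed fuse value, not read from
             * hardware): a fuse field of 1 selects the 1600 MHz entry,
             * so this returns 1600 * 1000 = 1600000 kHz.
             */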
5008        return vco_freq[hpll_freq] * 1000;
5009}
5010
5011static void vlv_update_cdclk(struct drm_device *dev)
5012{
5013        struct drm_i915_private *dev_priv = dev->dev_private;
5014
5015        dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5016        DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5017                         dev_priv->vlv_cdclk_freq);
5018
5019        /*
5020         * Program the gmbus_freq based on the cdclk frequency.
5021         * BSpec erroneously claims we should aim for 4MHz, but
5022         * in fact 1MHz is the correct frequency.
5023         */
5024        I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
5025}
5026
5027/* Adjust CDclk dividers to allow high res or save power if possible */
5028static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5029{
5030        struct drm_i915_private *dev_priv = dev->dev_private;
5031        u32 val, cmd;
5032
5033        WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
5034
5035        if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5036                cmd = 2;
5037        else if (cdclk == 266667)
5038                cmd = 1;
5039        else
5040                cmd = 0;
5041
5042        mutex_lock(&dev_priv->rps.hw_lock);
5043        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5044        val &= ~DSPFREQGUAR_MASK;
5045        val |= (cmd << DSPFREQGUAR_SHIFT);
5046        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5047        if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5048                      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5049                     50)) {
5050                DRM_ERROR("timed out waiting for CDclk change\n");
5051        }
5052        mutex_unlock(&dev_priv->rps.hw_lock);
5053
5054        if (cdclk == 400000) {
5055                u32 divider;
5056
5057                divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
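                    /*
                     * Illustrative arithmetic (assumed 1600 MHz HPLL, i.e.
                     * hpll_freq == 1600000 kHz): DIV_ROUND_CLOSEST(3200000,
                     * 400000) - 1 = 7 is the CCK divider programmed below.
                     */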
5058
5059                mutex_lock(&dev_priv->dpio_lock);
5060                /* adjust cdclk divider */
5061                val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5062                val &= ~DISPLAY_FREQUENCY_VALUES;
5063                val |= divider;
5064                vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5065
5066                if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5067                              DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5068                             50))
5069                        DRM_ERROR("timed out waiting for CDclk change\n");
5070                mutex_unlock(&dev_priv->dpio_lock);
5071        }
5072
5073        mutex_lock(&dev_priv->dpio_lock);
5074        /* adjust self-refresh exit latency value */
5075        val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5076        val &= ~0x7f;
5077
5078        /*
5079         * For high bandwidth configs, we set a higher latency in the bunit
5080         * so that the core display fetch happens in time to avoid underruns.
5081         */
5082        if (cdclk == 400000)
5083                val |= 4500 / 250; /* 4.5 usec */
5084        else
5085                val |= 3000 / 250; /* 3.0 usec */
5086        vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5087        mutex_unlock(&dev_priv->dpio_lock);
5088
5089        vlv_update_cdclk(dev);
5090}
5091
5092static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5093{
5094        struct drm_i915_private *dev_priv = dev->dev_private;
5095        u32 val, cmd;
5096
5097        WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
5098
5099        switch (cdclk) {
5100        case 333333:
5101        case 320000:
5102        case 266667:
5103        case 200000:
5104                break;
5105        default:
5106                MISSING_CASE(cdclk);
5107                return;
5108        }
5109
5110        /*
5111         * Specs are full of misinformation, but testing on actual
5112         * hardware has shown that we just need to write the desired
5113         * CCK divider into the Punit register.
5114         */
5115        cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5116
5117        mutex_lock(&dev_priv->rps.hw_lock);
5118        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5119        val &= ~DSPFREQGUAR_MASK_CHV;
5120        val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
5121        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5122        if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5123                      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
5124                     50)) {
5125                DRM_ERROR("timed out waiting for CDclk change\n");
5126        }
5127        mutex_unlock(&dev_priv->rps.hw_lock);
5128
5129        vlv_update_cdclk(dev);
5130}
5131
5132static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5133                                 int max_pixclk)
5134{
5135        int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5136        int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5137
5138        /*
5139         * Really only a few cases to deal with, as only 4 CDclks are supported:
5140         *   200MHz
5141         *   267MHz
5142         *   320/333MHz (depends on HPLL freq)
5143         *   400MHz (VLV only)
5144         * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5145         * of the lower bin and adjust if needed.
5146         *
5147         * We seem to get an unstable or solid color picture at 200MHz.
5148         * Not sure what's wrong. For now use 200MHz only when all pipes
5149         * are off.
5150         */
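            /*
             * Worked example (illustrative numbers): with a 1600 MHz HPLL,
             * freq_320 is 320000; on VLV (limit 90) a max_pixclk of 300000
             * exceeds 320000 * 90 / 100 = 288000, so 400000 is returned.
             */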
5151        if (!IS_CHERRYVIEW(dev_priv) &&
5152            max_pixclk > freq_320*limit/100)
5153                return 400000;
5154        else if (max_pixclk > 266667*limit/100)
5155                return freq_320;
5156        else if (max_pixclk > 0)
5157                return 266667;
5158        else
5159                return 200000;
5160}
5161
5162/* compute the max pixel clock for new configuration */
5163static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
5164{
5165        struct drm_device *dev = dev_priv->dev;
5166        struct intel_crtc *intel_crtc;
5167        int max_pixclk = 0;
5168
5169        for_each_intel_crtc(dev, intel_crtc) {
5170                if (intel_crtc->new_enabled)
5171                        max_pixclk = max(max_pixclk,
5172                                         intel_crtc->new_config->base.adjusted_mode.crtc_clock);
5173        }
5174
5175        return max_pixclk;
5176}
5177
5178static void valleyview_modeset_global_pipes(struct drm_device *dev,
5179                                            unsigned *prepare_pipes)
5180{
5181        struct drm_i915_private *dev_priv = dev->dev_private;
5182        struct intel_crtc *intel_crtc;
5183        int max_pixclk = intel_mode_max_pixclk(dev_priv);
5184
5185        if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
5186            dev_priv->vlv_cdclk_freq)
5187                return;
5188
5189        /* disable/enable all currently active pipes while we change cdclk */
5190        for_each_intel_crtc(dev, intel_crtc)
5191                if (intel_crtc->base.state->enable)
5192                        *prepare_pipes |= (1 << intel_crtc->pipe);
5193}
5194
5195static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
5196{
5197        unsigned int credits, default_credits;
5198
5199        if (IS_CHERRYVIEW(dev_priv))
5200                default_credits = PFI_CREDIT(12);
5201        else
5202                default_credits = PFI_CREDIT(8);
5203
5204        if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
5205                /* CHV suggested value is 31 or 63 */
5206                if (IS_CHERRYVIEW(dev_priv))
5207                        credits = PFI_CREDIT_31;
5208                else
5209                        credits = PFI_CREDIT(15);
5210        } else {
5211                credits = default_credits;
5212        }
5213
5214        /*
5215         * WA - write default credits before re-programming
5216         * FIXME: should we also set the resend bit here?
5217         */
5218        I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
5219                   default_credits);
5220
5221        I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
5222                   credits | PFI_CREDIT_RESEND);
5223
5224        /*
5225         * FIXME: is this guaranteed to clear
5226         * immediately or should we poll for it?
5227         */
5228        WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
5229}
5230
5231static void valleyview_modeset_global_resources(struct drm_atomic_state *state)
5232{
5233        struct drm_device *dev = state->dev;
5234        struct drm_i915_private *dev_priv = dev->dev_private;
5235        int max_pixclk = intel_mode_max_pixclk(dev_priv);
5236        int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
5237
5238        if (req_cdclk != dev_priv->vlv_cdclk_freq) {
5239                /*
5240                 * FIXME: We can end up here with all power domains off, yet
5241                 * with a CDCLK frequency other than the minimum. To account
5242                 * for this take the PIPE-A power domain, which covers the HW
5243                 * blocks needed for the following programming. This can be
5244                 * removed once it's guaranteed that we get here either with
5245                 * the minimum CDCLK set, or the required power domains
5246                 * enabled.
5247                 */
5248                intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
5249
5250                if (IS_CHERRYVIEW(dev))
5251                        cherryview_set_cdclk(dev, req_cdclk);
5252                else
5253                        valleyview_set_cdclk(dev, req_cdclk);
5254
5255                vlv_program_pfi_credits(dev_priv);
5256
5257                intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
5258        }
5259}
5260
5261static void valleyview_crtc_enable(struct drm_crtc *crtc)
5262{
5263        struct drm_device *dev = crtc->dev;
5264        struct drm_i915_private *dev_priv = to_i915(dev);
5265        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5266        struct intel_encoder *encoder;
5267        int pipe = intel_crtc->pipe;
5268        bool is_dsi;
5269
5270        WARN_ON(!crtc->state->enable);
5271
5272        if (intel_crtc->active)
5273                return;
5274
5275        is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
5276
5277        if (!is_dsi) {
5278                if (IS_CHERRYVIEW(dev))
5279                        chv_prepare_pll(intel_crtc, intel_crtc->config);
5280                else
5281                        vlv_prepare_pll(intel_crtc, intel_crtc->config);
5282        }
5283
5284        if (intel_crtc->config->has_dp_encoder)
5285                intel_dp_set_m_n(intel_crtc, M1_N1);
5286
5287        intel_set_pipe_timings(intel_crtc);
5288
5289        if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
5290                struct drm_i915_private *dev_priv = dev->dev_private;
5291
5292                I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
5293                I915_WRITE(CHV_CANVAS(pipe), 0);
5294        }
5295
5296        i9xx_set_pipeconf(intel_crtc);
5297
5298        intel_crtc->active = true;
5299
5300        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5301
5302        for_each_encoder_on_crtc(dev, crtc, encoder)
5303                if (encoder->pre_pll_enable)
5304                        encoder->pre_pll_enable(encoder);
5305
5306        if (!is_dsi) {
5307                if (IS_CHERRYVIEW(dev))
5308                        chv_enable_pll(intel_crtc, intel_crtc->config);
5309                else
5310                        vlv_enable_pll(intel_crtc, intel_crtc->config);
5311        }
5312
5313        for_each_encoder_on_crtc(dev, crtc, encoder)
5314                if (encoder->pre_enable)
5315                        encoder->pre_enable(encoder);
5316
5317        i9xx_pfit_enable(intel_crtc);
5318
5319        intel_crtc_load_lut(crtc);
5320
5321        intel_update_watermarks(crtc);
5322        intel_enable_pipe(intel_crtc);
5323
5324        assert_vblank_disabled(crtc);
5325        drm_crtc_vblank_on(crtc);
5326
5327        for_each_encoder_on_crtc(dev, crtc, encoder)
5328                encoder->enable(encoder);
5329
5330        intel_crtc_enable_planes(crtc);
5331
5332        /* Underruns don't raise interrupts, so check manually. */
5333        i9xx_check_fifo_underruns(dev_priv);
5334}
5335
5336static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
5337{
5338        struct drm_device *dev = crtc->base.dev;
5339        struct drm_i915_private *dev_priv = dev->dev_private;
5340
5341        I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
5342        I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
5343}
5344
5345static void i9xx_crtc_enable(struct drm_crtc *crtc)
5346{
5347        struct drm_device *dev = crtc->dev;
5348        struct drm_i915_private *dev_priv = to_i915(dev);
5349        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5350        struct intel_encoder *encoder;
5351        int pipe = intel_crtc->pipe;
5352
5353        WARN_ON(!crtc->state->enable);
5354
5355        if (intel_crtc->active)
5356                return;
5357
5358        i9xx_set_pll_dividers(intel_crtc);
5359
5360        if (intel_crtc->config->has_dp_encoder)
5361                intel_dp_set_m_n(intel_crtc, M1_N1);
5362
5363        intel_set_pipe_timings(intel_crtc);
5364
5365        i9xx_set_pipeconf(intel_crtc);
5366
5367        intel_crtc->active = true;
5368
5369        if (!IS_GEN2(dev))
5370                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5371
5372        for_each_encoder_on_crtc(dev, crtc, encoder)
5373                if (encoder->pre_enable)
5374                        encoder->pre_enable(encoder);
5375
5376        i9xx_enable_pll(intel_crtc);
5377
5378        i9xx_pfit_enable(intel_crtc);
5379
5380        intel_crtc_load_lut(crtc);
5381
5382        intel_update_watermarks(crtc);
5383        intel_enable_pipe(intel_crtc);
5384
5385        assert_vblank_disabled(crtc);
5386        drm_crtc_vblank_on(crtc);
5387
5388        for_each_encoder_on_crtc(dev, crtc, encoder)
5389                encoder->enable(encoder);
5390
5391        intel_crtc_enable_planes(crtc);
5392
5393        /*
5394         * Gen2 reports pipe underruns whenever all planes are disabled.
5395         * So don't enable underrun reporting before at least some planes
5396         * are enabled.
5397         * FIXME: Need to fix the logic to work when we turn off all planes
5398         * but leave the pipe running.
5399         */
5400        if (IS_GEN2(dev))
5401                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5402
5403        /* Underruns don't raise interrupts, so check manually. */
5404        i9xx_check_fifo_underruns(dev_priv);
5405}
5406
5407static void i9xx_pfit_disable(struct intel_crtc *crtc)
5408{
5409        struct drm_device *dev = crtc->base.dev;
5410        struct drm_i915_private *dev_priv = dev->dev_private;
5411
5412        if (!crtc->config->gmch_pfit.control)
5413                return;
5414
5415        assert_pipe_disabled(dev_priv, crtc->pipe);
5416
5417        DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
5418                         I915_READ(PFIT_CONTROL));
5419        I915_WRITE(PFIT_CONTROL, 0);
5420}
5421
5422static void i9xx_crtc_disable(struct drm_crtc *crtc)
5423{
5424        struct drm_device *dev = crtc->dev;
5425        struct drm_i915_private *dev_priv = dev->dev_private;
5426        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5427        struct intel_encoder *encoder;
5428        int pipe = intel_crtc->pipe;
5429
5430        if (!intel_crtc->active)
5431                return;
5432
5433        /*
5434         * Gen2 reports pipe underruns whenever all planes are disabled.
5435         * So disable underrun reporting before all the planes get disabled.
5436         * FIXME: Need to fix the logic to work when we turn off all planes
5437         * but leave the pipe running.
5438         */
5439        if (IS_GEN2(dev))
5440                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5441
5442        /*
5443         * Vblank time updates from the shadow to live plane control register
5444         * are blocked if the memory self-refresh mode is active at that
5445         * moment. So to make sure the plane gets truly disabled, first
5446         * disable the self-refresh mode. The self-refresh enable bit in turn
5447         * will be checked/applied by the HW only at the next frame start
5448         * event which is after the vblank start event, so we need to have a
5449         * wait-for-vblank between disabling the plane and the pipe.
5450         */
5451        intel_set_memory_cxsr(dev_priv, false);
5452        intel_crtc_disable_planes(crtc);
5453
5454        /*
5455         * On gen2 planes are double buffered but the pipe isn't, so we must
5456         * wait for planes to fully turn off before disabling the pipe.
5457         * We also need to wait on all gmch platforms because of the
5458         * self-refresh mode constraint explained above.
5459         */
5460        intel_wait_for_vblank(dev, pipe);
5461
5462        for_each_encoder_on_crtc(dev, crtc, encoder)
5463                encoder->disable(encoder);
5464
5465        drm_crtc_vblank_off(crtc);
5466        assert_vblank_disabled(crtc);
5467
5468        intel_disable_pipe(intel_crtc);
5469
5470        i9xx_pfit_disable(intel_crtc);
5471
5472        for_each_encoder_on_crtc(dev, crtc, encoder)
5473                if (encoder->post_disable)
5474                        encoder->post_disable(encoder);
5475
5476        if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
5477                if (IS_CHERRYVIEW(dev))
5478                        chv_disable_pll(dev_priv, pipe);
5479                else if (IS_VALLEYVIEW(dev))
5480                        vlv_disable_pll(dev_priv, pipe);
5481                else
5482                        i9xx_disable_pll(intel_crtc);
5483        }
5484
5485        if (!IS_GEN2(dev))
5486                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5487
5488        intel_crtc->active = false;
5489        intel_update_watermarks(crtc);
5490
5491        mutex_lock(&dev->struct_mutex);
5492        intel_fbc_update(dev);
5493        mutex_unlock(&dev->struct_mutex);
5494}
5495
5496static void i9xx_crtc_off(struct drm_crtc *crtc)
5497{
5498}
5499
5500/* Master function to enable/disable CRTC and corresponding power wells */
5501void intel_crtc_control(struct drm_crtc *crtc, bool enable)
5502{
5503        struct drm_device *dev = crtc->dev;
5504        struct drm_i915_private *dev_priv = dev->dev_private;
5505        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5506        enum intel_display_power_domain domain;
5507        unsigned long domains;
5508
5509        if (enable) {
5510                if (!intel_crtc->active) {
5511                        domains = get_crtc_power_domains(crtc);
5512                        for_each_power_domain(domain, domains)
5513                                intel_display_power_get(dev_priv, domain);
5514                        intel_crtc->enabled_power_domains = domains;
5515
5516                        dev_priv->display.crtc_enable(crtc);
5517                }
5518        } else {
5519                if (intel_crtc->active) {
5520                        dev_priv->display.crtc_disable(crtc);
5521
5522                        domains = intel_crtc->enabled_power_domains;
5523                        for_each_power_domain(domain, domains)
5524                                intel_display_power_put(dev_priv, domain);
5525                        intel_crtc->enabled_power_domains = 0;
5526                }
5527        }
5528}
5529
5530/**
5531 * Sets the power management mode of the pipe and plane.
5532 */
5533void intel_crtc_update_dpms(struct drm_crtc *crtc)
5534{
5535        struct drm_device *dev = crtc->dev;
5536        struct intel_encoder *intel_encoder;
5537        bool enable = false;
5538
5539        for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5540                enable |= intel_encoder->connectors_active;
5541
5542        intel_crtc_control(crtc, enable);
5543}
5544
5545static void intel_crtc_disable(struct drm_crtc *crtc)
5546{
5547        struct drm_device *dev = crtc->dev;
5548        struct drm_connector *connector;
5549        struct drm_i915_private *dev_priv = dev->dev_private;
5550
5551        /* crtc should still be enabled when we disable it. */
5552        WARN_ON(!crtc->state->enable);
5553
5554        dev_priv->display.crtc_disable(crtc);
5555        dev_priv->display.off(crtc);
5556
5557        crtc->primary->funcs->disable_plane(crtc->primary);
5558
5559        /* Update computed state. */
5560        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
5561                if (!connector->encoder || !connector->encoder->crtc)
5562                        continue;
5563
5564                if (connector->encoder->crtc != crtc)
5565                        continue;
5566
5567                connector->dpms = DRM_MODE_DPMS_OFF;
5568                to_intel_encoder(connector->encoder)->connectors_active = false;
5569        }
5570}
5571
5572void intel_encoder_destroy(struct drm_encoder *encoder)
5573{
5574        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5575
5576        drm_encoder_cleanup(encoder);
5577        kfree(intel_encoder);
5578}
5579
5580/* Simple dpms helper for encoders with just one connector, no cloning and only
5581 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
5582 * state of the entire output pipe. */
5583static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
5584{
5585        if (mode == DRM_MODE_DPMS_ON) {
5586                encoder->connectors_active = true;
5587
5588                intel_crtc_update_dpms(encoder->base.crtc);
5589        } else {
5590                encoder->connectors_active = false;
5591
5592                intel_crtc_update_dpms(encoder->base.crtc);
5593        }
5594}
5595
5596/* Cross check the actual hw state with our own modeset state tracking (and its
5597 * internal consistency). */
5598static void intel_connector_check_state(struct intel_connector *connector)
5599{
5600        if (connector->get_hw_state(connector)) {
5601                struct intel_encoder *encoder = connector->encoder;
5602                struct drm_crtc *crtc;
5603                bool encoder_enabled;
5604                enum pipe pipe;
5605
5606                DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5607                              connector->base.base.id,
5608                              connector->base.name);
5609
5610                /* there is no real hw state for MST connectors */
5611                if (connector->mst_port)
5612                        return;
5613
5614                I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
5615                     "wrong connector dpms state\n");
5616                I915_STATE_WARN(connector->base.encoder != &encoder->base,
5617                     "active connector not linked to encoder\n");
5618
5619                if (encoder) {
5620                        I915_STATE_WARN(!encoder->connectors_active,
5621                             "encoder->connectors_active not set\n");
5622
5623                        encoder_enabled = encoder->get_hw_state(encoder, &pipe);
5624                        I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
5625                        if (I915_STATE_WARN_ON(!encoder->base.crtc))
5626                                return;
5627
5628                        crtc = encoder->base.crtc;
5629
5630                        I915_STATE_WARN(!crtc->state->enable,
5631                                        "crtc not enabled\n");
5632                        I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
5633                        I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
5634                             "encoder active on the wrong pipe\n");
5635                }
5636        }
5637}
5638
5639int intel_connector_init(struct intel_connector *connector)
5640{
5641        struct drm_connector_state *connector_state;
5642
5643        connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
5644        if (!connector_state)
5645                return -ENOMEM;
5646
5647        connector->base.state = connector_state;
5648        return 0;
5649}
5650
5651struct intel_connector *intel_connector_alloc(void)
5652{
5653        struct intel_connector *connector;
5654
5655        connector = kzalloc(sizeof *connector, GFP_KERNEL);
5656        if (!connector)
5657                return NULL;
5658
5659        if (intel_connector_init(connector) < 0) {
5660                kfree(connector);
5661                return NULL;
5662        }
5663
5664        return connector;
5665}
5666
5667/* Even simpler default implementation, if there's really no special case to
5668 * consider. */
5669void intel_connector_dpms(struct drm_connector *connector, int mode)
5670{
5671        /* All the simple cases only support two dpms states. */
5672        if (mode != DRM_MODE_DPMS_ON)
5673                mode = DRM_MODE_DPMS_OFF;
5674
5675        if (mode == connector->dpms)
5676                return;
5677
5678        connector->dpms = mode;
5679
5680        /* Only need to change hw state when actually enabled */
5681        if (connector->encoder)
5682                intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
5683
5684        intel_modeset_check_state(connector->dev);
5685}
5686
5687/* Simple connector->get_hw_state implementation for encoders that support only
5688 * one connector and no cloning, and hence the encoder state determines the state
5689 * of the connector. */
5690bool intel_connector_get_hw_state(struct intel_connector *connector)
5691{
5692        enum pipe pipe = 0;
5693        struct intel_encoder *encoder = connector->encoder;
5694
5695        return encoder->get_hw_state(encoder, &pipe);
5696}
5697
5698static int pipe_required_fdi_lanes(struct drm_device *dev, enum pipe pipe)
5699{
5700        struct intel_crtc *crtc =
5701                to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
5702
5703        if (crtc->base.state->enable &&
5704            crtc->config->has_pch_encoder)
5705                return crtc->config->fdi_lanes;
5706
5707        return 0;
5708}
5709
5710static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5711                                     struct intel_crtc_state *pipe_config)
5712{
5713        DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
5714                      pipe_name(pipe), pipe_config->fdi_lanes);
5715        if (pipe_config->fdi_lanes > 4) {
5716                DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5717                              pipe_name(pipe), pipe_config->fdi_lanes);
5718                return false;
5719        }
5720
5721        if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
5722                if (pipe_config->fdi_lanes > 2) {
5723                        DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5724                                      pipe_config->fdi_lanes);
5725                        return false;
5726                } else {
5727                        return true;
5728                }
5729        }
5730
5731        if (INTEL_INFO(dev)->num_pipes == 2)
5732                return true;
5733
5734        /* Ivybridge 3 pipe is really complicated */
5735        switch (pipe) {
5736        case PIPE_A:
5737                return true;
5738        case PIPE_B:
5739                if (pipe_config->fdi_lanes > 2 &&
5740                    pipe_required_fdi_lanes(dev, PIPE_C) > 0) {
5741                        DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5742                                      pipe_name(pipe), pipe_config->fdi_lanes);
5743                        return false;
5744                }
5745                return true;
5746        case PIPE_C:
5747                if (pipe_config->fdi_lanes > 2) {
5748                        DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
5749                                      pipe_name(pipe), pipe_config->fdi_lanes);
5750                        return false;
5751                }
5752                if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) {
5753                        DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5754                        return false;
5755                }
5756                return true;
5757        default:
5758                BUG();
5759        }
5760}
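/*
 * A short summary of the lane budget enforced above for the three-pipe (IVB)
 * case, where pipes B and C share the FDI lanes:
 *
 *   pipe A: up to 4 lanes, no sharing constraints
 *   pipe B: up to 4 lanes, but at most 2 when pipe C also drives FDI
 *   pipe C: at most 2 lanes, and only when pipe B itself uses at most 2
 */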
5761
5762#define RETRY 1
5763static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5764                                       struct intel_crtc_state *pipe_config)
5765{
5766        struct drm_device *dev = intel_crtc->base.dev;
5767        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5768        int lane, link_bw, fdi_dotclock;
5769        bool setup_ok, needs_recompute = false;
5770
5771retry:
5772        /* FDI is a binary signal running at ~2.7GHz, encoding
5773         * each output octet as 10 bits. The actual frequency
5774         * is stored as a divider into a 100MHz clock, and the
5775         * mode pixel clock is stored in units of 1KHz.
5776         * Hence the bw of each lane in terms of the mode signal
5777         * is:
5778         */
5779        link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
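        /*
         * For illustration: a nominal 2.7 GHz FDI link is stored as the
         * divider 27, giving link_bw = 27 * 100000 / 10 = 270000, in the
         * same kHz units as the adjusted mode's pixel clock.
         */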
5780
5781        fdi_dotclock = adjusted_mode->crtc_clock;
5782
5783        lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5784                                           pipe_config->pipe_bpp);
5785
5786        pipe_config->fdi_lanes = lane;
5787
5788        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5789                               link_bw, &pipe_config->fdi_m_n);
5790
5791        setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5792                                            intel_crtc->pipe, pipe_config);
5793        if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5794                pipe_config->pipe_bpp -= 2*3;
5795                DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5796                              pipe_config->pipe_bpp);
5797                needs_recompute = true;
5798                pipe_config->bw_constrained = true;
5799
5800                goto retry;
5801        }
5802
5803        if (needs_recompute)
5804                return RETRY;
5805
5806        return setup_ok ? 0 : -EINVAL;
5807}
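/*
 * Note that when the lane check fails but pipe_bpp can still be lowered, the
 * function returns RETRY so the caller recomputes the configuration with the
 * reduced bpp instead of failing outright; -EINVAL is only returned when the
 * check fails and no bpp reduction was possible at all.
 */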
5808
5809static void hsw_compute_ips_config(struct intel_crtc *crtc,
5810                                   struct intel_crtc_state *pipe_config)
5811{
5812        pipe_config->ips_enabled = i915.enable_ips &&
5813                                   hsw_crtc_supports_ips(crtc) &&
5814                                   pipe_config->pipe_bpp <= 24;
5815}
5816
5817static int intel_crtc_compute_config(struct intel_crtc *crtc,
5818                                     struct intel_crtc_state *pipe_config)
5819{
5820        struct drm_device *dev = crtc->base.dev;
5821        struct drm_i915_private *dev_priv = dev->dev_private;
5822        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5823
5824        /* FIXME should check pixel clock limits on all platforms */
5825        if (INTEL_INFO(dev)->gen < 4) {
5826                int clock_limit =
5827                        dev_priv->display.get_display_clock_speed(dev);
5828
5829                /*
5830                 * Enable pixel doubling when the dot clock
5831                 * is > 90% of the (display) core speed.
5832                 *
5833                 * 915G (GDG) supports double wide on either pipe,
5834                 * otherwise pipe A only.
5835                 */
5836                if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5837                    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5838                        clock_limit *= 2;
5839                        pipe_config->double_wide = true;
5840                }
5841
5842                if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
5843                        return -EINVAL;
5844        }
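        /*
         * E.g. with a 333 MHz core display clock a single-wide pipe tops out
         * just below 300 MHz dot clock (the 90% rule above), while double
         * wide roughly doubles that to just below 600 MHz.
         */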
5845
5846        /*
5847         * Pipe horizontal size must be even in:
5848         * - DVO ganged mode
5849         * - LVDS dual channel mode
5850         * - Double wide pipe
5851         */
5852        if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
5853             intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5854                pipe_config->pipe_src_w &= ~1;
5855
5856        /* Cantiga+ cannot handle modes with a hsync front porch of 0.
5857         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5858         */
5859        if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5860                adjusted_mode->hsync_start == adjusted_mode->hdisplay)
5861                return -EINVAL;
5862
5863        if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5864                pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5865        } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5866                /* only an 8bpc pipe, with 6bpc dither through the panel fitter
5867                 * for lvds. */
5868                pipe_config->pipe_bpp = 8*3;
5869        }
5870
5871        if (HAS_IPS(dev))
5872                hsw_compute_ips_config(crtc, pipe_config);
5873
5874        if (pipe_config->has_pch_encoder)
5875                return ironlake_fdi_compute_config(crtc, pipe_config);
5876
5877        return 0;
5878}
5879
5880static int valleyview_get_display_clock_speed(struct drm_device *dev)
5881{
5882        struct drm_i915_private *dev_priv = dev->dev_private;
5883        u32 val;
5884        int divider;
5885
5886        if (dev_priv->hpll_freq == 0)
5887                dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
5888
5889        mutex_lock(&dev_priv->dpio_lock);
5890        val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5891        mutex_unlock(&dev_priv->dpio_lock);
5892
5893        divider = val & DISPLAY_FREQUENCY_VALUES;
5894
5895        WARN((val & DISPLAY_FREQUENCY_STATUS) !=
5896             (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5897             "cdclk change in progress\n");
5898
5899        return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
5900}
5901
5902static int i945_get_display_clock_speed(struct drm_device *dev)
5903{
5904        return 400000;
5905}
5906
5907static int i915_get_display_clock_speed(struct drm_device *dev)
5908{
5909        return 333000;
5910}
5911
5912static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5913{
5914        return 200000;
5915}
5916
5917static int pnv_get_display_clock_speed(struct drm_device *dev)
5918{
5919        u16 gcfgc = 0;
5920
5921        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5922
5923        switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5924        case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5925                return 267000;
5926        case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5927                return 333000;
5928        case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5929                return 444000;
5930        case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5931                return 200000;
5932        default:
5933                DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
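                /* fall through - treat unknown values as 133 MHz */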
5934        case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5935                return 133000;
5936        case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5937                return 167000;
5938        }
5939}
5940
5941static int i915gm_get_display_clock_speed(struct drm_device *dev)
5942{
5943        u16 gcfgc = 0;
5944
5945        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5946
5947        if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5948                return 133000;
5949        else {
5950                switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5951                case GC_DISPLAY_CLOCK_333_MHZ:
5952                        return 333000;
5953                default:
5954                case GC_DISPLAY_CLOCK_190_200_MHZ:
5955                        return 190000;
5956                }
5957        }
5958}
5959
5960static int i865_get_display_clock_speed(struct drm_device *dev)
5961{
5962        return 266000;
5963}
5964
5965static int i855_get_display_clock_speed(struct drm_device *dev)
5966{
5967        u16 hpllcc = 0;
5968        /* Assume that the hardware is in the high speed state.  This
5969         * should be the default.
5970         */
5971        switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5972        case GC_CLOCK_133_200:
5973        case GC_CLOCK_100_200:
5974                return 200000;
5975        case GC_CLOCK_166_250:
5976                return 250000;
5977        case GC_CLOCK_100_133:
5978                return 133000;
5979        }
5980
5981        /* Shouldn't happen */
5982        return 0;
5983}
5984
5985static int i830_get_display_clock_speed(struct drm_device *dev)
5986{
5987        return 133000;
5988}
5989
5990static void
5991intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
5992{
5993        while (*num > DATA_LINK_M_N_MASK ||
5994               *den > DATA_LINK_M_N_MASK) {
5995                *num >>= 1;
5996                *den >>= 1;
5997        }
5998}
5999
6000static void compute_m_n(unsigned int m, unsigned int n,
6001                        uint32_t *ret_m, uint32_t *ret_n)
6002{
6003        *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6004        *ret_m = div_u64((uint64_t) m * *ret_n, n);
6005        intel_reduce_m_n_ratio(ret_m, ret_n);
6006}
6007
6008void
6009intel_link_compute_m_n(int bits_per_pixel, int nlanes,
6010                       int pixel_clock, int link_clock,
6011                       struct intel_link_m_n *m_n)
6012{
6013        m_n->tu = 64;
6014
6015        compute_m_n(bits_per_pixel * pixel_clock,
6016                    link_clock * nlanes * 8,
6017                    &m_n->gmch_m, &m_n->gmch_n);
6018
6019        compute_m_n(pixel_clock, link_clock,
6020                    &m_n->link_m, &m_n->link_n);
6021}
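/*
 * For example, a 1080p60 stream (148500 kHz pixel clock, 24 bpp) carried on
 * four DP lanes at a 270000 kHz link clock needs a data M/N ratio of
 * 24 * 148500 / (8 * 4 * 270000) = 0.4125 and a link M/N ratio of
 * 148500 / 270000 = 0.55; compute_m_n() expresses each ratio with N rounded
 * up to a power of two (capped at DATA_LINK_N_MAX) and M scaled to match.
 */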
6022
6023static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6024{
6025        if (i915.panel_use_ssc >= 0)
6026                return i915.panel_use_ssc != 0;
6027        return dev_priv->vbt.lvds_use_ssc
6028                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6029}
6030
6031static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
6032                           int num_connectors)
6033{
6034        struct drm_device *dev = crtc_state->base.crtc->dev;
6035        struct drm_i915_private *dev_priv = dev->dev_private;
6036        int refclk;
6037
6038        WARN_ON(!crtc_state->base.state);
6039
6040        if (IS_VALLEYVIEW(dev)) {
6041                refclk = 100000;
6042        } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6043            intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
6044                refclk = dev_priv->vbt.lvds_ssc_freq;
6045                DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
6046        } else if (!IS_GEN2(dev)) {
6047                refclk = 96000;
6048        } else {
6049                refclk = 48000;
6050        }
6051
6052        return refclk;
6053}
6054
6055static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
6056{
6057        return (1 << dpll->n) << 16 | dpll->m2;
6058}
6059
6060static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6061{
6062        return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
6063}
6064
6065static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6066                                     struct intel_crtc_state *crtc_state,
6067                                     intel_clock_t *reduced_clock)
6068{
6069        struct drm_device *dev = crtc->base.dev;
6070        u32 fp, fp2 = 0;
6071
6072        if (IS_PINEVIEW(dev)) {
6073                fp = pnv_dpll_compute_fp(&crtc_state->dpll);
6074                if (reduced_clock)
6075                        fp2 = pnv_dpll_compute_fp(reduced_clock);
6076        } else {
6077                fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
6078                if (reduced_clock)
6079                        fp2 = i9xx_dpll_compute_fp(reduced_clock);
6080        }
6081
6082        crtc_state->dpll_hw_state.fp0 = fp;
6083
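        /*
         * FP1 normally mirrors FP0; it only carries different dividers when
         * an LVDS reduced (downclock) frequency is available, and
         * lowfreq_avail then lets i9xx_set_pipeconf() enable CxSR
         * downclocking.
         */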
6084        crtc->lowfreq_avail = false;
6085        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6086            reduced_clock) {
6087                crtc_state->dpll_hw_state.fp1 = fp2;
6088                crtc->lowfreq_avail = true;
6089        } else {
6090                crtc_state->dpll_hw_state.fp1 = fp;
6091        }
6092}
6093
6094static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
6095                pipe)
6096{
6097        u32 reg_val;
6098
6099        /*
6100         * The PLLB opamp always calibrates to the max value of 0x3f; force
6101         * enable it and set it to a reasonable value instead.
6102         */
6103        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
6104        reg_val &= 0xffffff00;
6105        reg_val |= 0x00000030;
6106        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
6107
6108        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
6109        reg_val &= 0x8cffffff;
6110        reg_val = 0x8c000000;
6111        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
6112
6113        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
6114        reg_val &= 0xffffff00;
6115        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
6116
6117        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
6118        reg_val &= 0x00ffffff;
6119        reg_val |= 0xb0000000;
6120        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
6121}
6122
6123static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
6124                                         struct intel_link_m_n *m_n)
6125{
6126        struct drm_device *dev = crtc->base.dev;
6127        struct drm_i915_private *dev_priv = dev->dev_private;
6128        int pipe = crtc->pipe;
6129
6130        I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
6131        I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
6132        I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
6133        I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
6134}
6135
6136static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
6137                                         struct intel_link_m_n *m_n,
6138                                         struct intel_link_m_n *m2_n2)
6139{
6140        struct drm_device *dev = crtc->base.dev;
6141        struct drm_i915_private *dev_priv = dev->dev_private;
6142        int pipe = crtc->pipe;
6143        enum transcoder transcoder = crtc->config->cpu_transcoder;
6144
6145        if (INTEL_INFO(dev)->gen >= 5) {
6146                I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
6147                I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
6148                I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
6149                I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
6150                /* M2_N2 registers exist only on gen < 8 and CHV, and are
6151                 * written only when DRRS is supported, so that they are
6152                 * not accessed unnecessarily.
6153                 */
6154                if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
6155                        crtc->config->has_drrs) {
6156                        I915_WRITE(PIPE_DATA_M2(transcoder),
6157                                        TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
6158                        I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
6159                        I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
6160                        I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
6161                }
6162        } else {
6163                I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
6164                I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
6165                I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
6166                I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
6167        }
6168}
6169
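/*
 * M1_N1 holds the dividers for the normal refresh rate; M2_N2, where the
 * hardware provides it, holds the alternate set that DRRS switches to for
 * the low refresh rate.
 */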
6170void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
6171{
6172        struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
6173
6174        if (m_n == M1_N1) {
6175                dp_m_n = &crtc->config->dp_m_n;
6176                dp_m2_n2 = &crtc->config->dp_m2_n2;
6177        } else if (m_n == M2_N2) {
6178
6179                /*
6180                 * The platform has no separate M2_N2 registers, so the
6181                 * m2_n2 divider values have to be programmed into M1_N1.
6182                 */
6183                dp_m_n = &crtc->config->dp_m2_n2;
6184        } else {
6185                DRM_ERROR("Unsupported divider value\n");
6186                return;
6187        }
6188
6189        if (crtc->config->has_pch_encoder)
6190                intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
6191        else
6192                intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
6193}
6194
6195static void vlv_update_pll(struct intel_crtc *crtc,
6196                           struct intel_crtc_state *pipe_config)
6197{
6198        u32 dpll, dpll_md;
6199
6200        /*
6201         * Enable DPIO clock input. We should never disable the reference
6202         * clock for pipe B, since VGA hotplug / manual detection depends
6203         * on it.
6204         */
6205        dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
6206                DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
6207        /* We should never disable this, set it here for state tracking */
6208        if (crtc->pipe == PIPE_B)
6209                dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6210        dpll |= DPLL_VCO_ENABLE;
6211        pipe_config->dpll_hw_state.dpll = dpll;
6212
6213        dpll_md = (pipe_config->pixel_multiplier - 1)
6214                << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6215        pipe_config->dpll_hw_state.dpll_md = dpll_md;
6216}
6217
6218static void vlv_prepare_pll(struct intel_crtc *crtc,
6219                            const struct intel_crtc_state *pipe_config)
6220{
6221        struct drm_device *dev = crtc->base.dev;
6222        struct drm_i915_private *dev_priv = dev->dev_private;
6223        int pipe = crtc->pipe;
6224        u32 mdiv;
6225        u32 bestn, bestm1, bestm2, bestp1, bestp2;
6226        u32 coreclk, reg_val;
6227
6228        mutex_lock(&dev_priv->dpio_lock);
6229
6230        bestn = pipe_config->dpll.n;
6231        bestm1 = pipe_config->dpll.m1;
6232        bestm2 = pipe_config->dpll.m2;
6233        bestp1 = pipe_config->dpll.p1;
6234        bestp2 = pipe_config->dpll.p2;
6235
6236        /* See eDP HDMI DPIO driver vbios notes doc */
6237
6238        /* PLL B needs special handling */
6239        if (pipe == PIPE_B)
6240                vlv_pllb_recal_opamp(dev_priv, pipe);
6241
6242        /* Set up Tx target for periodic Rcomp update */
6243        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
6244
6245        /* Disable target IRef on PLL */
6246        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
6247        reg_val &= 0x00ffffff;
6248        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
6249
6250        /* Disable fast lock */
6251        vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
6252
6253        /* Set idtafcrecal before PLL is enabled */
6254        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
6255        mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
6256        mdiv |= ((bestn << DPIO_N_SHIFT));
6257        mdiv |= (1 << DPIO_K_SHIFT);
6258
6259        /*
6260         * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
6261         * but we don't support that).
6262         * Note: don't use the DAC post divider as it seems unstable.
6263         */
6264        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
6265        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
6266
6267        mdiv |= DPIO_ENABLE_CALIBRATION;
6268        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
6269
6270        /* Set HBR and RBR LPF coefficients */
6271        if (pipe_config->port_clock == 162000 ||
6272            intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
6273            intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
6274                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
6275                                 0x009f0003);
6276        else
6277                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
6278                                 0x00d0000f);
6279
6280        if (pipe_config->has_dp_encoder) {
6281                /* Use SSC source */
6282                if (pipe == PIPE_A)
6283                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6284                                         0x0df40000);
6285                else
6286                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6287                                         0x0df70000);
6288        } else { /* HDMI or VGA */
6289                /* Use bend source */
6290                if (pipe == PIPE_A)
6291                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6292                                         0x0df70000);
6293                else
6294                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6295                                         0x0df40000);
6296        }
6297
6298        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
6299        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
6300        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
6301            intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
6302                coreclk |= 0x01000000;
6303        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
6304
6305        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
6306        mutex_unlock(&dev_priv->dpio_lock);
6307}
6308
6309static void chv_update_pll(struct intel_crtc *crtc,
6310                           struct intel_crtc_state *pipe_config)
6311{
6312        pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
6313                DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
6314                DPLL_VCO_ENABLE;
6315        if (crtc->pipe != PIPE_A)
6316                pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6317
6318        pipe_config->dpll_hw_state.dpll_md =
6319                (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6320}
6321
6322static void chv_prepare_pll(struct intel_crtc *crtc,
6323                            const struct intel_crtc_state *pipe_config)
6324{
6325        struct drm_device *dev = crtc->base.dev;
6326        struct drm_i915_private *dev_priv = dev->dev_private;
6327        int pipe = crtc->pipe;
6328        int dpll_reg = DPLL(crtc->pipe);
6329        enum dpio_channel port = vlv_pipe_to_channel(pipe);
6330        u32 loopfilter, tribuf_calcntr;
6331        u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
6332        u32 dpio_val;
6333        int vco;
6334
6335        bestn = pipe_config->dpll.n;
6336        bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
6337        bestm1 = pipe_config->dpll.m1;
6338        bestm2 = pipe_config->dpll.m2 >> 22;
6339        bestp1 = pipe_config->dpll.p1;
6340        bestp2 = pipe_config->dpll.p2;
6341        vco = pipe_config->dpll.vco;
6342        dpio_val = 0;
6343        loopfilter = 0;
6344
6345        /*
6346         * Enable Refclk and SSC
6347         */
6348        I915_WRITE(dpll_reg,
6349                   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
6350
6351        mutex_lock(&dev_priv->dpio_lock);
6352
6353        /* p1 and p2 divider */
6354        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
6355                        5 << DPIO_CHV_S1_DIV_SHIFT |
6356                        bestp1 << DPIO_CHV_P1_DIV_SHIFT |
6357                        bestp2 << DPIO_CHV_P2_DIV_SHIFT |
6358                        1 << DPIO_CHV_K_DIV_SHIFT);
6359
6360        /* Feedback post-divider - m2 */
6361        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
6362
6363        /* Feedback refclk divider - n and m1 */
6364        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
6365                        DPIO_CHV_M1_DIV_BY_2 |
6366                        1 << DPIO_CHV_N_DIV_SHIFT);
6367
6368        /* M2 fraction division */
6369        if (bestm2_frac)
6370                vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
6371
6372        /* M2 fraction division enable */
6373        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
6374        dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
6375        dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
6376        if (bestm2_frac)
6377                dpio_val |= DPIO_CHV_FRAC_DIV_EN;
6378        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
6379
6380        /* Program digital lock detect threshold */
6381        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
6382        dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
6383                                        DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
6384        dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
6385        if (!bestm2_frac)
6386                dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
6387        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
6388
6389        /* Loop filter */
6390        if (vco == 5400000) {
6391                loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
6392                loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
6393                loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
6394                tribuf_calcntr = 0x9;
6395        } else if (vco <= 6200000) {
6396                loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
6397                loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
6398                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
6399                tribuf_calcntr = 0x9;
6400        } else if (vco <= 6480000) {
6401                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
6402                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
6403                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
6404                tribuf_calcntr = 0x8;
6405        } else {
6406                /* Not supported. Apply the same limits as in the max case */
6407                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
6408                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
6409                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
6410                tribuf_calcntr = 0;
6411        }
6412        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
6413
6414        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
6415        dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
6416        dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
6417        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
6418
6419        /* AFC Recal */
6420        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
6421                        vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
6422                        DPIO_AFC_RECAL);
6423
6424        mutex_unlock(&dev_priv->dpio_lock);
6425}
6426
6427/**
6428 * vlv_force_pll_on - forcibly enable just the PLL
6429 * @dev: drm device
6430 * @pipe: pipe PLL to enable
6431 * @dpll: PLL configuration
6432 *
6433 * Enable the PLL for @pipe using the supplied @dpll config. To be used
6434 * in cases where we need the PLL enabled even when @pipe is not going to
6435 * be enabled.
6436 */
6437void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
6438                      const struct dpll *dpll)
6439{
6440        struct intel_crtc *crtc =
6441                to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
6442        struct intel_crtc_state pipe_config = {
6443                .base.crtc = &crtc->base,
6444                .pixel_multiplier = 1,
6445                .dpll = *dpll,
6446        };
6447
6448        if (IS_CHERRYVIEW(dev)) {
6449                chv_update_pll(crtc, &pipe_config);
6450                chv_prepare_pll(crtc, &pipe_config);
6451                chv_enable_pll(crtc, &pipe_config);
6452        } else {
6453                vlv_update_pll(crtc, &pipe_config);
6454                vlv_prepare_pll(crtc, &pipe_config);
6455                vlv_enable_pll(crtc, &pipe_config);
6456        }
6457}
6458
6459/**
6460 * vlv_force_pll_off - forcibly disable just the PLL
6461 * @dev: drm device
6462 * @pipe: pipe PLL to disable
6463 *
6464 * Disable the PLL for @pipe. To be used in cases where the PLL was
6465 * previously force-enabled with vlv_force_pll_on() while @pipe stayed off.
6466 */
6467void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
6468{
6469        if (IS_CHERRYVIEW(dev))
6470                chv_disable_pll(to_i915(dev), pipe);
6471        else
6472                vlv_disable_pll(to_i915(dev), pipe);
6473}
6474
6475static void i9xx_update_pll(struct intel_crtc *crtc,
6476                            struct intel_crtc_state *crtc_state,
6477                            intel_clock_t *reduced_clock,
6478                            int num_connectors)
6479{
6480        struct drm_device *dev = crtc->base.dev;
6481        struct drm_i915_private *dev_priv = dev->dev_private;
6482        u32 dpll;
6483        bool is_sdvo;
6484        struct dpll *clock = &crtc_state->dpll;
6485
6486        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
6487
6488        is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
6489                intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
6490
6491        dpll = DPLL_VGA_MODE_DIS;
6492
6493        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
6494                dpll |= DPLLB_MODE_LVDS;
6495        else
6496                dpll |= DPLLB_MODE_DAC_SERIAL;
6497
6498        if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6499                dpll |= (crtc_state->pixel_multiplier - 1)
6500                        << SDVO_MULTIPLIER_SHIFT_HIRES;
6501        }
6502
6503        if (is_sdvo)
6504                dpll |= DPLL_SDVO_HIGH_SPEED;
6505
6506        if (crtc_state->has_dp_encoder)
6507                dpll |= DPLL_SDVO_HIGH_SPEED;
6508
6509        /* compute bitmask from p1 value */
6510        if (IS_PINEVIEW(dev))
6511                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
6512        else {
6513                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6514                if (IS_G4X(dev) && reduced_clock)
6515                        dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
6516        }
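        /*
         * Note that the P1 field is a one-hot bitmask rather than the raw
         * divider value, e.g. p1 == 3 ends up as 0b100 in the field.
         */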
6517        switch (clock->p2) {
6518        case 5:
6519                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
6520                break;
6521        case 7:
6522                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
6523                break;
6524        case 10:
6525                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
6526                break;
6527        case 14:
6528                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
6529                break;
6530        }
6531        if (INTEL_INFO(dev)->gen >= 4)
6532                dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
6533
6534        if (crtc_state->sdvo_tv_clock)
6535                dpll |= PLL_REF_INPUT_TVCLKINBC;
6536        else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6537                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6538                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6539        else
6540                dpll |= PLL_REF_INPUT_DREFCLK;
6541
6542        dpll |= DPLL_VCO_ENABLE;
6543        crtc_state->dpll_hw_state.dpll = dpll;
6544
6545        if (INTEL_INFO(dev)->gen >= 4) {
6546                u32 dpll_md = (crtc_state->pixel_multiplier - 1)
6547                        << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6548                crtc_state->dpll_hw_state.dpll_md = dpll_md;
6549        }
6550}
6551
6552static void i8xx_update_pll(struct intel_crtc *crtc,
6553                            struct intel_crtc_state *crtc_state,
6554                            intel_clock_t *reduced_clock,
6555                            int num_connectors)
6556{
6557        struct drm_device *dev = crtc->base.dev;
6558        struct drm_i915_private *dev_priv = dev->dev_private;
6559        u32 dpll;
6560        struct dpll *clock = &crtc_state->dpll;
6561
6562        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
6563
6564        dpll = DPLL_VGA_MODE_DIS;
6565
6566        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
6567                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6568        } else {
6569                if (clock->p1 == 2)
6570                        dpll |= PLL_P1_DIVIDE_BY_TWO;
6571                else
6572                        dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6573                if (clock->p2 == 4)
6574                        dpll |= PLL_P2_DIVIDE_BY_4;
6575        }
6576
6577        if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
6578                dpll |= DPLL_DVO_2X_MODE;
6579
6580        if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6581                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6582                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6583        else
6584                dpll |= PLL_REF_INPUT_DREFCLK;
6585
6586        dpll |= DPLL_VCO_ENABLE;
6587        crtc_state->dpll_hw_state.dpll = dpll;
6588}
6589
6590static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
6591{
6592        struct drm_device *dev = intel_crtc->base.dev;
6593        struct drm_i915_private *dev_priv = dev->dev_private;
6594        enum pipe pipe = intel_crtc->pipe;
6595        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
6596        struct drm_display_mode *adjusted_mode =
6597                &intel_crtc->config->base.adjusted_mode;
6598        uint32_t crtc_vtotal, crtc_vblank_end;
6599        int vsyncshift = 0;
6600
6601        /* We need to be careful not to change the adjusted mode, for otherwise
6602         * the hw state checker will get angry at the mismatch. */
6603        crtc_vtotal = adjusted_mode->crtc_vtotal;
6604        crtc_vblank_end = adjusted_mode->crtc_vblank_end;
6605
6606        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6607                /* the chip adds 2 halflines automatically */
6608                crtc_vtotal -= 1;
6609                crtc_vblank_end -= 1;
6610
6611                if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
6612                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
6613                else
6614                        vsyncshift = adjusted_mode->crtc_hsync_start -
6615                                adjusted_mode->crtc_htotal / 2;
6616                if (vsyncshift < 0)
6617                        vsyncshift += adjusted_mode->crtc_htotal;
6618        }
6619
6620        if (INTEL_INFO(dev)->gen > 3)
6621                I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
6622
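        /*
         * Each register below packs the active/start count minus one in the
         * low 16 bits and the total/end count minus one in the high 16 bits;
         * e.g. a 1920-wide mode with an htotal of 2200 writes 0x0897077f
         * into HTOTAL.
         */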
6623        I915_WRITE(HTOTAL(cpu_transcoder),
6624                   (adjusted_mode->crtc_hdisplay - 1) |
6625                   ((adjusted_mode->crtc_htotal - 1) << 16));
6626        I915_WRITE(HBLANK(cpu_transcoder),
6627                   (adjusted_mode->crtc_hblank_start - 1) |
6628                   ((adjusted_mode->crtc_hblank_end - 1) << 16));
6629        I915_WRITE(HSYNC(cpu_transcoder),
6630                   (adjusted_mode->crtc_hsync_start - 1) |
6631                   ((adjusted_mode->crtc_hsync_end - 1) << 16));
6632
6633        I915_WRITE(VTOTAL(cpu_transcoder),
6634                   (adjusted_mode->crtc_vdisplay - 1) |
6635                   ((crtc_vtotal - 1) << 16));
6636        I915_WRITE(VBLANK(cpu_transcoder),
6637                   (adjusted_mode->crtc_vblank_start - 1) |
6638                   ((crtc_vblank_end - 1) << 16));
6639        I915_WRITE(VSYNC(cpu_transcoder),
6640                   (adjusted_mode->crtc_vsync_start - 1) |
6641                   ((adjusted_mode->crtc_vsync_end - 1) << 16));
6642
6643        /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
6644         * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
6645         * documented on the DDI_FUNC_CTL register description, EDP Input Select
6646         * bits. */
6647        if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
6648            (pipe == PIPE_B || pipe == PIPE_C))
6649                I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
6650
6651        /* pipesrc controls the size that is scaled from, which should
6652         * always be the user's requested size.
6653         */
6654        I915_WRITE(PIPESRC(pipe),
6655                   ((intel_crtc->config->pipe_src_w - 1) << 16) |
6656                   (intel_crtc->config->pipe_src_h - 1));
6657}
6658
6659static void intel_get_pipe_timings(struct intel_crtc *crtc,
6660                                   struct intel_crtc_state *pipe_config)
6661{
6662        struct drm_device *dev = crtc->base.dev;
6663        struct drm_i915_private *dev_priv = dev->dev_private;
6664        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6665        uint32_t tmp;
6666
6667        tmp = I915_READ(HTOTAL(cpu_transcoder));
6668        pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
6669        pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
6670        tmp = I915_READ(HBLANK(cpu_transcoder));
6671        pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
6672        pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
6673        tmp = I915_READ(HSYNC(cpu_transcoder));
6674        pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
6675        pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
6676
6677        tmp = I915_READ(VTOTAL(cpu_transcoder));
6678        pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
6679        pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
6680        tmp = I915_READ(VBLANK(cpu_transcoder));
6681        pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
6682        pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
6683        tmp = I915_READ(VSYNC(cpu_transcoder));
6684        pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
6685        pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
6686
6687        if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
6688                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
6689                pipe_config->base.adjusted_mode.crtc_vtotal += 1;
6690                pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
6691        }
6692
6693        tmp = I915_READ(PIPESRC(crtc->pipe));
6694        pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
6695        pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
6696
6697        pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
6698        pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
6699}
6700
6701void intel_mode_from_pipe_config(struct drm_display_mode *mode,
6702                                 struct intel_crtc_state *pipe_config)
6703{
6704        mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
6705        mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
6706        mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
6707        mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
6708
6709        mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
6710        mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
6711        mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
6712        mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
6713
6714        mode->flags = pipe_config->base.adjusted_mode.flags;
6715
6716        mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
6717        mode->flags |= pipe_config->base.adjusted_mode.flags;
6718}
6719
6720static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6721{
6722        struct drm_device *dev = intel_crtc->base.dev;
6723        struct drm_i915_private *dev_priv = dev->dev_private;
6724        uint32_t pipeconf;
6725
6726        pipeconf = 0;
6727
6728        if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
6729            (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
6730                pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
6731
6732        if (intel_crtc->config->double_wide)
6733                pipeconf |= PIPECONF_DOUBLE_WIDE;
6734
6735        /* only g4x and later have fancy bpc/dither controls */
6736        if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6737                /* Bspec claims that we can't use dithering for 30bpp pipes. */
6738                if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
6739                        pipeconf |= PIPECONF_DITHER_EN |
6740                                    PIPECONF_DITHER_TYPE_SP;
6741
6742                switch (intel_crtc->config->pipe_bpp) {
6743                case 18:
6744                        pipeconf |= PIPECONF_6BPC;
6745                        break;
6746                case 24:
6747                        pipeconf |= PIPECONF_8BPC;
6748                        break;
6749                case 30:
6750                        pipeconf |= PIPECONF_10BPC;
6751                        break;
6752                default:
6753                        /* Case prevented by intel_choose_pipe_bpp_dither. */
6754                        BUG();
6755                }
6756        }
6757
6758        if (HAS_PIPE_CXSR(dev)) {
6759                if (intel_crtc->lowfreq_avail) {
6760                        DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6761                        pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6762                } else {
6763                        DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6764                }
6765        }
6766
6767        if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6768                if (INTEL_INFO(dev)->gen < 4 ||
6769                    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
6770                        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6771                else
6772                        pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
6773        } else
6774                pipeconf |= PIPECONF_PROGRESSIVE;
6775
6776        if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
6777                pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
6778
6779        I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
6780        POSTING_READ(PIPECONF(intel_crtc->pipe));
6781}
6782
6783static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
6784                                   struct intel_crtc_state *crtc_state)
6785{
6786        struct drm_device *dev = crtc->base.dev;
6787        struct drm_i915_private *dev_priv = dev->dev_private;
6788        int refclk, num_connectors = 0;
6789        intel_clock_t clock, reduced_clock;
6790        bool ok, has_reduced_clock = false;
6791        bool is_lvds = false, is_dsi = false;
6792        struct intel_encoder *encoder;
6793        const intel_limit_t *limit;
6794        struct drm_atomic_state *state = crtc_state->base.state;
6795        struct drm_connector_state *connector_state;
6796        int i;
6797
6798        for (i = 0; i < state->num_connector; i++) {
6799                if (!state->connectors[i])
6800                        continue;
6801
6802                connector_state = state->connector_states[i];
6803                if (connector_state->crtc != &crtc->base)
6804                        continue;
6805
6806                encoder = to_intel_encoder(connector_state->best_encoder);
6807
6808                switch (encoder->type) {
6809                case INTEL_OUTPUT_LVDS:
6810                        is_lvds = true;
6811                        break;
6812                case INTEL_OUTPUT_DSI:
6813                        is_dsi = true;
6814                        break;
6815                default:
6816                        break;
6817                }
6818
6819                num_connectors++;
6820        }
6821
6822        if (is_dsi)
6823                return 0;
6824
6825        if (!crtc_state->clock_set) {
6826                refclk = i9xx_get_refclk(crtc_state, num_connectors);
6827
6828                /*
6829                 * Returns a set of divisors for the desired target clock with
6830                 * the given refclk, or FALSE.  The returned values represent
6831                 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
6832                 * 2) / p1 / p2.
6833                 */
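                /*
                 * For illustration, refclk = 96000 kHz with m1 = 10, m2 = 8,
                 * n = 4, p1 = 2 and p2 = 10 works out to
                 * 96000 * (5 * 12 + 10) / 6 / 2 / 10 = 56000 kHz.
                 */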
6834                limit = intel_limit(crtc_state, refclk);
6835                ok = dev_priv->display.find_dpll(limit, crtc_state,
6836                                                 crtc_state->port_clock,
6837                                                 refclk, NULL, &clock);
6838                if (!ok) {
6839                        DRM_ERROR("Couldn't find PLL settings for mode!\n");
6840                        return -EINVAL;
6841                }
6842
6843                if (is_lvds && dev_priv->lvds_downclock_avail) {
6844                        /*
6845                         * Ensure we match the reduced clock's P to the target
6846                         * clock.  If the clocks don't match, we can't switch
6847                         * the display clock by using the FP0/FP1. In that case
6848                         * we disable the LVDS downclock feature.
6849                         */
6850                        has_reduced_clock =
6851                                dev_priv->display.find_dpll(limit, crtc_state,
6852                                                            dev_priv->lvds_downclock,
6853                                                            refclk, &clock,
6854                                                            &reduced_clock);
6855                }
6856                /* Compat-code for transition, will disappear. */
6857                crtc_state->dpll.n = clock.n;
6858                crtc_state->dpll.m1 = clock.m1;
6859                crtc_state->dpll.m2 = clock.m2;
6860                crtc_state->dpll.p1 = clock.p1;
6861                crtc_state->dpll.p2 = clock.p2;
6862        }
6863
6864        if (IS_GEN2(dev)) {
6865                i8xx_update_pll(crtc, crtc_state,
6866                                has_reduced_clock ? &reduced_clock : NULL,
6867                                num_connectors);
6868        } else if (IS_CHERRYVIEW(dev)) {
6869                chv_update_pll(crtc, crtc_state);
6870        } else if (IS_VALLEYVIEW(dev)) {
6871                vlv_update_pll(crtc, crtc_state);
6872        } else {
6873                i9xx_update_pll(crtc, crtc_state,
6874                                has_reduced_clock ? &reduced_clock : NULL,
6875                                num_connectors);
6876        }
6877
6878        return 0;
6879}
6880
6881static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6882                                 struct intel_crtc_state *pipe_config)
6883{
6884        struct drm_device *dev = crtc->base.dev;
6885        struct drm_i915_private *dev_priv = dev->dev_private;
6886        uint32_t tmp;
6887
6888        if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6889                return;
6890
6891        tmp = I915_READ(PFIT_CONTROL);
6892        if (!(tmp & PFIT_ENABLE))
6893                return;
6894
6895        /* Check whether the pfit is attached to our pipe. */
6896        if (INTEL_INFO(dev)->gen < 4) {
6897                if (crtc->pipe != PIPE_B)
6898                        return;
6899        } else {
6900                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6901                        return;
6902        }
6903
6904        pipe_config->gmch_pfit.control = tmp;
6905        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6906        if (INTEL_INFO(dev)->gen < 5)
6907                pipe_config->gmch_pfit.lvds_border_bits =
6908                        I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6909}
6910
6911static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6912                               struct intel_crtc_state *pipe_config)
6913{
6914        struct drm_device *dev = crtc->base.dev;
6915        struct drm_i915_private *dev_priv = dev->dev_private;
6916        int pipe = pipe_config->cpu_transcoder;
6917        intel_clock_t clock;
6918        u32 mdiv;
6919        int refclk = 100000;
6920
6921        /* In the case of MIPI (DSI) the DPLL will not even be used */
6922        if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
6923                return;
6924
6925        mutex_lock(&dev_priv->dpio_lock);
6926        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6927        mutex_unlock(&dev_priv->dpio_lock);
6928
6929        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6930        clock.m2 = mdiv & DPIO_M2DIV_MASK;
6931        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6932        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6933        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6934
6935        vlv_clock(refclk, &clock);
6936
6937        /* clock.dot is the fast clock, i.e. 5x the port clock */
6938        pipe_config->port_clock = clock.dot / 5;
6939}
6940
6941static void
6942i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6943                              struct intel_initial_plane_config *plane_config)
6944{
6945        struct drm_device *dev = crtc->base.dev;
6946        struct drm_i915_private *dev_priv = dev->dev_private;
6947        u32 val, base, offset;
6948        int pipe = crtc->pipe, plane = crtc->plane;
6949        int fourcc, pixel_format;
6950        unsigned int aligned_height;
6951        struct drm_framebuffer *fb;
6952        struct intel_framebuffer *intel_fb;
6953
6954        val = I915_READ(DSPCNTR(plane));
6955        if (!(val & DISPLAY_PLANE_ENABLE))
6956                return;
6957
6958        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6959        if (!intel_fb) {
6960                DRM_DEBUG_KMS("failed to alloc fb\n");
6961                return;
6962        }
6963
6964        fb = &intel_fb->base;
6965
6966        if (INTEL_INFO(dev)->gen >= 4) {
6967                if (val & DISPPLANE_TILED) {
6968                        plane_config->tiling = I915_TILING_X;
6969                        fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
6970                }
6971        }
6972
6973        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6974        fourcc = i9xx_format_to_fourcc(pixel_format);
6975        fb->pixel_format = fourcc;
6976        fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
6977
6978        if (INTEL_INFO(dev)->gen >= 4) {
6979                if (plane_config->tiling)
6980                        offset = I915_READ(DSPTILEOFF(plane));
6981                else
6982                        offset = I915_READ(DSPLINOFF(plane));
6983                base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6984        } else {
6985                base = I915_READ(DSPADDR(plane));
6986        }
6987        plane_config->base = base;
6988
6989        val = I915_READ(PIPESRC(pipe));
6990        fb->width = ((val >> 16) & 0xfff) + 1;
6991        fb->height = ((val >> 0) & 0xfff) + 1;
6992
6993        val = I915_READ(DSPSTRIDE(pipe));
6994        fb->pitches[0] = val & 0xffffffc0;
6995
6996        aligned_height = intel_fb_align_height(dev, fb->height,
6997                                               fb->pixel_format,
6998                                               fb->modifier[0]);
6999
7000        plane_config->size = fb->pitches[0] * aligned_height;
7001
7002        DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7003                      pipe_name(pipe), plane, fb->width, fb->height,
7004                      fb->bits_per_pixel, base, fb->pitches[0],
7005                      plane_config->size);
7006
7007        plane_config->fb = intel_fb;
7008}
7009
7010static void chv_crtc_clock_get(struct intel_crtc *crtc,
7011                               struct intel_crtc_state *pipe_config)
7012{
7013        struct drm_device *dev = crtc->base.dev;
7014        struct drm_i915_private *dev_priv = dev->dev_private;
7015        int pipe = pipe_config->cpu_transcoder;
7016        enum dpio_channel port = vlv_pipe_to_channel(pipe);
7017        intel_clock_t clock;
7018        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
7019        int refclk = 100000;
7020
7021        mutex_lock(&dev_priv->dpio_lock);
7022        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
7023        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
7024        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
7025        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
7026        mutex_unlock(&dev_priv->dpio_lock);
7027
7028        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
7029        clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
7030        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
7031        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
7032        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
7033
7034        chv_clock(refclk, &clock);
7035
7036        /* clock.dot is the fast clock, i.e. 5x the port clock */
7037        pipe_config->port_clock = clock.dot / 5;
7038}
7039
7040static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
7041                                 struct intel_crtc_state *pipe_config)
7042{
7043        struct drm_device *dev = crtc->base.dev;
7044        struct drm_i915_private *dev_priv = dev->dev_private;
7045        uint32_t tmp;
7046
7047        if (!intel_display_power_is_enabled(dev_priv,
7048                                            POWER_DOMAIN_PIPE(crtc->pipe)))
7049                return false;
7050
7051        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7052        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7053
7054        tmp = I915_READ(PIPECONF(crtc->pipe));
7055        if (!(tmp & PIPECONF_ENABLE))
7056                return false;
7057
7058        if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
7059                switch (tmp & PIPECONF_BPC_MASK) {
7060                case PIPECONF_6BPC:
7061                        pipe_config->pipe_bpp = 18;
7062                        break;
7063                case PIPECONF_8BPC:
7064                        pipe_config->pipe_bpp = 24;
7065                        break;
7066                case PIPECONF_10BPC:
7067                        pipe_config->pipe_bpp = 30;
7068                        break;
7069                default:
7070                        break;
7071                }
7072        }
7073
7074        if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
7075                pipe_config->limited_color_range = true;
7076
7077        if (INTEL_INFO(dev)->gen < 4)
7078                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
7079
7080        intel_get_pipe_timings(crtc, pipe_config);
7081
7082        i9xx_get_pfit_config(crtc, pipe_config);
7083
7084        if (INTEL_INFO(dev)->gen >= 4) {
7085                tmp = I915_READ(DPLL_MD(crtc->pipe));
7086                pipe_config->pixel_multiplier =
7087                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
7088                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
7089                pipe_config->dpll_hw_state.dpll_md = tmp;
7090        } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7091                tmp = I915_READ(DPLL(crtc->pipe));
7092                pipe_config->pixel_multiplier =
7093                        ((tmp & SDVO_MULTIPLIER_MASK)
7094                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
7095        } else {
7096                /* Note that on i915G/GM the pixel multiplier is in the sdvo
7097                 * port and will be fixed up in the encoder->get_config
7098                 * function. */
7099                pipe_config->pixel_multiplier = 1;
7100        }
7101        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
7102        if (!IS_VALLEYVIEW(dev)) {
7103                /*
7104                 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
7105                 * on 830. Filter it out here so that we don't
7106                 * report errors due to that.
7107                 */
7108                if (IS_I830(dev))
7109                        pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
7110
7111                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
7112                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
7113        } else {
7114                /* Mask out read-only status bits. */
7115                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
7116                                                     DPLL_PORTC_READY_MASK |
7117                                                     DPLL_PORTB_READY_MASK);
7118        }
7119
7120        if (IS_CHERRYVIEW(dev))
7121                chv_crtc_clock_get(crtc, pipe_config);
7122        else if (IS_VALLEYVIEW(dev))
7123                vlv_crtc_clock_get(crtc, pipe_config);
7124        else
7125                i9xx_crtc_clock_get(crtc, pipe_config);
7126
7127        return true;
7128}
7129
7130static void ironlake_init_pch_refclk(struct drm_device *dev)
7131{
7132        struct drm_i915_private *dev_priv = dev->dev_private;
7133        struct intel_encoder *encoder;
7134        u32 val, final;
7135        bool has_lvds = false;
7136        bool has_cpu_edp = false;
7137        bool has_panel = false;
7138        bool has_ck505 = false;
7139        bool can_ssc = false;
7140
7141        /* We need to take the global config into account */
7142        for_each_intel_encoder(dev, encoder) {
7143                switch (encoder->type) {
7144                case INTEL_OUTPUT_LVDS:
7145                        has_panel = true;
7146                        has_lvds = true;
7147                        break;
7148                case INTEL_OUTPUT_EDP:
7149                        has_panel = true;
7150                        if (enc_to_dig_port(&encoder->base)->port == PORT_A)
7151                                has_cpu_edp = true;
7152                        break;
7153                default:
7154                        break;
7155                }
7156        }
7157
7158        if (HAS_PCH_IBX(dev)) {
7159                has_ck505 = dev_priv->vbt.display_clock_mode;
7160                can_ssc = has_ck505;
7161        } else {
7162                has_ck505 = false;
7163                can_ssc = true;
7164        }
7165
7166        DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
7167                      has_panel, has_lvds, has_ck505);
7168
7169        /* Ironlake: try to set up the display reference clock before
7170         * enabling the DPLLs. This is only under the driver's control
7171         * after the PCH B stepping; earlier steppings should ignore
7172         * this setting.
7173         */
7174        val = I915_READ(PCH_DREF_CONTROL);
7175
7176        /* As we must carefully and slowly disable/enable each source in turn,
7177         * compute the final state we want first and check if we need to
7178         * make any changes at all.
7179         */
7180        final = val;
7181        final &= ~DREF_NONSPREAD_SOURCE_MASK;
7182        if (has_ck505)
7183                final |= DREF_NONSPREAD_CK505_ENABLE;
7184        else
7185                final |= DREF_NONSPREAD_SOURCE_ENABLE;
7186
7187        final &= ~DREF_SSC_SOURCE_MASK;
7188        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
7189        final &= ~DREF_SSC1_ENABLE;
7190
7191        if (has_panel) {
7192                final |= DREF_SSC_SOURCE_ENABLE;
7193
7194                if (intel_panel_use_ssc(dev_priv) && can_ssc)
7195                        final |= DREF_SSC1_ENABLE;
7196
7197                if (has_cpu_edp) {
7198                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
7199                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
7200                        else
7201                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
7202                } else
7203                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
7204        } else {
7205                final |= DREF_SSC_SOURCE_DISABLE;
7206                final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
7207        }
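            /*
             * Worked example (hypothetical configuration): an LVDS panel
             * with SSC in use, no CK505 and no CPU eDP ends up with
             *
             *   final = DREF_NONSPREAD_SOURCE_ENABLE | DREF_SSC_SOURCE_ENABLE |
             *           DREF_SSC1_ENABLE | DREF_CPU_SOURCE_OUTPUT_DISABLE;
             *
             * and the code below walks PCH_DREF_CONTROL towards that value
             * one source at a time.
             */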
7208
7209        if (final == val)
7210                return;
7211
7212        /* Always enable nonspread source */
7213        val &= ~DREF_NONSPREAD_SOURCE_MASK;
7214
7215        if (has_ck505)
7216                val |= DREF_NONSPREAD_CK505_ENABLE;
7217        else
7218                val |= DREF_NONSPREAD_SOURCE_ENABLE;
7219
7220        if (has_panel) {
7221                val &= ~DREF_SSC_SOURCE_MASK;
7222                val |= DREF_SSC_SOURCE_ENABLE;
7223
7224                /* SSC must be turned on before enabling the CPU output  */
7225                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
7226                        DRM_DEBUG_KMS("Using SSC on panel\n");
7227                        val |= DREF_SSC1_ENABLE;
7228                } else
7229                        val &= ~DREF_SSC1_ENABLE;
7230
7231                /* Get SSC going before enabling the outputs */
7232                I915_WRITE(PCH_DREF_CONTROL, val);
7233                POSTING_READ(PCH_DREF_CONTROL);
7234                udelay(200);
7235
7236                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
7237
7238                /* Enable CPU source on CPU attached eDP */
7239                if (has_cpu_edp) {
7240                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
7241                                DRM_DEBUG_KMS("Using SSC on eDP\n");
7242                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
7243                        } else
7244                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
7245                } else
7246                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
7247
7248                I915_WRITE(PCH_DREF_CONTROL, val);
7249                POSTING_READ(PCH_DREF_CONTROL);
7250                udelay(200);
7251        } else {
7252                DRM_DEBUG_KMS("Disabling SSC entirely\n");
7253
7254                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
7255
7256                /* Turn off CPU output */
7257                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
7258
7259                I915_WRITE(PCH_DREF_CONTROL, val);
7260                POSTING_READ(PCH_DREF_CONTROL);
7261                udelay(200);
7262
7263                /* Turn off the SSC source */
7264                val &= ~DREF_SSC_SOURCE_MASK;
7265                val |= DREF_SSC_SOURCE_DISABLE;
7266
7267                /* Turn off SSC1 */
7268                val &= ~DREF_SSC1_ENABLE;
7269
7270                I915_WRITE(PCH_DREF_CONTROL, val);
7271                POSTING_READ(PCH_DREF_CONTROL);
7272                udelay(200);
7273        }
7274
7275        BUG_ON(val != final);
7276}
7277
7278static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
7279{
7280        uint32_t tmp;
7281
7282        tmp = I915_READ(SOUTH_CHICKEN2);
7283        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
7284        I915_WRITE(SOUTH_CHICKEN2, tmp);
7285
7286        if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
7287                               FDI_MPHY_IOSFSB_RESET_STATUS, 100))
7288                DRM_ERROR("FDI mPHY reset assert timeout\n");
7289
7290        tmp = I915_READ(SOUTH_CHICKEN2);
7291        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
7292        I915_WRITE(SOUTH_CHICKEN2, tmp);
7293
7294        if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
7295                                FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
7296                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
7297}
7298
7299/* WaMPhyProgramming:hsw */
7300static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
7301{
7302        uint32_t tmp;
7303
7304        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
7305        tmp &= ~(0xFF << 24);
7306        tmp |= (0x12 << 24);
7307        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
7308
7309        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
7310        tmp |= (1 << 11);
7311        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
7312
7313        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
7314        tmp |= (1 << 11);
7315        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
7316
7317        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
7318        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
7319        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
7320
7321        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
7322        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
7323        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
7324
7325        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
7326        tmp &= ~(7 << 13);
7327        tmp |= (5 << 13);
7328        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
7329
7330        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
7331        tmp &= ~(7 << 13);
7332        tmp |= (5 << 13);
7333        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
7334
7335        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
7336        tmp &= ~0xFF;
7337        tmp |= 0x1C;
7338        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
7339
7340        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
7341        tmp &= ~0xFF;
7342        tmp |= 0x1C;
7343        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
7344
7345        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
7346        tmp &= ~(0xFF << 16);
7347        tmp |= (0x1C << 16);
7348        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
7349
7350        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
7351        tmp &= ~(0xFF << 16);
7352        tmp |= (0x1C << 16);
7353        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
7354
7355        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
7356        tmp |= (1 << 27);
7357        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
7358
7359        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
7360        tmp |= (1 << 27);
7361        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
7362
7363        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
7364        tmp &= ~(0xF << 28);
7365        tmp |= (4 << 28);
7366        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
7367
7368        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
7369        tmp &= ~(0xF << 28);
7370        tmp |= (4 << 28);
7371        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
7372}
7373
7374/* Implements 3 different sequences from BSpec chapter "Display iCLK
7375 * Programming" based on the parameters passed:
7376 * - Sequence to enable CLKOUT_DP
7377 * - Sequence to enable CLKOUT_DP without spread
7378 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
7379 */
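    /*
     * Rough parameter-to-sequence mapping, as implied by the code below and
     * its callers in this file:
     *   with_spread=false, with_fdi=false -> CLKOUT_DP without spread
     *   with_spread=true,  with_fdi=false -> CLKOUT_DP with spread
     *   with_spread=true,  with_fdi=true  -> CLKOUT_DP for FDI (the FDI mPHY
     *                                        is also reset and programmed)
     * Asking for with_fdi without with_spread is rejected (see the WARNs).
     */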
7380static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
7381                                 bool with_fdi)
7382{
7383        struct drm_i915_private *dev_priv = dev->dev_private;
7384        uint32_t reg, tmp;
7385
7386        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
7387                with_spread = true;
7388        if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
7389                 with_fdi, "LP PCH doesn't have FDI\n"))
7390                with_fdi = false;
7391
7392        mutex_lock(&dev_priv->dpio_lock);
7393
7394        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7395        tmp &= ~SBI_SSCCTL_DISABLE;
7396        tmp |= SBI_SSCCTL_PATHALT;
7397        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7398
7399        udelay(24);
7400
7401        if (with_spread) {
7402                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7403                tmp &= ~SBI_SSCCTL_PATHALT;
7404                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7405
7406                if (with_fdi) {
7407                        lpt_reset_fdi_mphy(dev_priv);
7408                        lpt_program_fdi_mphy(dev_priv);
7409                }
7410        }
7411
7412        reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
7413               SBI_GEN0 : SBI_DBUFF0;
7414        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7415        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7416        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7417
7418        mutex_unlock(&dev_priv->dpio_lock);
7419}
7420
7421/* Sequence to disable CLKOUT_DP */
7422static void lpt_disable_clkout_dp(struct drm_device *dev)
7423{
7424        struct drm_i915_private *dev_priv = dev->dev_private;
7425        uint32_t reg, tmp;
7426
7427        mutex_lock(&dev_priv->dpio_lock);
7428
7429        reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
7430               SBI_GEN0 : SBI_DBUFF0;
7431        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7432        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7433        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7434
7435        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7436        if (!(tmp & SBI_SSCCTL_DISABLE)) {
7437                if (!(tmp & SBI_SSCCTL_PATHALT)) {
7438                        tmp |= SBI_SSCCTL_PATHALT;
7439                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7440                        udelay(32);
7441                }
7442                tmp |= SBI_SSCCTL_DISABLE;
7443                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7444        }
7445
7446        mutex_unlock(&dev_priv->dpio_lock);
7447}
7448
7449static void lpt_init_pch_refclk(struct drm_device *dev)
7450{
7451        struct intel_encoder *encoder;
7452        bool has_vga = false;
7453
7454        for_each_intel_encoder(dev, encoder) {
7455                switch (encoder->type) {
7456                case INTEL_OUTPUT_ANALOG:
7457                        has_vga = true;
7458                        break;
7459                default:
7460                        break;
7461                }
7462        }
7463
7464        if (has_vga)
7465                lpt_enable_clkout_dp(dev, true, true);
7466        else
7467                lpt_disable_clkout_dp(dev);
7468}
7469
7470/*
7471 * Initialize reference clocks when the driver loads
7472 */
7473void intel_init_pch_refclk(struct drm_device *dev)
7474{
7475        if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
7476                ironlake_init_pch_refclk(dev);
7477        else if (HAS_PCH_LPT(dev))
7478                lpt_init_pch_refclk(dev);
7479}
7480
7481static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
7482{
7483        struct drm_device *dev = crtc_state->base.crtc->dev;
7484        struct drm_i915_private *dev_priv = dev->dev_private;
7485        struct drm_atomic_state *state = crtc_state->base.state;
7486        struct drm_connector_state *connector_state;
7487        struct intel_encoder *encoder;
7488        int num_connectors = 0, i;
7489        bool is_lvds = false;
7490
7491        for (i = 0; i < state->num_connector; i++) {
7492                if (!state->connectors[i])
7493                        continue;
7494
7495                connector_state = state->connector_states[i];
7496                if (connector_state->crtc != crtc_state->base.crtc)
7497                        continue;
7498
7499                encoder = to_intel_encoder(connector_state->best_encoder);
7500
7501                switch (encoder->type) {
7502                case INTEL_OUTPUT_LVDS:
7503                        is_lvds = true;
7504                        break;
7505                default:
7506                        break;
7507                }
7508                num_connectors++;
7509        }
7510
7511        if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7512                DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
7513                              dev_priv->vbt.lvds_ssc_freq);
7514                return dev_priv->vbt.lvds_ssc_freq;
7515        }
7516
7517        return 120000;
7518}
7519
7520static void ironlake_set_pipeconf(struct drm_crtc *crtc)
7521{
7522        struct drm_i915_private *dev_priv = crtc->dev->dev_private;
7523        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7524        int pipe = intel_crtc->pipe;
7525        uint32_t val;
7526
7527        val = 0;
7528
7529        switch (intel_crtc->config->pipe_bpp) {
7530        case 18:
7531                val |= PIPECONF_6BPC;
7532                break;
7533        case 24:
7534                val |= PIPECONF_8BPC;
7535                break;
7536        case 30:
7537                val |= PIPECONF_10BPC;
7538                break;
7539        case 36:
7540                val |= PIPECONF_12BPC;
7541                break;
7542        default:
7543                /* Case prevented by intel_choose_pipe_bpp_dither. */
7544                BUG();
7545        }
7546
7547        if (intel_crtc->config->dither)
7548                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7549
7550        if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7551                val |= PIPECONF_INTERLACED_ILK;
7552        else
7553                val |= PIPECONF_PROGRESSIVE;
7554
7555        if (intel_crtc->config->limited_color_range)
7556                val |= PIPECONF_COLOR_RANGE_SELECT;
7557
7558        I915_WRITE(PIPECONF(pipe), val);
7559        POSTING_READ(PIPECONF(pipe));
7560}
7561
7562/*
7563 * Set up the pipe CSC unit.
7564 *
7565 * Currently only full range RGB to limited range RGB conversion
7566 * is supported, but eventually this should handle various
7567 * RGB<->YCbCr scenarios as well.
7568 */
7569static void intel_set_pipe_csc(struct drm_crtc *crtc)
7570{
7571        struct drm_device *dev = crtc->dev;
7572        struct drm_i915_private *dev_priv = dev->dev_private;
7573        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7574        int pipe = intel_crtc->pipe;
7575        uint16_t coeff = 0x7800; /* 1.0 */
7576
7577        /*
7578         * TODO: Check what kind of values actually come out of the pipe
7579         * with these coeff/postoff values and adjust to get the best
7580         * accuracy. Perhaps we even need to take the bpc value into
7581         * consideration.
7582         */
7583
7584        if (intel_crtc->config->limited_color_range)
7585                coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
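                    /*
                     * Illustrative arithmetic for the limited-range case:
                     * (235 - 16) * 4096 / 255 = 3517, masked with 0xff8 to
                     * 0xdb8. The intended scale factor is roughly 219/255
                     * (~0.86), compressing full-range RGB towards the
                     * 16..235 limited range.
                     */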
7586
7587        /*
7588         * GY/GU and RY/RU should be the other way around according
7589         * to BSpec, but reality doesn't agree. Just set them up in
7590         * a way that results in the correct picture.
7591         */
7592        I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
7593        I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
7594
7595        I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
7596        I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
7597
7598        I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
7599        I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
7600
7601        I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
7602        I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
7603        I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
7604
7605        if (INTEL_INFO(dev)->gen > 6) {
7606                uint16_t postoff = 0;
7607
7608                if (intel_crtc->config->limited_color_range)
7609                        postoff = (16 * (1 << 12) / 255) & 0x1fff;
7610
7611                I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
7612                I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
7613                I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
7614
7615                I915_WRITE(PIPE_CSC_MODE(pipe), 0);
7616        } else {
7617                uint32_t mode = CSC_MODE_YUV_TO_RGB;
7618
7619                if (intel_crtc->config->limited_color_range)
7620                        mode |= CSC_BLACK_SCREEN_OFFSET;
7621
7622                I915_WRITE(PIPE_CSC_MODE(pipe), mode);
7623        }
7624}
7625
7626static void haswell_set_pipeconf(struct drm_crtc *crtc)
7627{
7628        struct drm_device *dev = crtc->dev;
7629        struct drm_i915_private *dev_priv = dev->dev_private;
7630        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7631        enum pipe pipe = intel_crtc->pipe;
7632        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7633        uint32_t val;
7634
7635        val = 0;
7636
7637        if (IS_HASWELL(dev) && intel_crtc->config->dither)
7638                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7639
7640        if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7641                val |= PIPECONF_INTERLACED_ILK;
7642        else
7643                val |= PIPECONF_PROGRESSIVE;
7644
7645        I915_WRITE(PIPECONF(cpu_transcoder), val);
7646        POSTING_READ(PIPECONF(cpu_transcoder));
7647
7648        I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
7649        POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
7650
7651        if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
7652                val = 0;
7653
7654                switch (intel_crtc->config->pipe_bpp) {
7655                case 18:
7656                        val |= PIPEMISC_DITHER_6_BPC;
7657                        break;
7658                case 24:
7659                        val |= PIPEMISC_DITHER_8_BPC;
7660                        break;
7661                case 30:
7662                        val |= PIPEMISC_DITHER_10_BPC;
7663                        break;
7664                case 36:
7665                        val |= PIPEMISC_DITHER_12_BPC;
7666                        break;
7667                default:
7668                        /* Case prevented by pipe_config_set_bpp. */
7669                        BUG();
7670                }
7671
7672                if (intel_crtc->config->dither)
7673                        val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
7674
7675                I915_WRITE(PIPEMISC(pipe), val);
7676        }
7677}
7678
7679static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7680                                    struct intel_crtc_state *crtc_state,
7681                                    intel_clock_t *clock,
7682                                    bool *has_reduced_clock,
7683                                    intel_clock_t *reduced_clock)
7684{
7685        struct drm_device *dev = crtc->dev;
7686        struct drm_i915_private *dev_priv = dev->dev_private;
7687        int refclk;
7688        const intel_limit_t *limit;
7689        bool ret, is_lvds = false;
7690
7691        is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
7692
7693        refclk = ironlake_get_refclk(crtc_state);
7694
7695        /*
7696         * Returns a set of divisors for the desired target clock with the given
7697         * refclk, or FALSE.  The returned values represent the clock equation:
7698         * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
7699         */
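            /*
             * Worked example with hypothetical divisors (not from any real
             * mode): refclk = 120000 kHz, m1 = 12, m2 = 9, n = 3, p1 = 2,
             * p2 = 10 gives 120000 * (5 * 14 + 11) / 5 / 2 / 10 = 97200 kHz.
             */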
7700        limit = intel_limit(crtc_state, refclk);
7701        ret = dev_priv->display.find_dpll(limit, crtc_state,
7702                                          crtc_state->port_clock,
7703                                          refclk, NULL, clock);
7704        if (!ret)
7705                return false;
7706
7707        if (is_lvds && dev_priv->lvds_downclock_avail) {
7708                /*
7709                 * Ensure we match the reduced clock's P to the target clock.
7710                 * If the clocks don't match, we can't switch the display clock
7711                 * by using the FP0/FP1. In that case we will disable the
7712                 * LVDS downclock feature.
7713                 */
7714                *has_reduced_clock =
7715                        dev_priv->display.find_dpll(limit, crtc_state,
7716                                                    dev_priv->lvds_downclock,
7717                                                    refclk, clock,
7718                                                    reduced_clock);
7719        }
7720
7721        return true;
7722}
7723
7724int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
7725{
7726        /*
7727         * Account for spread spectrum to avoid
7728         * oversubscribing the link. Max center spread
7729         * is 2.5%; use 5% for safety's sake.
7730         */
7731        u32 bps = target_clock * bpp * 21 / 20;
7732        return DIV_ROUND_UP(bps, link_bw * 8);
7733}
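    /*
     * Worked example with hypothetical numbers: target_clock = 270000 kHz,
     * bpp = 24 and link_bw = 270000 kHz give
     * bps = 270000 * 24 * 21 / 20 = 6804000 and
     * DIV_ROUND_UP(6804000, 270000 * 8) = 4 lanes.
     */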
7734
7735static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
7736{
7737        return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
7738}
7739
7740static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
7741                                      struct intel_crtc_state *crtc_state,
7742                                      u32 *fp,
7743                                      intel_clock_t *reduced_clock, u32 *fp2)
7744{
7745        struct drm_crtc *crtc = &intel_crtc->base;
7746        struct drm_device *dev = crtc->dev;
7747        struct drm_i915_private *dev_priv = dev->dev_private;
7748        struct drm_atomic_state *state = crtc_state->base.state;
7749        struct drm_connector_state *connector_state;
7750        struct intel_encoder *encoder;
7751        uint32_t dpll;
7752        int factor, num_connectors = 0, i;
7753        bool is_lvds = false, is_sdvo = false;
7754
7755        for (i = 0; i < state->num_connector; i++) {
7756                if (!state->connectors[i])
7757                        continue;
7758
7759                connector_state = state->connector_states[i];
7760                if (connector_state->crtc != crtc_state->base.crtc)
7761                        continue;
7762
7763                encoder = to_intel_encoder(connector_state->best_encoder);
7764
7765                switch (encoder->type) {
7766                case INTEL_OUTPUT_LVDS:
7767                        is_lvds = true;
7768                        break;
7769                case INTEL_OUTPUT_SDVO:
7770                case INTEL_OUTPUT_HDMI:
7771                        is_sdvo = true;
7772                        break;
7773                default:
7774                        break;
7775                }
7776
7777                num_connectors++;
7778        }
7779
7780        /* Enable autotuning of the PLL clock (if permissible) */
7781        factor = 21;
7782        if (is_lvds) {
7783                if ((intel_panel_use_ssc(dev_priv) &&
7784                     dev_priv->vbt.lvds_ssc_freq == 100000) ||
7785                    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
7786                        factor = 25;
7787        } else if (crtc_state->sdvo_tv_clock)
7788                factor = 20;
7789
7790        if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
7791                *fp |= FP_CB_TUNE;
7792
7793        if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
7794                *fp2 |= FP_CB_TUNE;
7795
7796        dpll = 0;
7797
7798        if (is_lvds)
7799                dpll |= DPLLB_MODE_LVDS;
7800        else
7801                dpll |= DPLLB_MODE_DAC_SERIAL;
7802
7803        dpll |= (crtc_state->pixel_multiplier - 1)
7804                << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
7805
7806        if (is_sdvo)
7807                dpll |= DPLL_SDVO_HIGH_SPEED;
7808        if (crtc_state->has_dp_encoder)
7809                dpll |= DPLL_SDVO_HIGH_SPEED;
7810
7811        /* compute bitmask from p1 value */
7812        dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7813        /* also FPA1 */
7814        dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7815
7816        switch (crtc_state->dpll.p2) {
7817        case 5:
7818                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7819                break;
7820        case 7:
7821                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7822                break;
7823        case 10:
7824                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7825                break;
7826        case 14:
7827                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7828                break;
7829        }
7830
7831        if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7832                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7833        else
7834                dpll |= PLL_REF_INPUT_DREFCLK;
7835
7836        return dpll | DPLL_VCO_ENABLE;
7837}
7838
7839static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
7840                                       struct intel_crtc_state *crtc_state)
7841{
7842        struct drm_device *dev = crtc->base.dev;
7843        intel_clock_t clock, reduced_clock;
7844        u32 dpll = 0, fp = 0, fp2 = 0;
7845        bool ok, has_reduced_clock = false;
7846        bool is_lvds = false;
7847        struct intel_shared_dpll *pll;
7848
7849        is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
7850
7851        WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
7852             "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
7853
7854        ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
7855                                     &has_reduced_clock, &reduced_clock);
7856        if (!ok && !crtc_state->clock_set) {
7857                DRM_ERROR("Couldn't find PLL settings for mode!\n");
7858                return -EINVAL;
7859        }
7860        /* Compat-code for transition, will disappear. */
7861        if (!crtc_state->clock_set) {
7862                crtc_state->dpll.n = clock.n;
7863                crtc_state->dpll.m1 = clock.m1;
7864                crtc_state->dpll.m2 = clock.m2;
7865                crtc_state->dpll.p1 = clock.p1;
7866                crtc_state->dpll.p2 = clock.p2;
7867        }
7868
7869        /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
7870        if (crtc_state->has_pch_encoder) {
7871                fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7872                if (has_reduced_clock)
7873                        fp2 = i9xx_dpll_compute_fp(&reduced_clock);
7874
7875                dpll = ironlake_compute_dpll(crtc, crtc_state,
7876                                             &fp, &reduced_clock,
7877                                             has_reduced_clock ? &fp2 : NULL);
7878
7879                crtc_state->dpll_hw_state.dpll = dpll;
7880                crtc_state->dpll_hw_state.fp0 = fp;
7881                if (has_reduced_clock)
7882                        crtc_state->dpll_hw_state.fp1 = fp2;
7883                else
7884                        crtc_state->dpll_hw_state.fp1 = fp;
7885
7886                pll = intel_get_shared_dpll(crtc, crtc_state);
7887                if (pll == NULL) {
7888                        DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
7889                                         pipe_name(crtc->pipe));
7890                        return -EINVAL;
7891                }
7892        }
7893
7894        if (is_lvds && has_reduced_clock)
7895                crtc->lowfreq_avail = true;
7896        else
7897                crtc->lowfreq_avail = false;
7898
7899        return 0;
7900}
7901
7902static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7903                                         struct intel_link_m_n *m_n)
7904{
7905        struct drm_device *dev = crtc->base.dev;
7906        struct drm_i915_private *dev_priv = dev->dev_private;
7907        enum pipe pipe = crtc->pipe;
7908
7909        m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
7910        m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
7911        m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
7912                & ~TU_SIZE_MASK;
7913        m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
7914        m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
7915                    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7916}
7917
7918static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7919                                         enum transcoder transcoder,
7920                                         struct intel_link_m_n *m_n,
7921                                         struct intel_link_m_n *m2_n2)
7922{
7923        struct drm_device *dev = crtc->base.dev;
7924        struct drm_i915_private *dev_priv = dev->dev_private;
7925        enum pipe pipe = crtc->pipe;
7926
7927        if (INTEL_INFO(dev)->gen >= 5) {
7928                m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
7929                m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
7930                m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
7931                        & ~TU_SIZE_MASK;
7932                m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
7933                m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
7934                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7935                /* Read the M2_N2 registers only on gen < 8, where they
7936                 * exist, and only if DRRS is supported, so that the
7937                 * registers are not read unnecessarily.
7938                 */
7939                if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
7940                        crtc->config->has_drrs) {
7941                        m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
7942                        m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
7943                        m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
7944                                        & ~TU_SIZE_MASK;
7945                        m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
7946                        m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
7947                                        & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7948                }
7949        } else {
7950                m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
7951                m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
7952                m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
7953                        & ~TU_SIZE_MASK;
7954                m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
7955                m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
7956                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7957        }
7958}
7959
7960void intel_dp_get_m_n(struct intel_crtc *crtc,
7961                      struct intel_crtc_state *pipe_config)
7962{
7963        if (pipe_config->has_pch_encoder)
7964                intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7965        else
7966                intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7967                                             &pipe_config->dp_m_n,
7968                                             &pipe_config->dp_m2_n2);
7969}
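    /*
     * Background note (standard DisplayPort terminology, kept brief): the
     * "data" M/N pair read into gmch_m/gmch_n above corresponds to the DP
     * data M/N (stream payload vs. link bandwidth), while link_m/link_n
     * corresponds to the DP link M/N (pixel clock vs. link symbol clock);
     * here they are only read back as part of pipe config readout.
     */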
7970
7971static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7972                                        struct intel_crtc_state *pipe_config)
7973{
7974        intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7975                                     &pipe_config->fdi_m_n, NULL);
7976}
7977
7978static void skylake_get_pfit_config(struct intel_crtc *crtc,
7979                                    struct intel_crtc_state *pipe_config)
7980{
7981        struct drm_device *dev = crtc->base.dev;
7982        struct drm_i915_private *dev_priv = dev->dev_private;
7983        uint32_t tmp;
7984
7985        tmp = I915_READ(PS_CTL(crtc->pipe));
7986
7987        if (tmp & PS_ENABLE) {
7988                pipe_config->pch_pfit.enabled = true;
7989                pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
7990                pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
7991        }
7992}
7993
7994static void
7995skylake_get_initial_plane_config(struct intel_crtc *crtc,
7996                                 struct intel_initial_plane_config *plane_config)
7997{
7998        struct drm_device *dev = crtc->base.dev;
7999        struct drm_i915_private *dev_priv = dev->dev_private;
8000        u32 val, base, offset, stride_mult, tiling;
8001        int pipe = crtc->pipe;
8002        int fourcc, pixel_format;
8003        unsigned int aligned_height;
8004        struct drm_framebuffer *fb;
8005        struct intel_framebuffer *intel_fb;
8006
8007        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8008        if (!intel_fb) {
8009                DRM_DEBUG_KMS("failed to alloc fb\n");
8010                return;
8011        }
8012
8013        fb = &intel_fb->base;
8014
8015        val = I915_READ(PLANE_CTL(pipe, 0));
8016        if (!(val & PLANE_CTL_ENABLE))
8017                goto error;
8018
8019        pixel_format = val & PLANE_CTL_FORMAT_MASK;
8020        fourcc = skl_format_to_fourcc(pixel_format,
8021                                      val & PLANE_CTL_ORDER_RGBX,
8022                                      val & PLANE_CTL_ALPHA_MASK);
8023        fb->pixel_format = fourcc;
8024        fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8025
8026        tiling = val & PLANE_CTL_TILED_MASK;
8027        switch (tiling) {
8028        case PLANE_CTL_TILED_LINEAR:
8029                fb->modifier[0] = DRM_FORMAT_MOD_NONE;
8030                break;
8031        case PLANE_CTL_TILED_X:
8032                plane_config->tiling = I915_TILING_X;
8033                fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8034                break;
8035        case PLANE_CTL_TILED_Y:
8036                fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
8037                break;
8038        case PLANE_CTL_TILED_YF:
8039                fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
8040                break;
8041        default:
8042                MISSING_CASE(tiling);
8043                goto error;
8044        }
8045
8046        base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
8047        plane_config->base = base;
8048
8049        offset = I915_READ(PLANE_OFFSET(pipe, 0));
8050
8051        val = I915_READ(PLANE_SIZE(pipe, 0));
8052        fb->height = ((val >> 16) & 0xfff) + 1;
8053        fb->width = ((val >> 0) & 0x1fff) + 1;
8054
8055        val = I915_READ(PLANE_STRIDE(pipe, 0));
8056        stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
8057                                                fb->pixel_format);
8058        fb->pitches[0] = (val & 0x3ff) * stride_mult;
8059
8060        aligned_height = intel_fb_align_height(dev, fb->height,
8061                                               fb->pixel_format,
8062                                               fb->modifier[0]);
8063
8064        plane_config->size = fb->pitches[0] * aligned_height;
8065
8066        DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8067                      pipe_name(pipe), fb->width, fb->height,
8068                      fb->bits_per_pixel, base, fb->pitches[0],
8069                      plane_config->size);
8070
8071        plane_config->fb = intel_fb;
8072        return;
8073
8074error:
8075        kfree(fb);
8076}
8077
8078static void ironlake_get_pfit_config(struct intel_crtc *crtc,
8079                                     struct intel_crtc_state *pipe_config)
8080{
8081        struct drm_device *dev = crtc->base.dev;
8082        struct drm_i915_private *dev_priv = dev->dev_private;
8083        uint32_t tmp;
8084
8085        tmp = I915_READ(PF_CTL(crtc->pipe));
8086
8087        if (tmp & PF_ENABLE) {
8088                pipe_config->pch_pfit.enabled = true;
8089                pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
8090                pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
8091
8092                /* We currently do not free assignments of panel fitters on
8093                 * ivb/hsw (since we don't use the higher upscaling modes which
8094                 * differentiate them), so just WARN about this case for now. */
8095                if (IS_GEN7(dev)) {
8096                        WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
8097                                PF_PIPE_SEL_IVB(crtc->pipe));
8098                }
8099        }
8100}
8101
8102static void
8103ironlake_get_initial_plane_config(struct intel_crtc *crtc,
8104                                  struct intel_initial_plane_config *plane_config)
8105{
8106        struct drm_device *dev = crtc->base.dev;
8107        struct drm_i915_private *dev_priv = dev->dev_private;
8108        u32 val, base, offset;
8109        int pipe = crtc->pipe;
8110        int fourcc, pixel_format;
8111        unsigned int aligned_height;
8112        struct drm_framebuffer *fb;
8113        struct intel_framebuffer *intel_fb;
8114
8115        val = I915_READ(DSPCNTR(pipe));
8116        if (!(val & DISPLAY_PLANE_ENABLE))
8117                return;
8118
8119        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8120        if (!intel_fb) {
8121                DRM_DEBUG_KMS("failed to alloc fb\n");
8122                return;
8123        }
8124
8125        fb = &intel_fb->base;
8126
8127        if (INTEL_INFO(dev)->gen >= 4) {
8128                if (val & DISPPLANE_TILED) {
8129                        plane_config->tiling = I915_TILING_X;
8130                        fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8131                }
8132        }
8133
8134        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8135        fourcc = i9xx_format_to_fourcc(pixel_format);
8136        fb->pixel_format = fourcc;
8137        fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8138
8139        base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
8140        if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
8141                offset = I915_READ(DSPOFFSET(pipe));
8142        } else {
8143                if (plane_config->tiling)
8144                        offset = I915_READ(DSPTILEOFF(pipe));
8145                else
8146                        offset = I915_READ(DSPLINOFF(pipe));
8147        }
8148        plane_config->base = base;
8149
8150        val = I915_READ(PIPESRC(pipe));
8151        fb->width = ((val >> 16) & 0xfff) + 1;
8152        fb->height = ((val >> 0) & 0xfff) + 1;
8153
8154        val = I915_READ(DSPSTRIDE(pipe));
8155        fb->pitches[0] = val & 0xffffffc0;
8156
8157        aligned_height = intel_fb_align_height(dev, fb->height,
8158                                               fb->pixel_format,
8159                                               fb->modifier[0]);
8160
8161        plane_config->size = fb->pitches[0] * aligned_height;
8162
8163        DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8164                      pipe_name(pipe), fb->width, fb->height,
8165                      fb->bits_per_pixel, base, fb->pitches[0],
8166                      plane_config->size);
8167
8168        plane_config->fb = intel_fb;
8169}
8170
8171static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
8172                                     struct intel_crtc_state *pipe_config)
8173{
8174        struct drm_device *dev = crtc->base.dev;
8175        struct drm_i915_private *dev_priv = dev->dev_private;
8176        uint32_t tmp;
8177
8178        if (!intel_display_power_is_enabled(dev_priv,
8179                                            POWER_DOMAIN_PIPE(crtc->pipe)))
8180                return false;
8181
8182        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8183        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8184
8185        tmp = I915_READ(PIPECONF(crtc->pipe));
8186        if (!(tmp & PIPECONF_ENABLE))
8187                return false;
8188
8189        switch (tmp & PIPECONF_BPC_MASK) {
8190        case PIPECONF_6BPC:
8191                pipe_config->pipe_bpp = 18;
8192                break;
8193        case PIPECONF_8BPC:
8194                pipe_config->pipe_bpp = 24;
8195                break;
8196        case PIPECONF_10BPC:
8197                pipe_config->pipe_bpp = 30;
8198                break;
8199        case PIPECONF_12BPC:
8200                pipe_config->pipe_bpp = 36;
8201                break;
8202        default:
8203                break;
8204        }
8205
8206        if (tmp & PIPECONF_COLOR_RANGE_SELECT)
8207                pipe_config->limited_color_range = true;
8208
8209        if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
8210                struct intel_shared_dpll *pll;
8211
8212                pipe_config->has_pch_encoder = true;
8213
8214                tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
8215                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8216                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;
8217
8218                ironlake_get_fdi_m_n_config(crtc, pipe_config);
8219
8220                if (HAS_PCH_IBX(dev_priv->dev)) {
8221                        pipe_config->shared_dpll =
8222                                (enum intel_dpll_id) crtc->pipe;
8223                } else {
8224                        tmp = I915_READ(PCH_DPLL_SEL);
8225                        if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
8226                                pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
8227                        else
8228                                pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
8229                }
8230
8231                pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
8232
8233                WARN_ON(!pll->get_hw_state(dev_priv, pll,
8234                                           &pipe_config->dpll_hw_state));
8235
8236                tmp = pipe_config->dpll_hw_state.dpll;
8237                pipe_config->pixel_multiplier =
8238                        ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
8239                         >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
8240
8241                ironlake_pch_clock_get(crtc, pipe_config);
8242        } else {
8243                pipe_config->pixel_multiplier = 1;
8244        }
8245
8246        intel_get_pipe_timings(crtc, pipe_config);
8247
8248        ironlake_get_pfit_config(crtc, pipe_config);
8249
8250        return true;
8251}
8252
8253static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
8254{
8255        struct drm_device *dev = dev_priv->dev;
8256        struct intel_crtc *crtc;
8257
8258        for_each_intel_crtc(dev, crtc)
8259                I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
8260                     pipe_name(crtc->pipe));
8261
8262        I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
8263        I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
8264        I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
8265        I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
8266        I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
8267        I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
8268             "CPU PWM1 enabled\n");
8269        if (IS_HASWELL(dev))
8270                I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
8271                     "CPU PWM2 enabled\n");
8272        I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
8273             "PCH PWM1 enabled\n");
8274        I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
8275             "Utility pin enabled\n");
8276        I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
8277
8278        /*
8279         * In theory we can still leave IRQs enabled, as long as only the HPD
8280         * interrupts remain enabled. We used to check for that, but since it's
8281         * gen-specific and since we only disable LCPLL after we fully disable
8282         * the interrupts, the check below should be enough.
8283         */
8284        I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
8285}
8286
8287static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
8288{
8289        struct drm_device *dev = dev_priv->dev;
8290
8291        if (IS_HASWELL(dev))
8292                return I915_READ(D_COMP_HSW);
8293        else
8294                return I915_READ(D_COMP_BDW);
8295}
8296
8297static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
8298{
8299        struct drm_device *dev = dev_priv->dev;
8300
8301        if (IS_HASWELL(dev)) {
8302                mutex_lock(&dev_priv->rps.hw_lock);
8303                if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
8304                                            val))
8305                        DRM_ERROR("Failed to write to D_COMP\n");
8306                mutex_unlock(&dev_priv->rps.hw_lock);
8307        } else {
8308                I915_WRITE(D_COMP_BDW, val);
8309                POSTING_READ(D_COMP_BDW);
8310        }
8311}
8312
8313/*
8314 * This function implements pieces of two sequences from BSpec:
8315 * - Sequence for display software to disable LCPLL
8316 * - Sequence for display software to allow package C8+
8317 * The steps implemented here are just the steps that actually touch the LCPLL
8318 * register. Callers should take care of disabling all the display engine
8319 * functions, doing the mode unset, fixing interrupts, etc.
8320 */
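    /*
     * Sketch of how this is used elsewhere in this file: the runtime PM /
     * PC8 path calls
     *
     *     hsw_disable_lcpll(dev_priv, true, true);
     *
     * i.e. CDCLK is first moved to FCLK (switch_to_fclk) and, once the PLL
     * is disabled, LCPLL_POWER_DOWN_ALLOW is set (allow_power_down); see
     * hsw_enable_pc8() below.
     */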
8321static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
8322                              bool switch_to_fclk, bool allow_power_down)
8323{
8324        uint32_t val;
8325
8326        assert_can_disable_lcpll(dev_priv);
8327
8328        val = I915_READ(LCPLL_CTL);
8329
8330        if (switch_to_fclk) {
8331                val |= LCPLL_CD_SOURCE_FCLK;
8332                I915_WRITE(LCPLL_CTL, val);
8333
8334                if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
8335                                       LCPLL_CD_SOURCE_FCLK_DONE, 1))
8336                        DRM_ERROR("Switching to FCLK failed\n");
8337
8338                val = I915_READ(LCPLL_CTL);
8339        }
8340
8341        val |= LCPLL_PLL_DISABLE;
8342        I915_WRITE(LCPLL_CTL, val);
8343        POSTING_READ(LCPLL_CTL);
8344
8345        if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
8346                DRM_ERROR("LCPLL still locked\n");
8347
8348        val = hsw_read_dcomp(dev_priv);
8349        val |= D_COMP_COMP_DISABLE;
8350        hsw_write_dcomp(dev_priv, val);
8351        ndelay(100);
8352
8353        if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
8354                     1))
8355                DRM_ERROR("D_COMP RCOMP still in progress\n");
8356
8357        if (allow_power_down) {
8358                val = I915_READ(LCPLL_CTL);
8359                val |= LCPLL_POWER_DOWN_ALLOW;
8360                I915_WRITE(LCPLL_CTL, val);
8361                POSTING_READ(LCPLL_CTL);
8362        }
8363}
8364
8365/*
8366 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
8367 * source.
8368 */
8369static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
8370{
8371        uint32_t val;
8372
8373        val = I915_READ(LCPLL_CTL);
8374
8375        if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
8376                    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
8377                return;
8378
8379        /*
8380         * Make sure we're not in the PC8 state before disabling PC8, otherwise
8381         * we'll hang the machine. To prevent PC8 state, just enable force_wake.
8382         */
8383        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
8384
8385        if (val & LCPLL_POWER_DOWN_ALLOW) {
8386                val &= ~LCPLL_POWER_DOWN_ALLOW;
8387                I915_WRITE(LCPLL_CTL, val);
8388                POSTING_READ(LCPLL_CTL);
8389        }
8390
8391        val = hsw_read_dcomp(dev_priv);
8392        val |= D_COMP_COMP_FORCE;
8393        val &= ~D_COMP_COMP_DISABLE;
8394        hsw_write_dcomp(dev_priv, val);
8395
8396        val = I915_READ(LCPLL_CTL);
8397        val &= ~LCPLL_PLL_DISABLE;
8398        I915_WRITE(LCPLL_CTL, val);
8399
8400        if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
8401                DRM_ERROR("LCPLL not locked yet\n");
8402
8403        if (val & LCPLL_CD_SOURCE_FCLK) {
8404                val = I915_READ(LCPLL_CTL);
8405                val &= ~LCPLL_CD_SOURCE_FCLK;
8406                I915_WRITE(LCPLL_CTL, val);
8407
8408                if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
8409                                        LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
8410                        DRM_ERROR("Switching back to LCPLL failed\n");
8411        }
8412
8413        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
8414}
8415
8416/*
8417 * Package states C8 and deeper are really deep PC states that can only be
8418 * reached when all the devices on the system allow it, so even if the graphics
8419 * device allows PC8+, it doesn't mean the system will actually get to these
8420 * states. Our driver only allows PC8+ when going into runtime PM.
8421 *
8422 * The requirements for PC8+ are that all the outputs are disabled, the power
8423 * well is disabled and most interrupts are disabled, and these are also
8424 * requirements for runtime PM. When these conditions are met, we manually take
8425 * care of the rest: disable the interrupts and clocks, and switch the LCPLL
8426 * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can
8427 * hard hang the machine.
8428 *
8429 * When we really reach PC8 or deeper states (not just when we allow it) we lose
8430 * the state of some registers, so when we come back from PC8+ we need to
8431 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
8432 * need to take care of the registers kept by RC6. Notice that this happens even
8433 * if we don't put the device in PCI D3 state (which is what currently happens
8434 * because of the runtime PM support).
8435 *
8436 * For more, read "Display Sequences for Package C8" in the hardware
8437 * documentation.
8438 */
8439void hsw_enable_pc8(struct drm_i915_private *dev_priv)
8440{
8441        struct drm_device *dev = dev_priv->dev;
8442        uint32_t val;
8443
8444        DRM_DEBUG_KMS("Enabling package C8+\n");
8445
8446        if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
8447                val = I915_READ(SOUTH_DSPCLK_GATE_D);
8448                val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
8449                I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8450        }
8451
8452        lpt_disable_clkout_dp(dev);
8453        hsw_disable_lcpll(dev_priv, true, true);
8454}
8455
8456void hsw_disable_pc8(struct drm_i915_private *dev_priv)
8457{
8458        struct drm_device *dev = dev_priv->dev;
8459        uint32_t val;
8460
8461        DRM_DEBUG_KMS("Disabling package C8+\n");
8462
8463        hsw_restore_lcpll(dev_priv);
8464        lpt_init_pch_refclk(dev);
8465
8466        if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
8467                val = I915_READ(SOUTH_DSPCLK_GATE_D);
8468                val |= PCH_LP_PARTITION_LEVEL_DISABLE;
8469                I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8470        }
8471
8472        intel_prepare_ddi(dev);
8473}
8474
8475static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
8476                                      struct intel_crtc_state *crtc_state)
8477{
8478        if (!intel_ddi_pll_select(crtc, crtc_state))
8479                return -EINVAL;
8480
8481        crtc->lowfreq_avail = false;
8482
8483        return 0;
8484}
8485
8486static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
8487                                enum port port,
8488                                struct intel_crtc_state *pipe_config)
8489{
8490        u32 temp, dpll_ctl1;
8491
8492        temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
8493        pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
8494
8495        switch (pipe_config->ddi_pll_sel) {
8496        case SKL_DPLL0:
8497                /*
8498                 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
8499                 * of the shared DPLL framework and thus needs to be read out
8500                 * separately
8501                 */
8502                dpll_ctl1 = I915_READ(DPLL_CTRL1);
8503                pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
8504                break;
8505        case SKL_DPLL1:
8506                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
8507                break;
8508        case SKL_DPLL2:
8509                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
8510                break;
8511        case SKL_DPLL3:
8512                pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
8513                break;
8514        }
8515}
8516
8517static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
8518                                enum port port,
8519                                struct intel_crtc_state *pipe_config)
8520{
8521        pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
8522
8523        switch (pipe_config->ddi_pll_sel) {
8524        case PORT_CLK_SEL_WRPLL1:
8525                pipe_config->shared_dpll = DPLL_ID_WRPLL1;
8526                break;
8527        case PORT_CLK_SEL_WRPLL2:
8528                pipe_config->shared_dpll = DPLL_ID_WRPLL2;
8529                break;
8530        }
8531}
8532
8533static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
8534                                       struct intel_crtc_state *pipe_config)
8535{
8536        struct drm_device *dev = crtc->base.dev;
8537        struct drm_i915_private *dev_priv = dev->dev_private;
8538        struct intel_shared_dpll *pll;
8539        enum port port;
8540        uint32_t tmp;
8541
8542        tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
8543
8544        port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
8545
8546        if (IS_SKYLAKE(dev))
8547                skylake_get_ddi_pll(dev_priv, port, pipe_config);
8548        else
8549                haswell_get_ddi_pll(dev_priv, port, pipe_config);
8550
8551        if (pipe_config->shared_dpll >= 0) {
8552                pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
8553
8554                WARN_ON(!pll->get_hw_state(dev_priv, pll,
8555                                           &pipe_config->dpll_hw_state));
8556        }
8557
8558        /*
8559         * Haswell has only FDI/PCH transcoder A, which is connected to DDI E.
8560         * So just check whether this pipe is wired to DDI E and whether
8561         * the PCH transcoder is on.
8562         */
8563        if (INTEL_INFO(dev)->gen < 9 &&
8564            (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
8565                pipe_config->has_pch_encoder = true;
8566
8567                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
8568                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8569                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;
8570
8571                ironlake_get_fdi_m_n_config(crtc, pipe_config);
8572        }
8573}
8574
8575static bool haswell_get_pipe_config(struct intel_crtc *crtc,
8576                                    struct intel_crtc_state *pipe_config)
8577{
8578        struct drm_device *dev = crtc->base.dev;
8579        struct drm_i915_private *dev_priv = dev->dev_private;
8580        enum intel_display_power_domain pfit_domain;
8581        uint32_t tmp;
8582
8583        if (!intel_display_power_is_enabled(dev_priv,
8584                                         POWER_DOMAIN_PIPE(crtc->pipe)))
8585                return false;
8586
8587        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8588        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8589
8590        tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
8591        if (tmp & TRANS_DDI_FUNC_ENABLE) {
8592                enum pipe trans_edp_pipe;
8593                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
8594                default:
8595                        WARN(1, "unknown pipe linked to edp transcoder\n");
8596                case TRANS_DDI_EDP_INPUT_A_ONOFF:
8597                case TRANS_DDI_EDP_INPUT_A_ON:
8598                        trans_edp_pipe = PIPE_A;
8599                        break;
8600                case TRANS_DDI_EDP_INPUT_B_ONOFF:
8601                        trans_edp_pipe = PIPE_B;
8602                        break;
8603                case TRANS_DDI_EDP_INPUT_C_ONOFF:
8604                        trans_edp_pipe = PIPE_C;
8605                        break;
8606                }
8607
8608                if (trans_edp_pipe == crtc->pipe)
8609                        pipe_config->cpu_transcoder = TRANSCODER_EDP;
8610        }
8611
8612        if (!intel_display_power_is_enabled(dev_priv,
8613                        POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
8614                return false;
8615
8616        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
8617        if (!(tmp & PIPECONF_ENABLE))
8618                return false;
8619
8620        haswell_get_ddi_port_state(crtc, pipe_config);
8621
8622        intel_get_pipe_timings(crtc, pipe_config);
8623
8624        pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
8625        if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
8626                if (IS_SKYLAKE(dev))
8627                        skylake_get_pfit_config(crtc, pipe_config);
8628                else
8629                        ironlake_get_pfit_config(crtc, pipe_config);
8630        }
8631
8632        if (IS_HASWELL(dev))
8633                pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
8634                        (I915_READ(IPS_CTL) & IPS_ENABLE);
8635
8636        if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
8637                pipe_config->pixel_multiplier =
8638                        I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
8639        } else {
8640                pipe_config->pixel_multiplier = 1;
8641        }
8642
8643        return true;
8644}
8645
8646static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
8647{
8648        struct drm_device *dev = crtc->dev;
8649        struct drm_i915_private *dev_priv = dev->dev_private;
8650        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8651        uint32_t cntl = 0, size = 0;
8652
8653        if (base) {
8654                unsigned int width = intel_crtc->base.cursor->state->crtc_w;
8655                unsigned int height = intel_crtc->base.cursor->state->crtc_h;
8656                unsigned int stride = roundup_pow_of_two(width) * 4;
8657
8658                switch (stride) {
8659                default:
8660                        WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
8661                                  width, stride);
8662                        stride = 256;
8663                        /* fallthrough */
8664                case 256:
8665                case 512:
8666                case 1024:
8667                case 2048:
8668                        break;
8669                }
8670
8671                cntl |= CURSOR_ENABLE |
8672                        CURSOR_GAMMA_ENABLE |
8673                        CURSOR_FORMAT_ARGB |
8674                        CURSOR_STRIDE(stride);
8675
8676                size = (height << 12) | width;
8677        }
8678
8679        if (intel_crtc->cursor_cntl != 0 &&
8680            (intel_crtc->cursor_base != base ||
8681             intel_crtc->cursor_size != size ||
8682             intel_crtc->cursor_cntl != cntl)) {
8683                /* On these chipsets we can only modify the base/size/stride
8684                 * whilst the cursor is disabled.
8685                 */
8686                I915_WRITE(_CURACNTR, 0);
8687                POSTING_READ(_CURACNTR);
8688                intel_crtc->cursor_cntl = 0;
8689        }
8690
8691        if (intel_crtc->cursor_base != base) {
8692                I915_WRITE(_CURABASE, base);
8693                intel_crtc->cursor_base = base;
8694        }
8695
8696        if (intel_crtc->cursor_size != size) {
8697                I915_WRITE(CURSIZE, size);
8698                intel_crtc->cursor_size = size;
8699        }
8700
8701        if (intel_crtc->cursor_cntl != cntl) {
8702                I915_WRITE(_CURACNTR, cntl);
8703                POSTING_READ(_CURACNTR);
8704                intel_crtc->cursor_cntl = cntl;
8705        }
8706}
8707
8708static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8709{
8710        struct drm_device *dev = crtc->dev;
8711        struct drm_i915_private *dev_priv = dev->dev_private;
8712        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8713        int pipe = intel_crtc->pipe;
8714        uint32_t cntl;
8715
8716        cntl = 0;
8717        if (base) {
8718                cntl = MCURSOR_GAMMA_ENABLE;
8719                switch (intel_crtc->base.cursor->state->crtc_w) {
8720                        case 64:
8721                                cntl |= CURSOR_MODE_64_ARGB_AX;
8722                                break;
8723                        case 128:
8724                                cntl |= CURSOR_MODE_128_ARGB_AX;
8725                                break;
8726                        case 256:
8727                                cntl |= CURSOR_MODE_256_ARGB_AX;
8728                                break;
8729                        default:
8730                                MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
8731                                return;
8732                }
8733                cntl |= pipe << 28; /* Connect to correct pipe */
8734
8735                if (IS_HASWELL(dev) || IS_BROADWELL(dev))
8736                        cntl |= CURSOR_PIPE_CSC_ENABLE;
8737        }
8738
8739        if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
8740                cntl |= CURSOR_ROTATE_180;
8741
8742        if (intel_crtc->cursor_cntl != cntl) {
8743                I915_WRITE(CURCNTR(pipe), cntl);
8744                POSTING_READ(CURCNTR(pipe));
8745                intel_crtc->cursor_cntl = cntl;
8746        }
8747
8748        /* and commit changes on next vblank */
8749        I915_WRITE(CURBASE(pipe), base);
8750        POSTING_READ(CURBASE(pipe));
8751
8752        intel_crtc->cursor_base = base;
8753}
8754
8755/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
8756static void intel_crtc_update_cursor(struct drm_crtc *crtc,
8757                                     bool on)
8758{
8759        struct drm_device *dev = crtc->dev;
8760        struct drm_i915_private *dev_priv = dev->dev_private;
8761        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8762        int pipe = intel_crtc->pipe;
8763        int x = crtc->cursor_x;
8764        int y = crtc->cursor_y;
8765        u32 base = 0, pos = 0;
8766
8767        if (on)
8768                base = intel_crtc->cursor_addr;
8769
8770        if (x >= intel_crtc->config->pipe_src_w)
8771                base = 0;
8772
8773        if (y >= intel_crtc->config->pipe_src_h)
8774                base = 0;
8775
8776        if (x < 0) {
8777                if (x + intel_crtc->base.cursor->state->crtc_w <= 0)
8778                        base = 0;
8779
8780                pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
8781                x = -x;
8782        }
8783        pos |= x << CURSOR_X_SHIFT;
8784
8785        if (y < 0) {
8786                if (y + intel_crtc->base.cursor->state->crtc_h <= 0)
8787                        base = 0;
8788
8789                pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
8790                y = -y;
8791        }
8792        pos |= y << CURSOR_Y_SHIFT;
8793
8794        if (base == 0 && intel_crtc->cursor_base == 0)
8795                return;
8796
8797        I915_WRITE(CURPOS(pipe), pos);
8798
8799        /* ILK+ do this automagically */
8800        if (HAS_GMCH_DISPLAY(dev) &&
8801            crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
8802                base += (intel_crtc->base.cursor->state->crtc_h *
8803                        intel_crtc->base.cursor->state->crtc_w - 1) * 4;
8804        }
8805
8806        if (IS_845G(dev) || IS_I865G(dev))
8807                i845_update_cursor(crtc, base);
8808        else
8809                i9xx_update_cursor(crtc, base);
8810}
8811
8812static bool cursor_size_ok(struct drm_device *dev,
8813                           uint32_t width, uint32_t height)
8814{
8815        if (width == 0 || height == 0)
8816                return false;
8817
8818        /*
8819         * 845g/865g are special in that they are only limited by
8820         * the width of their cursors; the height is arbitrary up to
8821         * the precision of the register. Everything else requires
8822         * square cursors, limited to a few power-of-two sizes.
8823         */
8824        if (IS_845G(dev) || IS_I865G(dev)) {
8825                if ((width & 63) != 0)
8826                        return false;
8827
8828                if (width > (IS_845G(dev) ? 64 : 512))
8829                        return false;
8830
8831                if (height > 1023)
8832                        return false;
8833        } else {
8834                switch (width | height) {
8835                case 256:
8836                case 128:
8837                        if (IS_GEN2(dev))
8838                                return false;
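                        /* fallthrough */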
8839                case 64:
8840                        break;
8841                default:
8842                        return false;
8843                }
8844        }
8845
8846        return true;
8847}
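/*
 * A few illustrative results of the checks above, derived from the code
 * (dev stands for a device of the named platform):
 *
 *	cursor_size_ok(dev,  64,  64)  -> true on all platforms
 *	cursor_size_ok(dev, 100, 100)  -> false on all platforms
 *	cursor_size_ok(dev,  64, 300)  -> true only on 845G/865G, where the
 *					  height is free (up to 1023)
 *	cursor_size_ok(dev, 256, 256)  -> true on gen3+ (and on 865G, whose
 *					  width limit is 512)
 */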
8848
8849static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8850                                 u16 *blue, uint32_t start, uint32_t size)
8851{
8852        int end = (start + size > 256) ? 256 : start + size, i;
8853        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8854
8855        for (i = start; i < end; i++) {
8856                intel_crtc->lut_r[i] = red[i] >> 8;
8857                intel_crtc->lut_g[i] = green[i] >> 8;
8858                intel_crtc->lut_b[i] = blue[i] >> 8;
8859        }
8860
8861        intel_crtc_load_lut(crtc);
8862}
8863
8864/* VESA 640x480x72Hz mode to set on the pipe */
8865static struct drm_display_mode load_detect_mode = {
8866        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
8867                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
8868};
8869
8870struct drm_framebuffer *
8871__intel_framebuffer_create(struct drm_device *dev,
8872                           struct drm_mode_fb_cmd2 *mode_cmd,
8873                           struct drm_i915_gem_object *obj)
8874{
8875        struct intel_framebuffer *intel_fb;
8876        int ret;
8877
8878        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8879        if (!intel_fb) {
8880                drm_gem_object_unreference(&obj->base);
8881                return ERR_PTR(-ENOMEM);
8882        }
8883
8884        ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
8885        if (ret)
8886                goto err;
8887
8888        return &intel_fb->base;
8889err:
8890        drm_gem_object_unreference(&obj->base);
8891        kfree(intel_fb);
8892
8893        return ERR_PTR(ret);
8894}
8895
8896static struct drm_framebuffer *
8897intel_framebuffer_create(struct drm_device *dev,
8898                         struct drm_mode_fb_cmd2 *mode_cmd,
8899                         struct drm_i915_gem_object *obj)
8900{
8901        struct drm_framebuffer *fb;
8902        int ret;
8903
8904        ret = i915_mutex_lock_interruptible(dev);
8905        if (ret)
8906                return ERR_PTR(ret);
8907        fb = __intel_framebuffer_create(dev, mode_cmd, obj);
8908        mutex_unlock(&dev->struct_mutex);
8909
8910        return fb;
8911}
8912
8913static u32
8914intel_framebuffer_pitch_for_width(int width, int bpp)
8915{
8916        u32 pitch = DIV_ROUND_UP(width * bpp, 8);
8917        return ALIGN(pitch, 64);
8918}
8919
8920static u32
8921intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8922{
8923        u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
8924        return PAGE_ALIGN(pitch * mode->vdisplay);
8925}
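/*
 * Worked example for the two helpers above, using the 640x480 load-detect
 * mode with a 32bpp format:
 *
 *	pitch = ALIGN(DIV_ROUND_UP(640 * 32, 8), 64)
 *	      = ALIGN(2560, 64) = 2560 bytes
 *	size  = PAGE_ALIGN(2560 * 480)
 *	      = PAGE_ALIGN(1228800) = 1228800 bytes (exactly 300 4K pages)
 */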
8926
8927static struct drm_framebuffer *
8928intel_framebuffer_create_for_mode(struct drm_device *dev,
8929                                  struct drm_display_mode *mode,
8930                                  int depth, int bpp)
8931{
8932        struct drm_i915_gem_object *obj;
8933        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
8934
8935        obj = i915_gem_alloc_object(dev,
8936                                    intel_framebuffer_size_for_mode(mode, bpp));
8937        if (obj == NULL)
8938                return ERR_PTR(-ENOMEM);
8939
8940        mode_cmd.width = mode->hdisplay;
8941        mode_cmd.height = mode->vdisplay;
8942        mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
8943                                                                bpp);
8944        mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
8945
8946        return intel_framebuffer_create(dev, &mode_cmd, obj);
8947}
8948
8949static struct drm_framebuffer *
8950mode_fits_in_fbdev(struct drm_device *dev,
8951                   struct drm_display_mode *mode)
8952{
8953#ifdef CONFIG_DRM_I915_FBDEV
8954        struct drm_i915_private *dev_priv = dev->dev_private;
8955        struct drm_i915_gem_object *obj;
8956        struct drm_framebuffer *fb;
8957
8958        if (!dev_priv->fbdev)
8959                return NULL;
8960
8961        if (!dev_priv->fbdev->fb)
8962                return NULL;
8963
8964        obj = dev_priv->fbdev->fb->obj;
8965        BUG_ON(!obj);
8966
8967        fb = &dev_priv->fbdev->fb->base;
8968        if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
8969                                                               fb->bits_per_pixel))
8970                return NULL;
8971
8972        if (obj->base.size < mode->vdisplay * fb->pitches[0])
8973                return NULL;
8974
8975        return fb;
8976#else
8977        return NULL;
8978#endif
8979}
8980
8981bool intel_get_load_detect_pipe(struct drm_connector *connector,
8982                                struct drm_display_mode *mode,
8983                                struct intel_load_detect_pipe *old,
8984                                struct drm_modeset_acquire_ctx *ctx)
8985{
8986        struct intel_crtc *intel_crtc;
8987        struct intel_encoder *intel_encoder =
8988                intel_attached_encoder(connector);
8989        struct drm_crtc *possible_crtc;
8990        struct drm_encoder *encoder = &intel_encoder->base;
8991        struct drm_crtc *crtc = NULL;
8992        struct drm_device *dev = encoder->dev;
8993        struct drm_framebuffer *fb;
8994        struct drm_mode_config *config = &dev->mode_config;
8995        struct drm_atomic_state *state = NULL;
8996        struct drm_connector_state *connector_state;
8997        int ret, i = -1;
8998
8999        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
9000                      connector->base.id, connector->name,
9001                      encoder->base.id, encoder->name);
9002
9003retry:
9004        ret = drm_modeset_lock(&config->connection_mutex, ctx);
9005        if (ret)
9006                goto fail_unlock;
9007
9008        /*
9009         * Algorithm gets a little messy:
9010         *
9011         *   - if the connector already has an assigned crtc, use it (but make
9012         *     sure it's on first)
9013         *
9014         *   - try to find the first unused crtc that can drive this connector,
9015         *     and use that if we find one
9016         */
9017
9018        /* See if we already have a CRTC for this connector */
9019        if (encoder->crtc) {
9020                crtc = encoder->crtc;
9021
9022                ret = drm_modeset_lock(&crtc->mutex, ctx);
9023                if (ret)
9024                        goto fail_unlock;
9025                ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
9026                if (ret)
9027                        goto fail_unlock;
9028
9029                old->dpms_mode = connector->dpms;
9030                old->load_detect_temp = false;
9031
9032                /* Make sure the crtc and connector are running */
9033                if (connector->dpms != DRM_MODE_DPMS_ON)
9034                        connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
9035
9036                return true;
9037        }
9038
9039        /* Find an unused one (if possible) */
9040        for_each_crtc(dev, possible_crtc) {
9041                i++;
9042                if (!(encoder->possible_crtcs & (1 << i)))
9043                        continue;
9044                if (possible_crtc->state->enable)
9045                        continue;
9046                /* This can occur when applying the pipe A quirk on resume. */
9047                if (to_intel_crtc(possible_crtc)->new_enabled)
9048                        continue;
9049
9050                crtc = possible_crtc;
9051                break;
9052        }
9053
9054        /*
9055         * If we didn't find an unused CRTC, don't use any.
9056         */
9057        if (!crtc) {
9058                DRM_DEBUG_KMS("no pipe available for load-detect\n");
9059                goto fail_unlock;
9060        }
9061
9062        ret = drm_modeset_lock(&crtc->mutex, ctx);
9063        if (ret)
9064                goto fail_unlock;
9065        ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
9066        if (ret)
9067                goto fail_unlock;
9068        intel_encoder->new_crtc = to_intel_crtc(crtc);
9069        to_intel_connector(connector)->new_encoder = intel_encoder;
9070
9071        intel_crtc = to_intel_crtc(crtc);
9072        intel_crtc->new_enabled = true;
9073        intel_crtc->new_config = intel_crtc->config;
9074        old->dpms_mode = connector->dpms;
9075        old->load_detect_temp = true;
9076        old->release_fb = NULL;
9077
9078        state = drm_atomic_state_alloc(dev);
9079        if (!state)
9080                return false;
9081
9082        state->acquire_ctx = ctx;
9083
9084        connector_state = drm_atomic_get_connector_state(state, connector);
9085        if (IS_ERR(connector_state)) {
9086                ret = PTR_ERR(connector_state);
9087                goto fail;
9088        }
9089
9090        connector_state->crtc = crtc;
9091        connector_state->best_encoder = &intel_encoder->base;
9092
9093        if (!mode)
9094                mode = &load_detect_mode;
9095
9096        /* We need a framebuffer large enough to accommodate all accesses
9097         * that the plane may generate whilst we perform load detection.
9098         * We cannot rely on the fbcon either being present (we get called
9099         * during its initialisation to detect all boot displays, or it may
9100         * not even exist) or being large enough to satisfy the
9101         * requested mode.
9102         */
9103        fb = mode_fits_in_fbdev(dev, mode);
9104        if (fb == NULL) {
9105                DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
9106                fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
9107                old->release_fb = fb;
9108        } else
9109                DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
9110        if (IS_ERR(fb)) {
9111                DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
9112                goto fail;
9113        }
9114
9115        if (intel_set_mode(crtc, mode, 0, 0, fb, state)) {
9116                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
9117                if (old->release_fb)
9118                        old->release_fb->funcs->destroy(old->release_fb);
9119                goto fail;
9120        }
9121        crtc->primary->crtc = crtc;
9122
9123        /* let the connector get through one full cycle before testing */
9124        intel_wait_for_vblank(dev, intel_crtc->pipe);
9125        return true;
9126
9127 fail:
9128        intel_crtc->new_enabled = crtc->state->enable;
9129        if (intel_crtc->new_enabled)
9130                intel_crtc->new_config = intel_crtc->config;
9131        else
9132                intel_crtc->new_config = NULL;
9133fail_unlock:
9134        if (state) {
9135                drm_atomic_state_free(state);
9136                state = NULL;
9137        }
9138
9139        if (ret == -EDEADLK) {
9140                drm_modeset_backoff(ctx);
9141                goto retry;
9142        }
9143
9144        return false;
9145}
9146
9147void intel_release_load_detect_pipe(struct drm_connector *connector,
9148                                    struct intel_load_detect_pipe *old,
9149                                    struct drm_modeset_acquire_ctx *ctx)
9150{
9151        struct drm_device *dev = connector->dev;
9152        struct intel_encoder *intel_encoder =
9153                intel_attached_encoder(connector);
9154        struct drm_encoder *encoder = &intel_encoder->base;
9155        struct drm_crtc *crtc = encoder->crtc;
9156        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9157        struct drm_atomic_state *state;
9158        struct drm_connector_state *connector_state;
9159
9160        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
9161                      connector->base.id, connector->name,
9162                      encoder->base.id, encoder->name);
9163
9164        if (old->load_detect_temp) {
9165                state = drm_atomic_state_alloc(dev);
9166                if (!state)
9167                        goto fail;
9168
9169                state->acquire_ctx = ctx;
9170
9171                connector_state = drm_atomic_get_connector_state(state, connector);
9172                if (IS_ERR(connector_state))
9173                        goto fail;
9174
9175                to_intel_connector(connector)->new_encoder = NULL;
9176                intel_encoder->new_crtc = NULL;
9177                intel_crtc->new_enabled = false;
9178                intel_crtc->new_config = NULL;
9179
9180                connector_state->best_encoder = NULL;
9181                connector_state->crtc = NULL;
9182
9183                intel_set_mode(crtc, NULL, 0, 0, NULL, state);
9184
9185                drm_atomic_state_free(state);
9186
9187                if (old->release_fb) {
9188                        drm_framebuffer_unregister_private(old->release_fb);
9189                        drm_framebuffer_unreference(old->release_fb);
9190                }
9191
9192                return;
9193        }
9194
9195        /* Switch crtc and encoder back off if necessary */
9196        if (old->dpms_mode != DRM_MODE_DPMS_ON)
9197                connector->funcs->dpms(connector, old->dpms_mode);
9198
9199        return;
9200fail:
9201        DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
9202        drm_atomic_state_free(state);
9203}
9204
9205static int i9xx_pll_refclk(struct drm_device *dev,
9206                           const struct intel_crtc_state *pipe_config)
9207{
9208        struct drm_i915_private *dev_priv = dev->dev_private;
9209        u32 dpll = pipe_config->dpll_hw_state.dpll;
9210
9211        if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
9212                return dev_priv->vbt.lvds_ssc_freq;
9213        else if (HAS_PCH_SPLIT(dev))
9214                return 120000;
9215        else if (!IS_GEN2(dev))
9216                return 96000;
9217        else
9218                return 48000;
9219}
9220
9221/* Returns the clock of the currently programmed mode of the given pipe. */
9222static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
9223                                struct intel_crtc_state *pipe_config)
9224{
9225        struct drm_device *dev = crtc->base.dev;
9226        struct drm_i915_private *dev_priv = dev->dev_private;
9227        int pipe = pipe_config->cpu_transcoder;
9228        u32 dpll = pipe_config->dpll_hw_state.dpll;
9229        u32 fp;
9230        intel_clock_t clock;
9231        int refclk = i9xx_pll_refclk(dev, pipe_config);
9232
9233        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
9234                fp = pipe_config->dpll_hw_state.fp0;
9235        else
9236                fp = pipe_config->dpll_hw_state.fp1;
9237
9238        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
9239        if (IS_PINEVIEW(dev)) {
9240                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
9241                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
9242        } else {
9243                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
9244                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
9245        }
9246
9247        if (!IS_GEN2(dev)) {
9248                if (IS_PINEVIEW(dev))
9249                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
9250                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
9251                else
9252                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
9253                               DPLL_FPA01_P1_POST_DIV_SHIFT);
9254
9255                switch (dpll & DPLL_MODE_MASK) {
9256                case DPLLB_MODE_DAC_SERIAL:
9257                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
9258                                5 : 10;
9259                        break;
9260                case DPLLB_MODE_LVDS:
9261                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
9262                                7 : 14;
9263                        break;
9264                default:
9265                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
9266                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
9267                        return;
9268                }
9269
9270                if (IS_PINEVIEW(dev))
9271                        pineview_clock(refclk, &clock);
9272                else
9273                        i9xx_clock(refclk, &clock);
9274        } else {
9275                u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
9276                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
9277
9278                if (is_lvds) {
9279                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
9280                                       DPLL_FPA01_P1_POST_DIV_SHIFT);
9281
9282                        if (lvds & LVDS_CLKB_POWER_UP)
9283                                clock.p2 = 7;
9284                        else
9285                                clock.p2 = 14;
9286                } else {
9287                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
9288                                clock.p1 = 2;
9289                        else {
9290                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
9291                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
9292                        }
9293                        if (dpll & PLL_P2_DIVIDE_BY_4)
9294                                clock.p2 = 4;
9295                        else
9296                                clock.p2 = 2;
9297                }
9298
9299                i9xx_clock(refclk, &clock);
9300        }
9301
9302        /*
9303         * This value includes pixel_multiplier. We will use
9304         * port_clock to compute adjusted_mode.crtc_clock in the
9305         * encoder's get_config() function.
9306         */
9307        pipe_config->port_clock = clock.dot;
9308}
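/*
 * Worked example of the readout above, with hypothetical register fields and
 * assuming the classic (non-Pineview) i9xx divider formula
 * m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2), dot = vco / (p1 * p2):
 *
 *	refclk = 96000 kHz, m1 = 12, m2 = 9, n = 3, p1 = 2, p2 = 10
 *	m   = 5 * 14 + 11 = 81
 *	vco = 96000 * 81 / 5 = 1555200 kHz
 *	dot = 1555200 / 20 = 77760 kHz  -> pipe_config->port_clock
 */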
9309
9310int intel_dotclock_calculate(int link_freq,
9311                             const struct intel_link_m_n *m_n)
9312{
9313        /*
9314         * The calculation for the data clock is:
9315         * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
9316         * But we want to avoid losing precision if possible, so:
9317         * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
9318         *
9319         * and the dot clock computed from the link M/N values is simpler:
9320         * pixel_clock = (link_m * link_clock) / link_n
9321         */
9322
9323        if (!m_n->link_n)
9324                return 0;
9325
9326        return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
9327}
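/*
 * Worked example with hypothetical link M/N values: for link_freq = 270000
 * (kHz), link_m = 107 and link_n = 240, the helper above returns
 *
 *	107 * 270000 / 240 = 120375
 *
 * i.e. the dot clock in the same units as link_freq.
 */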
9328
9329static void ironlake_pch_clock_get(struct intel_crtc *crtc,
9330                                   struct intel_crtc_state *pipe_config)
9331{
9332        struct drm_device *dev = crtc->base.dev;
9333
9334        /* read out port_clock from the DPLL */
9335        i9xx_crtc_clock_get(crtc, pipe_config);
9336
9337        /*
9338         * This value does not include pixel_multiplier.
9339         * We will check that port_clock and adjusted_mode.crtc_clock
9340         * agree once we know their relationship in the encoder's
9341         * get_config() function.
9342         */
9343        pipe_config->base.adjusted_mode.crtc_clock =
9344                intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
9345                                         &pipe_config->fdi_m_n);
9346}
9347
9348/** Returns the currently programmed mode of the given pipe. */
9349struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
9350                                             struct drm_crtc *crtc)
9351{
9352        struct drm_i915_private *dev_priv = dev->dev_private;
9353        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9354        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
9355        struct drm_display_mode *mode;
9356        struct intel_crtc_state pipe_config;
9357        int htot = I915_READ(HTOTAL(cpu_transcoder));
9358        int hsync = I915_READ(HSYNC(cpu_transcoder));
9359        int vtot = I915_READ(VTOTAL(cpu_transcoder));
9360        int vsync = I915_READ(VSYNC(cpu_transcoder));
9361        enum pipe pipe = intel_crtc->pipe;
9362
9363        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
9364        if (!mode)
9365                return NULL;
9366
9367        /*
9368         * Construct a pipe_config sufficient for getting the clock info
9369         * back out of crtc_clock_get.
9370         *
9371         * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
9372         * to use a real value here instead.
9373         */
9374        pipe_config.cpu_transcoder = (enum transcoder) pipe;
9375        pipe_config.pixel_multiplier = 1;
9376        pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
9377        pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
9378        pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
9379        i9xx_crtc_clock_get(intel_crtc, &pipe_config);
9380
9381        mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
9382        mode->hdisplay = (htot & 0xffff) + 1;
9383        mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
9384        mode->hsync_start = (hsync & 0xffff) + 1;
9385        mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
9386        mode->vdisplay = (vtot & 0xffff) + 1;
9387        mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
9388        mode->vsync_start = (vsync & 0xffff) + 1;
9389        mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
9390
9391        drm_mode_set_name(mode);
9392
9393        return mode;
9394}
9395
9396static void intel_decrease_pllclock(struct drm_crtc *crtc)
9397{
9398        struct drm_device *dev = crtc->dev;
9399        struct drm_i915_private *dev_priv = dev->dev_private;
9400        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9401
9402        if (!HAS_GMCH_DISPLAY(dev))
9403                return;
9404
9405        if (!dev_priv->lvds_downclock_avail)
9406                return;
9407
9408        /*
9409         * Since this is called by a timer, we should never get here in
9410         * the manual case.
9411         */
9412        if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
9413                int pipe = intel_crtc->pipe;
9414                int dpll_reg = DPLL(pipe);
9415                int dpll;
9416
9417                DRM_DEBUG_DRIVER("downclocking LVDS\n");
9418
9419                assert_panel_unlocked(dev_priv, pipe);
9420
9421                dpll = I915_READ(dpll_reg);
9422                dpll |= DISPLAY_RATE_SELECT_FPA1;
9423                I915_WRITE(dpll_reg, dpll);
9424                intel_wait_for_vblank(dev, pipe);
9425                dpll = I915_READ(dpll_reg);
9426                if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
9427                        DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
9428        }
9429
9430}
9431
9432void intel_mark_busy(struct drm_device *dev)
9433{
9434        struct drm_i915_private *dev_priv = dev->dev_private;
9435
9436        if (dev_priv->mm.busy)
9437                return;
9438
9439        intel_runtime_pm_get(dev_priv);
9440        i915_update_gfx_val(dev_priv);
9441        if (INTEL_INFO(dev)->gen >= 6)
9442                gen6_rps_busy(dev_priv);
9443        dev_priv->mm.busy = true;
9444}
9445
9446void intel_mark_idle(struct drm_device *dev)
9447{
9448        struct drm_i915_private *dev_priv = dev->dev_private;
9449        struct drm_crtc *crtc;
9450
9451        if (!dev_priv->mm.busy)
9452                return;
9453
9454        dev_priv->mm.busy = false;
9455
9456        for_each_crtc(dev, crtc) {
9457                if (!crtc->primary->fb)
9458                        continue;
9459
9460                intel_decrease_pllclock(crtc);
9461        }
9462
9463        if (INTEL_INFO(dev)->gen >= 6)
9464                gen6_rps_idle(dev->dev_private);
9465
9466        intel_runtime_pm_put(dev_priv);
9467}
9468
9469static void intel_crtc_set_state(struct intel_crtc *crtc,
9470                                 struct intel_crtc_state *crtc_state)
9471{
9472        kfree(crtc->config);
9473        crtc->config = crtc_state;
9474        crtc->base.state = &crtc_state->base;
9475}
9476
9477static void intel_crtc_destroy(struct drm_crtc *crtc)
9478{
9479        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9480        struct drm_device *dev = crtc->dev;
9481        struct intel_unpin_work *work;
9482
9483        spin_lock_irq(&dev->event_lock);
9484        work = intel_crtc->unpin_work;
9485        intel_crtc->unpin_work = NULL;
9486        spin_unlock_irq(&dev->event_lock);
9487
9488        if (work) {
9489                cancel_work_sync(&work->work);
9490                kfree(work);
9491        }
9492
9493        intel_crtc_set_state(intel_crtc, NULL);
9494        drm_crtc_cleanup(crtc);
9495
9496        kfree(intel_crtc);
9497}
9498
9499static void intel_unpin_work_fn(struct work_struct *__work)
9500{
9501        struct intel_unpin_work *work =
9502                container_of(__work, struct intel_unpin_work, work);
9503        struct drm_device *dev = work->crtc->dev;
9504        enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
9505
9506        mutex_lock(&dev->struct_mutex);
9507        intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
9508        drm_gem_object_unreference(&work->pending_flip_obj->base);
9509
9510        intel_fbc_update(dev);
9511
9512        if (work->flip_queued_req)
9513                i915_gem_request_assign(&work->flip_queued_req, NULL);
9514        mutex_unlock(&dev->struct_mutex);
9515
9516        intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9517        drm_framebuffer_unreference(work->old_fb);
9518
9519        BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
9520        atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
9521
9522        kfree(work);
9523}
9524
9525static void do_intel_finish_page_flip(struct drm_device *dev,
9526                                      struct drm_crtc *crtc)
9527{
9528        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9529        struct intel_unpin_work *work;
9530        unsigned long flags;
9531
9532        /* Ignore early vblank irqs */
9533        if (intel_crtc == NULL)
9534                return;
9535
9536        /*
9537         * This is called both by irq handlers and the reset code (to complete
9538         * lost pageflips) so needs the full irqsave spinlocks.
9539         */
9540        spin_lock_irqsave(&dev->event_lock, flags);
9541        work = intel_crtc->unpin_work;
9542
9543        /* Ensure we don't miss a work->pending update ... */
9544        smp_rmb();
9545
9546        if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
9547                spin_unlock_irqrestore(&dev->event_lock, flags);
9548                return;
9549        }
9550
9551        page_flip_completed(intel_crtc);
9552
9553        spin_unlock_irqrestore(&dev->event_lock, flags);
9554}
9555
9556void intel_finish_page_flip(struct drm_device *dev, int pipe)
9557{
9558        struct drm_i915_private *dev_priv = dev->dev_private;
9559        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9560
9561        do_intel_finish_page_flip(dev, crtc);
9562}
9563
9564void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
9565{
9566        struct drm_i915_private *dev_priv = dev->dev_private;
9567        struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
9568
9569        do_intel_finish_page_flip(dev, crtc);
9570}
9571
9572/* Is 'a' after or equal to 'b'? */
9573static bool g4x_flip_count_after_eq(u32 a, u32 b)
9574{
9575        return !((a - b) & 0x80000000);
9576}
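/*
 * The unsigned subtraction above makes the comparison safe across counter
 * wraparound, e.g. a = 2, b = 0xfffffffe: (a - b) == 4, the top bit is clear,
 * so 'a' is correctly treated as being after 'b' despite being numerically
 * smaller.
 */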
9577
9578static bool page_flip_finished(struct intel_crtc *crtc)
9579{
9580        struct drm_device *dev = crtc->base.dev;
9581        struct drm_i915_private *dev_priv = dev->dev_private;
9582
9583        if (i915_reset_in_progress(&dev_priv->gpu_error) ||
9584            crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
9585                return true;
9586
9587        /*
9588         * The relevant registers don't exist on pre-ctg.
9589         * As the flip done interrupt doesn't trigger for mmio
9590         * flips on gmch platforms, a flip count check isn't
9591         * really needed there. But since ctg has the registers,
9592         * include it in the check anyway.
9593         */
9594        if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
9595                return true;
9596
9597        /*
9598         * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
9599         * used the same base address. In that case the mmio flip might
9600         * have completed, but the CS hasn't even executed the flip yet.
9601         *
9602         * A flip count check isn't enough as the CS might have updated
9603         * the base address just after start of vblank, but before we
9604         * managed to process the interrupt. This means we'd complete the
9605         * CS flip too soon.
9606         *
9607         * Combining both checks should get us a good enough result. It may
9608         * still happen that the CS flip has been executed, but has not
9609         * yet actually completed. But in case the base address is the same
9610         * anyway, we don't really care.
9611         */
9612        return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
9613                crtc->unpin_work->gtt_offset &&
9614                g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
9615                                    crtc->unpin_work->flip_count);
9616}
9617
9618void intel_prepare_page_flip(struct drm_device *dev, int plane)
9619{
9620        struct drm_i915_private *dev_priv = dev->dev_private;
9621        struct intel_crtc *intel_crtc =
9622                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
9623        unsigned long flags;
9624
9625
9626        /*
9627         * This is called both by irq handlers and the reset code (to complete
9628         * lost pageflips) so needs the full irqsave spinlocks.
9629         *
9630         * NB: An MMIO update of the plane base pointer will also
9631         * generate a page-flip completion irq, i.e. every modeset
9632         * is also accompanied by a spurious intel_prepare_page_flip().
9633         */
9634        spin_lock_irqsave(&dev->event_lock, flags);
9635        if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
9636                atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
9637        spin_unlock_irqrestore(&dev->event_lock, flags);
9638}
9639
9640static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
9641{
9642        /* Ensure that the work item is consistent when activating it ... */
9643        smp_wmb();
9644        atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
9645        /* and that it is marked active as soon as the irq could fire. */
9646        smp_wmb();
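        /*
         * Note: the smp_rmb() in do_intel_finish_page_flip() is the
         * read-side counterpart when ->pending is inspected there.
         */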
9647}
9648
9649static int intel_gen2_queue_flip(struct drm_device *dev,
9650                                 struct drm_crtc *crtc,
9651                                 struct drm_framebuffer *fb,
9652                                 struct drm_i915_gem_object *obj,
9653                                 struct intel_engine_cs *ring,
9654                                 uint32_t flags)
9655{
9656        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9657        u32 flip_mask;
9658        int ret;
9659
9660        ret = intel_ring_begin(ring, 6);
9661        if (ret)
9662                return ret;
9663
9664        /* Can't queue multiple flips, so wait for the previous
9665         * one to finish before executing the next.
9666         */
9667        if (intel_crtc->plane)
9668                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9669        else
9670                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9671        intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9672        intel_ring_emit(ring, MI_NOOP);
9673        intel_ring_emit(ring, MI_DISPLAY_FLIP |
9674                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9675        intel_ring_emit(ring, fb->pitches[0]);
9676        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9677        intel_ring_emit(ring, 0); /* aux display base address, unused */
9678
9679        intel_mark_page_flip_active(intel_crtc);
9680        __intel_ring_advance(ring);
9681        return 0;
9682}
9683
9684static int intel_gen3_queue_flip(struct drm_device *dev,
9685                                 struct drm_crtc *crtc,
9686                                 struct drm_framebuffer *fb,
9687                                 struct drm_i915_gem_object *obj,
9688                                 struct intel_engine_cs *ring,
9689                                 uint32_t flags)
9690{
9691        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9692        u32 flip_mask;
9693        int ret;
9694
9695        ret = intel_ring_begin(ring, 6);
9696        if (ret)
9697                return ret;
9698
9699        if (intel_crtc->plane)
9700                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9701        else
9702                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9703        intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9704        intel_ring_emit(ring, MI_NOOP);
9705        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
9706                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9707        intel_ring_emit(ring, fb->pitches[0]);
9708        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9709        intel_ring_emit(ring, MI_NOOP);
9710
9711        intel_mark_page_flip_active(intel_crtc);
9712        __intel_ring_advance(ring);
9713        return 0;
9714}
9715
9716static int intel_gen4_queue_flip(struct drm_device *dev,
9717                                 struct drm_crtc *crtc,
9718                                 struct drm_framebuffer *fb,
9719                                 struct drm_i915_gem_object *obj,
9720                                 struct intel_engine_cs *ring,
9721                                 uint32_t flags)
9722{
9723        struct drm_i915_private *dev_priv = dev->dev_private;
9724        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9725        uint32_t pf, pipesrc;
9726        int ret;
9727
9728        ret = intel_ring_begin(ring, 4);
9729        if (ret)
9730                return ret;
9731
9732        /* i965+ uses the linear or tiled offsets from the
9733         * Display Registers (which do not change across a page-flip)
9734         * so we need only reprogram the base address.
9735         */
9736        intel_ring_emit(ring, MI_DISPLAY_FLIP |
9737                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9738        intel_ring_emit(ring, fb->pitches[0]);
9739        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
9740                        obj->tiling_mode);
9741
9742        /* XXX Enabling the panel-fitter across page-flip is so far
9743         * untested on non-native modes, so ignore it for now.
9744         * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
9745         */
9746        pf = 0;
9747        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9748        intel_ring_emit(ring, pf | pipesrc);
9749
9750        intel_mark_page_flip_active(intel_crtc);
9751        __intel_ring_advance(ring);
9752        return 0;
9753}
9754
9755static int intel_gen6_queue_flip(struct drm_device *dev,
9756                                 struct drm_crtc *crtc,
9757                                 struct drm_framebuffer *fb,
9758                                 struct drm_i915_gem_object *obj,
9759                                 struct intel_engine_cs *ring,
9760                                 uint32_t flags)
9761{
9762        struct drm_i915_private *dev_priv = dev->dev_private;
9763        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9764        uint32_t pf, pipesrc;
9765        int ret;
9766
9767        ret = intel_ring_begin(ring, 4);
9768        if (ret)
9769                return ret;
9770
9771        intel_ring_emit(ring, MI_DISPLAY_FLIP |
9772                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9773        intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
9774        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9775
9776        /* Contrary to the suggestions in the documentation,
9777         * "Enable Panel Fitter" does not seem to be required when page
9778         * flipping with a non-native mode, and worse, causes a normal
9779         * modeset to fail.
9780         * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
9781         */
9782        pf = 0;
9783        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9784        intel_ring_emit(ring, pf | pipesrc);
9785
9786        intel_mark_page_flip_active(intel_crtc);
9787        __intel_ring_advance(ring);
9788        return 0;
9789}
9790
9791static int intel_gen7_queue_flip(struct drm_device *dev,
9792                                 struct drm_crtc *crtc,
9793                                 struct drm_framebuffer *fb,
9794                                 struct drm_i915_gem_object *obj,
9795                                 struct intel_engine_cs *ring,
9796                                 uint32_t flags)
9797{
9798        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9799        uint32_t plane_bit = 0;
9800        int len, ret;
9801
9802        switch (intel_crtc->plane) {
9803        case PLANE_A:
9804                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
9805                break;
9806        case PLANE_B:
9807                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
9808                break;
9809        case PLANE_C:
9810                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
9811                break;
9812        default:
9813                WARN_ONCE(1, "unknown plane in flip command\n");
9814                return -ENODEV;
9815        }
9816
9817        len = 4;
9818        if (ring->id == RCS) {
9819                len += 6;
9820                /*
9821                 * On Gen 8, the SRM command takes an extra dword to accommodate
9822                 * 48-bit addresses, and we need a NOOP to keep the batch size
9823                 * even.
9824                 */
9825                if (IS_GEN8(dev))
9826                        len += 2;
9827        }
9828
9829        /*
9830         * BSpec MI_DISPLAY_FLIP for IVB:
9831         * "The full packet must be contained within the same cache line."
9832         *
9833         * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
9834         * cacheline, if we ever start emitting more commands before
9835         * the MI_DISPLAY_FLIP we may need to first emit everything else,
9836         * then do the cacheline alignment, and finally emit the
9837         * MI_DISPLAY_FLIP.
9838         */
9839        ret = intel_ring_cacheline_align(ring);
9840        if (ret)
9841                return ret;
9842
9843        ret = intel_ring_begin(ring, len);
9844        if (ret)
9845                return ret;
9846
9847        /* Unmask the flip-done completion message. Note that the bspec says that
9848         * we should do this for both the BCS and RCS, and that we must not unmask
9849         * more than one flip event at any time (or ensure that one flip message
9850         * can be sent by waiting for flip-done prior to queueing new flips).
9851         * Experimentation says that BCS works despite DERRMR masking all
9852         * flip-done completion events and that unmasking all planes at once
9853         * for the RCS also doesn't appear to drop events. Setting the DERRMR
9854         * to zero does lead to lockups within MI_DISPLAY_FLIP.
9855         */
9856        if (ring->id == RCS) {
9857                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9858                intel_ring_emit(ring, DERRMR);
9859                intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9860                                        DERRMR_PIPEB_PRI_FLIP_DONE |
9861                                        DERRMR_PIPEC_PRI_FLIP_DONE));
9862                if (IS_GEN8(dev))
9863                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9864                                              MI_SRM_LRM_GLOBAL_GTT);
9865                else
9866                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
9867                                              MI_SRM_LRM_GLOBAL_GTT);
9868                intel_ring_emit(ring, DERRMR);
9869                intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9870                if (IS_GEN8(dev)) {
9871                        intel_ring_emit(ring, 0);
9872                        intel_ring_emit(ring, MI_NOOP);
9873                }
9874        }
9875
9876        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
9877        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
9878        intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9879        intel_ring_emit(ring, (MI_NOOP));
9880
9881        intel_mark_page_flip_active(intel_crtc);
9882        __intel_ring_advance(ring);
9883        return 0;
9884}
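/*
 * For reference, the command stream emitted above for an RCS flip on gen7 is
 * 10 dwords (len = 4 + 6):
 *
 *	MI_LOAD_REGISTER_IMM(1), DERRMR, ~(pipe A/B/C primary flip-done)
 *	MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT, DERRMR, scratch + 256
 *	MI_DISPLAY_FLIP_I915 | plane_bit, pitch | tiling, gtt_offset, MI_NOOP
 *
 * On gen8 the SRM gains an extra address dword plus a trailing MI_NOOP,
 * giving 12 dwords.
 */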
9885
9886static bool use_mmio_flip(struct intel_engine_cs *ring,
9887                          struct drm_i915_gem_object *obj)
9888{
9889        /*
9890         * MMIO flips are not used on older platforms because the lack of a
9891         * flip done interrupt forces us to use CS flips there. Older
9892         * platforms derive flip completion using some clever tricks
9893         * involving the flip_pending status bits and vblank irqs, so using
9894         * MMIO flips would disrupt that mechanism.
9895         */
9896
9897        if (ring == NULL)
9898                return true;
9899
9900        if (INTEL_INFO(ring->dev)->gen < 5)
9901                return false;
9902
9903        if (i915.use_mmio_flip < 0)
9904                return false;
9905        else if (i915.use_mmio_flip > 0)
9906                return true;
9907        else if (i915.enable_execlists)
9908                return true;
9909        else
9910                return ring != i915_gem_request_get_ring(obj->last_read_req);
9911}
9912
9913static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
9914{
9915        struct drm_device *dev = intel_crtc->base.dev;
9916        struct drm_i915_private *dev_priv = dev->dev_private;
9917        struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
9918        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
9919        struct drm_i915_gem_object *obj = intel_fb->obj;
9920        const enum pipe pipe = intel_crtc->pipe;
9921        u32 ctl, stride;
9922
9923        ctl = I915_READ(PLANE_CTL(pipe, 0));
9924        ctl &= ~PLANE_CTL_TILED_MASK;
9925        if (obj->tiling_mode == I915_TILING_X)
9926                ctl |= PLANE_CTL_TILED_X;
9927
9928        /*
9929         * The stride is expressed either as a number of 64-byte chunks for
9930         * linear buffers or as a number of tiles for tiled buffers.
9931         */
9932        stride = fb->pitches[0] >> 6;
9933        if (obj->tiling_mode == I915_TILING_X)
9934                stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */
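        /*
         * For illustration, with a hypothetical pitch: a linear fb with
         * pitches[0] == 7680 yields stride = 7680 / 64 = 120 chunks, while
         * the same pitch on an X-tiled fb yields 7680 / 512 = 15 tiles.
         */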
9935
9936        /*
9937         * PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
9938         * PLANE_SURF writes, so the update as a whole is guaranteed to be atomic.
9939         */
9940        I915_WRITE(PLANE_CTL(pipe, 0), ctl);
9941        I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
9942
9943        I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset);
9944        POSTING_READ(PLANE_SURF(pipe, 0));
9945}
9946
9947static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
9948{
9949        struct drm_device *dev = intel_crtc->base.dev;
9950        struct drm_i915_private *dev_priv = dev->dev_private;
9951        struct intel_framebuffer *intel_fb =
9952                to_intel_framebuffer(intel_crtc->base.primary->fb);
9953        struct drm_i915_gem_object *obj = intel_fb->obj;
9954        u32 dspcntr;
9955        u32 reg;
9956
9957        reg = DSPCNTR(intel_crtc->plane);
9958        dspcntr = I915_READ(reg);
9959
9960        if (obj->tiling_mode != I915_TILING_NONE)
9961                dspcntr |= DISPPLANE_TILED;
9962        else
9963                dspcntr &= ~DISPPLANE_TILED;
9964
9965        I915_WRITE(reg, dspcntr);
9966
9967        I915_WRITE(DSPSURF(intel_crtc->plane),
9968                   intel_crtc->unpin_work->gtt_offset);
9969        POSTING_READ(DSPSURF(intel_crtc->plane));
9970
9971}
9972
9973/*
9974 * XXX: This is the temporary way to update the plane registers until we get
9975 * around to using the usual plane update functions for MMIO flips
9976 */
9977static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
9978{
9979        struct drm_device *dev = intel_crtc->base.dev;
9980        bool atomic_update;
9981        u32 start_vbl_count;
9982
9983        intel_mark_page_flip_active(intel_crtc);
9984
9985        atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
9986
9987        if (INTEL_INFO(dev)->gen >= 9)
9988                skl_do_mmio_flip(intel_crtc);
9989        else
9990                /* use_mmio_flip() restricts MMIO flips to ilk+ */
9991                ilk_do_mmio_flip(intel_crtc);
9992
9993        if (atomic_update)
9994                intel_pipe_update_end(intel_crtc, start_vbl_count);
9995}
9996
9997static void intel_mmio_flip_work_func(struct work_struct *work)
9998{
9999        struct intel_crtc *crtc =
10000                container_of(work, struct intel_crtc, mmio_flip.work);
10001        struct intel_mmio_flip *mmio_flip;
10002
10003        mmio_flip = &crtc->mmio_flip;
10004        if (mmio_flip->req)
10005                WARN_ON(__i915_wait_request(mmio_flip->req,
10006                                            crtc->reset_counter,
10007                                            false, NULL, NULL) != 0);
10008
10009        intel_do_mmio_flip(crtc);
10010        if (mmio_flip->req) {
10011                mutex_lock(&crtc->base.dev->struct_mutex);
10012                i915_gem_request_assign(&mmio_flip->req, NULL);
10013                mutex_unlock(&crtc->base.dev->struct_mutex);
10014        }
10015}
10016
10017static int intel_queue_mmio_flip(struct drm_device *dev,
10018                                 struct drm_crtc *crtc,
10019                                 struct drm_framebuffer *fb,
10020                                 struct drm_i915_gem_object *obj,
10021                                 struct intel_engine_cs *ring,
10022                                 uint32_t flags)
10023{
10024        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10025
10026        i915_gem_request_assign(&intel_crtc->mmio_flip.req,
10027                                obj->last_write_req);
10028
10029        schedule_work(&intel_crtc->mmio_flip.work);
10030
10031        return 0;
10032}
10033
10034static int intel_default_queue_flip(struct drm_device *dev,
10035                                    struct drm_crtc *crtc,
10036                                    struct drm_framebuffer *fb,
10037                                    struct drm_i915_gem_object *obj,
10038                                    struct intel_engine_cs *ring,
10039                                    uint32_t flags)
10040{
10041        return -ENODEV;
10042}
10043
10044static bool __intel_pageflip_stall_check(struct drm_device *dev,
10045                                         struct drm_crtc *crtc)
10046{
10047        struct drm_i915_private *dev_priv = dev->dev_private;
10048        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10049        struct intel_unpin_work *work = intel_crtc->unpin_work;
10050        u32 addr;
10051
10052        if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
10053                return true;
10054
10055        if (!work->enable_stall_check)
10056                return false;
10057
10058        if (work->flip_ready_vblank == 0) {
10059                if (work->flip_queued_req &&
10060                    !i915_gem_request_completed(work->flip_queued_req, true))
10061                        return false;
10062
10063                work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
10064        }
10065
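        /*
         * Only consider the flip potentially stuck once at least three
         * vblanks have passed since it became ready.
         */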
10066        if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
10067                return false;
10068
10069        /* Potential stall - if we see that the flip has happened,
10070         * assume a missed interrupt. */
10071        if (INTEL_INFO(dev)->gen >= 4)
10072                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
10073        else
10074                addr = I915_READ(DSPADDR(intel_crtc->plane));
10075
10076        /* There is a potential issue here with a false positive after a flip
10077         * to the same address. We could address this by checking for a
10078         * non-incrementing frame counter.
10079         */
10080        return addr == work->gtt_offset;
10081}
10082
10083void intel_check_page_flip(struct drm_device *dev, int pipe)
10084{
10085        struct drm_i915_private *dev_priv = dev->dev_private;
10086        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10087        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10088
10089        WARN_ON(!in_interrupt());
10090
10091        if (crtc == NULL)
10092                return;
10093
10094        spin_lock(&dev->event_lock);
10095        if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
10096                WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
10097                         intel_crtc->unpin_work->flip_queued_vblank,
10098                         drm_vblank_count(dev, pipe));
10099                page_flip_completed(intel_crtc);
10100        }
10101        spin_unlock(&dev->event_lock);
10102}
10103
10104static int intel_crtc_page_flip(struct drm_crtc *crtc,
10105                                struct drm_framebuffer *fb,
10106                                struct drm_pending_vblank_event *event,
10107                                uint32_t page_flip_flags)
10108{
10109        struct drm_device *dev = crtc->dev;
10110        struct drm_i915_private *dev_priv = dev->dev_private;
10111        struct drm_framebuffer *old_fb = crtc->primary->fb;
10112        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10113        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10114        struct drm_plane *primary = crtc->primary;
10115        enum pipe pipe = intel_crtc->pipe;
10116        struct intel_unpin_work *work;
10117        struct intel_engine_cs *ring;
10118        int ret;
10119
10120        /*
10121         * drm_mode_page_flip_ioctl() should already catch this, but double
10122         * check to be safe.  In the future we may enable pageflipping from
10123         * a disabled primary plane.
10124         */
10125        if (WARN_ON(intel_fb_obj(old_fb) == NULL))
10126                return -EBUSY;
10127
10128        /* Can't change pixel format via MI display flips. */
10129        if (fb->pixel_format != crtc->primary->fb->pixel_format)
10130                return -EINVAL;
10131
10132        /*
10133         * TILEOFF/LINOFF registers can't be changed via MI display flips.
10134         * Note that pitch changes could also affect these registers.
10135         */
10136        if (INTEL_INFO(dev)->gen > 3 &&
10137            (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
10138             fb->pitches[0] != crtc->primary->fb->pitches[0]))
10139                return -EINVAL;
10140
10141        if (i915_terminally_wedged(&dev_priv->gpu_error))
10142                goto out_hang;
10143
10144        work = kzalloc(sizeof(*work), GFP_KERNEL);
10145        if (work == NULL)
10146                return -ENOMEM;
10147
10148        work->event = event;
10149        work->crtc = crtc;
10150        work->old_fb = old_fb;
10151        INIT_WORK(&work->work, intel_unpin_work_fn);
10152
10153        ret = drm_crtc_vblank_get(crtc);
10154        if (ret)
10155                goto free_work;
10156
10157        /* We borrow the event spin lock for protecting unpin_work */
10158        spin_lock_irq(&dev->event_lock);
10159        if (intel_crtc->unpin_work) {
10160                /* Before declaring the flip queue wedged, check if
10161                 * the hardware completed the operation behind our backs.
10162                 */
10163                if (__intel_pageflip_stall_check(dev, crtc)) {
10164                        DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
10165                        page_flip_completed(intel_crtc);
10166                } else {
10167                        DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
10168                        spin_unlock_irq(&dev->event_lock);
10169
10170                        drm_crtc_vblank_put(crtc);
10171                        kfree(work);
10172                        return -EBUSY;
10173                }
10174        }
10175        intel_crtc->unpin_work = work;
10176        spin_unlock_irq(&dev->event_lock);
10177
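        /*
         * If earlier unpin work is still outstanding, flush the workqueue
         * so we do not queue flips faster than the unpin work can retire
         * them.
         */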
10178        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
10179                flush_workqueue(dev_priv->wq);
10180
10181        /* Reference the objects for the scheduled work. */
10182        drm_framebuffer_reference(work->old_fb);
10183        drm_gem_object_reference(&obj->base);
10184
10185        crtc->primary->fb = fb;
10186        update_state_fb(crtc->primary);
10187
10188        work->pending_flip_obj = obj;
10189
10190        ret = i915_mutex_lock_interruptible(dev);
10191        if (ret)
10192                goto cleanup;
10193
10194        atomic_inc(&intel_crtc->unpin_work_count);
10195        intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
10196
10197        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
10198                work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
10199
10200        if (IS_VALLEYVIEW(dev)) {
10201                ring = &dev_priv->ring[BCS];
10202                if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
10203                        /* vlv: DISPLAY_FLIP fails to change tiling */
10204                        ring = NULL;
10205        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
10206                ring = &dev_priv->ring[BCS];
10207        } else if (INTEL_INFO(dev)->gen >= 7) {
10208                ring = i915_gem_request_get_ring(obj->last_read_req);
10209                if (ring == NULL || ring->id != RCS)
10210                        ring = &dev_priv->ring[BCS];
10211        } else {
10212                ring = &dev_priv->ring[RCS];
10213        }
10214
10215        ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
10216                                         crtc->primary->state, ring);
10217        if (ret)
10218                goto cleanup_pending;
10219
10220        work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
10221                                                  + intel_crtc->dspaddr_offset;
10222
10223        if (use_mmio_flip(ring, obj)) {
10224                ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
10225                                            page_flip_flags);
10226                if (ret)
10227                        goto cleanup_unpin;
10228
10229                i915_gem_request_assign(&work->flip_queued_req,
10230                                        obj->last_write_req);
10231        } else {
10232                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
10233                                                   page_flip_flags);
10234                if (ret)
10235                        goto cleanup_unpin;
10236
10237                i915_gem_request_assign(&work->flip_queued_req,
10238                                        intel_ring_get_request(ring));
10239        }
10240
10241        work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
10242        work->enable_stall_check = true;
10243
10244        i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
10245                          INTEL_FRONTBUFFER_PRIMARY(pipe));
10246
10247        intel_fbc_disable(dev);
10248        intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
10249        mutex_unlock(&dev->struct_mutex);
10250
10251        trace_i915_flip_request(intel_crtc->plane, obj);
10252
10253        return 0;
10254
10255cleanup_unpin:
10256        intel_unpin_fb_obj(fb, crtc->primary->state);
10257cleanup_pending:
10258        atomic_dec(&intel_crtc->unpin_work_count);
10259        mutex_unlock(&dev->struct_mutex);
10260cleanup:
10261        crtc->primary->fb = old_fb;
10262        update_state_fb(crtc->primary);
10263
10264        drm_gem_object_unreference_unlocked(&obj->base);
10265        drm_framebuffer_unreference(work->old_fb);
10266
10267        spin_lock_irq(&dev->event_lock);
10268        intel_crtc->unpin_work = NULL;
10269        spin_unlock_irq(&dev->event_lock);
10270
10271        drm_crtc_vblank_put(crtc);
10272free_work:
10273        kfree(work);
10274
10275        if (ret == -EIO) {
10276out_hang:
10277                ret = intel_plane_restore(primary);
10278                if (ret == 0 && event) {
10279                        spin_lock_irq(&dev->event_lock);
10280                        drm_send_vblank_event(dev, pipe, event);
10281                        spin_unlock_irq(&dev->event_lock);
10282                }
10283        }
10284        return ret;
10285}
10286
10287static struct drm_crtc_helper_funcs intel_helper_funcs = {
10288        .mode_set_base_atomic = intel_pipe_set_base_atomic,
10289        .load_lut = intel_crtc_load_lut,
10290        .atomic_begin = intel_begin_crtc_commit,
10291        .atomic_flush = intel_finish_crtc_commit,
10292};
10293
10294/**
10295 * intel_modeset_update_staged_output_state
10296 *
10297 * Updates the staged output configuration state, e.g. after we've read out the
10298 * current hw state.
10299 */
10300static void intel_modeset_update_staged_output_state(struct drm_device *dev)
10301{
10302        struct intel_crtc *crtc;
10303        struct intel_encoder *encoder;
10304        struct intel_connector *connector;
10305
10306        for_each_intel_connector(dev, connector) {
10307                connector->new_encoder =
10308                        to_intel_encoder(connector->base.encoder);
10309        }
10310
10311        for_each_intel_encoder(dev, encoder) {
10312                encoder->new_crtc =
10313                        to_intel_crtc(encoder->base.crtc);
10314        }
10315
10316        for_each_intel_crtc(dev, crtc) {
10317                crtc->new_enabled = crtc->base.state->enable;
10318
10319                if (crtc->new_enabled)
10320                        crtc->new_config = crtc->config;
10321                else
10322                        crtc->new_config = NULL;
10323        }
10324}
10325
10326/* Transitional helper to copy current connector/encoder state to
10327 * connector->state. This is needed so that code that is partially
10328 * converted to atomic does the right thing.
10329 */
10330static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
10331{
10332        struct intel_connector *connector;
10333
10334        for_each_intel_connector(dev, connector) {
10335                if (connector->base.encoder) {
10336                        connector->base.state->best_encoder =
10337                                connector->base.encoder;
10338                        connector->base.state->crtc =
10339                                connector->base.encoder->crtc;
10340                } else {
10341                        connector->base.state->best_encoder = NULL;
10342                        connector->base.state->crtc = NULL;
10343                }
10344        }
10345}
10346
10347/**
10348 * intel_modeset_commit_output_state
10349 *
10350 * This function copies the staged display pipe configuration to the real one.
10351 */
10352static void intel_modeset_commit_output_state(struct drm_device *dev)
10353{
10354        struct intel_crtc *crtc;
10355        struct intel_encoder *encoder;
10356        struct intel_connector *connector;
10357
10358        for_each_intel_connector(dev, connector) {
10359                connector->base.encoder = &connector->new_encoder->base;
10360        }
10361
10362        for_each_intel_encoder(dev, encoder) {
10363                encoder->base.crtc = &encoder->new_crtc->base;
10364        }
10365
10366        for_each_intel_crtc(dev, crtc) {
10367                crtc->base.state->enable = crtc->new_enabled;
10368                crtc->base.enabled = crtc->new_enabled;
10369        }
10370
10371        intel_modeset_update_connector_atomic_state(dev);
10372}
10373
10374static void
10375connected_sink_compute_bpp(struct intel_connector *connector,
10376                           struct intel_crtc_state *pipe_config)
10377{
10378        int bpp = pipe_config->pipe_bpp;
10379
10380        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
10381                connector->base.base.id,
10382                connector->base.name);
10383
10384        /* Don't use an invalid EDID bpc value */
10385        if (connector->base.display_info.bpc &&
10386            connector->base.display_info.bpc * 3 < bpp) {
10387                DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
10388                              bpp, connector->base.display_info.bpc*3);
10389                pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
10390        }
10391
10392        /* Clamp bpp to 8 bpc (24 bpp) on screens without EDID 1.4 */
10393        if (connector->base.display_info.bpc == 0 && bpp > 24) {
10394                DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
10395                              bpp);
10396                pipe_config->pipe_bpp = 24;
10397        }
10398}
10399
10400static int
10401compute_baseline_pipe_bpp(struct intel_crtc *crtc,
10402                          struct drm_framebuffer *fb,
10403                          struct intel_crtc_state *pipe_config)
10404{
10405        struct drm_device *dev = crtc->base.dev;
10406        struct drm_atomic_state *state;
10407        struct intel_connector *connector;
10408        int bpp, i;
10409
10410        switch (fb->pixel_format) {
10411        case DRM_FORMAT_C8:
10412                bpp = 8*3; /* since we go through a colormap */
10413                break;
10414        case DRM_FORMAT_XRGB1555:
10415        case DRM_FORMAT_ARGB1555:
10416                /* checked in intel_framebuffer_init already */
10417                if (WARN_ON(INTEL_INFO(dev)->gen > 3))
10418                        return -EINVAL;
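                /* fall through */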
10419        case DRM_FORMAT_RGB565:
10420                bpp = 6*3; /* min is 18bpp */
10421                break;
10422        case DRM_FORMAT_XBGR8888:
10423        case DRM_FORMAT_ABGR8888:
10424                /* checked in intel_framebuffer_init already */
10425                if (WARN_ON(INTEL_INFO(dev)->gen < 4))
10426                        return -EINVAL;
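                /* fall through */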
10427        case DRM_FORMAT_XRGB8888:
10428        case DRM_FORMAT_ARGB8888:
10429                bpp = 8*3;
10430                break;
10431        case DRM_FORMAT_XRGB2101010:
10432        case DRM_FORMAT_ARGB2101010:
10433        case DRM_FORMAT_XBGR2101010:
10434        case DRM_FORMAT_ABGR2101010:
10435                /* checked in intel_framebuffer_init already */
10436                if (WARN_ON(INTEL_INFO(dev)->gen < 4))
10437                        return -EINVAL;
10438                bpp = 10*3;
10439                break;
10440        /* TODO: gen4+ supports 16 bpc floating point, too. */
10441        default:
10442                DRM_DEBUG_KMS("unsupported depth\n");
10443                return -EINVAL;
10444        }
10445
10446        pipe_config->pipe_bpp = bpp;
10447
10448        state = pipe_config->base.state;
10449
10450        /* Clamp display bpp to EDID value */
10451        for (i = 0; i < state->num_connector; i++) {
10452                if (!state->connectors[i])
10453                        continue;
10454
10455                connector = to_intel_connector(state->connectors[i]);
10456                if (state->connector_states[i]->crtc != &crtc->base)
10457                        continue;
10458
10459                connected_sink_compute_bpp(connector, pipe_config);
10460        }
10461
10462        return bpp;
10463}
10464
10465static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
10466{
10467        DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
10468                        "type: 0x%x flags: 0x%x\n",
10469                mode->crtc_clock,
10470                mode->crtc_hdisplay, mode->crtc_hsync_start,
10471                mode->crtc_hsync_end, mode->crtc_htotal,
10472                mode->crtc_vdisplay, mode->crtc_vsync_start,
10473                mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
10474}
10475
10476static void intel_dump_pipe_config(struct intel_crtc *crtc,
10477                                   struct intel_crtc_state *pipe_config,
10478                                   const char *context)
10479{
10480        DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
10481                      context, pipe_name(crtc->pipe));
10482
10483        DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
10484        DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
10485                      pipe_config->pipe_bpp, pipe_config->dither);
10486        DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
10487                      pipe_config->has_pch_encoder,
10488                      pipe_config->fdi_lanes,
10489                      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
10490                      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
10491                      pipe_config->fdi_m_n.tu);
10492        DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
10493                      pipe_config->has_dp_encoder,
10494                      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
10495                      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
10496                      pipe_config->dp_m_n.tu);
10497
10498        DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
10499                      pipe_config->has_dp_encoder,
10500                      pipe_config->dp_m2_n2.gmch_m,
10501                      pipe_config->dp_m2_n2.gmch_n,
10502                      pipe_config->dp_m2_n2.link_m,
10503                      pipe_config->dp_m2_n2.link_n,
10504                      pipe_config->dp_m2_n2.tu);
10505
10506        DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
10507                      pipe_config->has_audio,
10508                      pipe_config->has_infoframe);
10509
10510        DRM_DEBUG_KMS("requested mode:\n");
10511        drm_mode_debug_printmodeline(&pipe_config->base.mode);
10512        DRM_DEBUG_KMS("adjusted mode:\n");
10513        drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
10514        intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
10515        DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
10516        DRM_DEBUG_KMS("pipe src size: %dx%d\n",
10517                      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
10518        DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
10519                      pipe_config->gmch_pfit.control,
10520                      pipe_config->gmch_pfit.pgm_ratios,
10521                      pipe_config->gmch_pfit.lvds_border_bits);
10522        DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
10523                      pipe_config->pch_pfit.pos,
10524                      pipe_config->pch_pfit.size,
10525                      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
10526        DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
10527        DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
10528}
10529
10530static bool encoders_cloneable(const struct intel_encoder *a,
10531                               const struct intel_encoder *b)
10532{
10533        /* masks could be asymmetric, so check both ways */
10534        return a == b || (a->cloneable & (1 << b->type) &&
10535                          b->cloneable & (1 << a->type));
10536}
10537
10538static bool check_single_encoder_cloning(struct intel_crtc *crtc,
10539                                         struct intel_encoder *encoder)
10540{
10541        struct drm_device *dev = crtc->base.dev;
10542        struct intel_encoder *source_encoder;
10543
10544        for_each_intel_encoder(dev, source_encoder) {
10545                if (source_encoder->new_crtc != crtc)
10546                        continue;
10547
10548                if (!encoders_cloneable(encoder, source_encoder))
10549                        return false;
10550        }
10551
10552        return true;
10553}
10554
10555static bool check_encoder_cloning(struct intel_crtc *crtc)
10556{
10557        struct drm_device *dev = crtc->base.dev;
10558        struct intel_encoder *encoder;
10559
10560        for_each_intel_encoder(dev, encoder) {
10561                if (encoder->new_crtc != crtc)
10562                        continue;
10563
10564                if (!check_single_encoder_cloning(crtc, encoder))
10565                        return false;
10566        }
10567
10568        return true;
10569}
10570
10571static bool check_digital_port_conflicts(struct drm_device *dev)
10572{
10573        struct intel_connector *connector;
10574        unsigned int used_ports = 0;
10575
10576        /*
10577         * Walk the connector list instead of the encoder
10578         * list to detect the problem on ddi platforms
10579         * where there's just one encoder per digital port.
10580         */
10581        for_each_intel_connector(dev, connector) {
10582                struct intel_encoder *encoder = connector->new_encoder;
10583
10584                if (!encoder)
10585                        continue;
10586
10587                WARN_ON(!encoder->new_crtc);
10588
10589                switch (encoder->type) {
10590                        unsigned int port_mask;
10591                case INTEL_OUTPUT_UNKNOWN:
10592                        if (WARN_ON(!HAS_DDI(dev)))
10593                                break;
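                        /* else: fall through */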
10594                case INTEL_OUTPUT_DISPLAYPORT:
10595                case INTEL_OUTPUT_HDMI:
10596                case INTEL_OUTPUT_EDP:
10597                        port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
10598
10599                        /* the same port mustn't appear more than once */
10600                        if (used_ports & port_mask)
10601                                return false;
10602
10603                        used_ports |= port_mask;
10604                default:
10605                        break;
10606                }
10607        }
10608
10609        return true;
10610}
10611
10612static void
10613clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
10614{
10615        struct drm_crtc_state tmp_state;
10616
10617        /* Clear only the intel specific part of the crtc state */
10618        tmp_state = crtc_state->base;
10619        memset(crtc_state, 0, sizeof *crtc_state);
10620        crtc_state->base = tmp_state;
10621}
10622
10623static struct intel_crtc_state *
10624intel_modeset_pipe_config(struct drm_crtc *crtc,
10625                          struct drm_framebuffer *fb,
10626                          struct drm_display_mode *mode,
10627                          struct drm_atomic_state *state)
10628{
10629        struct drm_device *dev = crtc->dev;
10630        struct intel_encoder *encoder;
10631        struct intel_connector *connector;
10632        struct drm_connector_state *connector_state;
10633        struct intel_crtc_state *pipe_config;
10634        int plane_bpp, ret = -EINVAL;
10635        int i;
10636        bool retry = true;
10637
10638        if (!check_encoder_cloning(to_intel_crtc(crtc))) {
10639                DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
10640                return ERR_PTR(-EINVAL);
10641        }
10642
10643        if (!check_digital_port_conflicts(dev)) {
10644                DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
10645                return ERR_PTR(-EINVAL);
10646        }
10647
10648        pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
10649        if (IS_ERR(pipe_config))
10650                return pipe_config;
10651
10652        clear_intel_crtc_state(pipe_config);
10653
10654        pipe_config->base.crtc = crtc;
10655        drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
10656        drm_mode_copy(&pipe_config->base.mode, mode);
10657
10658        pipe_config->cpu_transcoder =
10659                (enum transcoder) to_intel_crtc(crtc)->pipe;
10660        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
10661
10662        /*
10663         * Sanitize sync polarity flags based on requested ones. If neither
10664         * positive nor negative polarity is requested, treat this as meaning
10665         * negative polarity.
10666         */
10667        if (!(pipe_config->base.adjusted_mode.flags &
10668              (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
10669                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
10670
10671        if (!(pipe_config->base.adjusted_mode.flags &
10672              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
10673                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
10674
10675        /* Compute a starting value for pipe_config->pipe_bpp taking the source
10676         * plane pixel format and any sink constraints into account. Returns the
10677         * source plane bpp so that dithering can be selected on mismatches
10678         * after the encoders and the crtc have also had their say. */
10679        plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
10680                                              fb, pipe_config);
10681        if (plane_bpp < 0)
10682                goto fail;
10683
10684        /*
10685         * Determine the real pipe dimensions. Note that stereo modes can
10686         * increase the actual pipe size due to the frame doubling and
10687         * insertion of additional space for blanks between the frames. This
10688         * is stored in the crtc timings. We use the requested mode to do this
10689         * computation to clearly distinguish it from the adjusted mode, which
10690         * can be changed by the connectors in the below retry loop.
10691         */
10692        drm_crtc_get_hv_timing(&pipe_config->base.mode,
10693                               &pipe_config->pipe_src_w,
10694                               &pipe_config->pipe_src_h);
10695
10696encoder_retry:
10697        /* Ensure the port clock defaults are reset when retrying. */
10698        pipe_config->port_clock = 0;
10699        pipe_config->pixel_multiplier = 1;
10700
10701        /* Fill in default crtc timings, allow encoders to overwrite them. */
10702        drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
10703                              CRTC_STEREO_DOUBLE);
10704
10705        /* Pass our mode to the connectors and the CRTC to give them a chance to
10706         * adjust it according to limitations or connector properties, and also
10707         * a chance to reject the mode entirely.
10708         */
10709        for (i = 0; i < state->num_connector; i++) {
10710                connector = to_intel_connector(state->connectors[i]);
10711                if (!connector)
10712                        continue;
10713
10714                connector_state = state->connector_states[i];
10715                if (connector_state->crtc != crtc)
10716                        continue;
10717
10718                encoder = to_intel_encoder(connector_state->best_encoder);
10719
10720                if (!(encoder->compute_config(encoder, pipe_config))) {
10721                        DRM_DEBUG_KMS("Encoder config failure\n");
10722                        goto fail;
10723                }
10724        }
10725
10726        /* Set default port clock if not overwritten by the encoder. Needs to be
10727         * done afterwards in case the encoder adjusts the mode. */
10728        if (!pipe_config->port_clock)
10729                pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
10730                        * pipe_config->pixel_multiplier;
10731
10732        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
10733        if (ret < 0) {
10734                DRM_DEBUG_KMS("CRTC fixup failed\n");
10735                goto fail;
10736        }
10737
10738        if (ret == RETRY) {
10739                if (WARN(!retry, "loop in pipe configuration computation\n")) {
10740                        ret = -EINVAL;
10741                        goto fail;
10742                }
10743
10744                DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
10745                retry = false;
10746                goto encoder_retry;
10747        }
10748
10749        pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
10750        DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
10751                      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
10752
10753        return pipe_config;
10754fail:
10755        return ERR_PTR(ret);
10756}
10757
10758/* Computes which crtcs are affected and sets the relevant bits in the mask. For
10759 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
10760static void
10761intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
10762                             unsigned *prepare_pipes, unsigned *disable_pipes)
10763{
10764        struct intel_crtc *intel_crtc;
10765        struct drm_device *dev = crtc->dev;
10766        struct intel_encoder *encoder;
10767        struct intel_connector *connector;
10768        struct drm_crtc *tmp_crtc;
10769
10770        *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
10771
10772        /* Check which crtcs have changed outputs connected to them; these need
10773         * to be part of the prepare_pipes mask. We don't (yet) support global
10774         * modeset across multiple crtcs, so modeset_pipes will only have one
10775         * bit set at most. */
10776        for_each_intel_connector(dev, connector) {
10777                if (connector->base.encoder == &connector->new_encoder->base)
10778                        continue;
10779
10780                if (connector->base.encoder) {
10781                        tmp_crtc = connector->base.encoder->crtc;
10782
10783                        *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10784                }
10785
10786                if (connector->new_encoder)
10787                        *prepare_pipes |=
10788                                1 << connector->new_encoder->new_crtc->pipe;
10789        }
10790
10791        for_each_intel_encoder(dev, encoder) {
10792                if (encoder->base.crtc == &encoder->new_crtc->base)
10793                        continue;
10794
10795                if (encoder->base.crtc) {
10796                        tmp_crtc = encoder->base.crtc;
10797
10798                        *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10799                }
10800
10801                if (encoder->new_crtc)
10802                        *prepare_pipes |= 1 << encoder->new_crtc->pipe;
10803        }
10804
10805        /* Check for pipes that will be enabled/disabled ... */
10806        for_each_intel_crtc(dev, intel_crtc) {
10807                if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
10808                        continue;
10809
10810                if (!intel_crtc->new_enabled)
10811                        *disable_pipes |= 1 << intel_crtc->pipe;
10812                else
10813                        *prepare_pipes |= 1 << intel_crtc->pipe;
10814        }
10815
10816
10817        /* set_mode is also used to update properties on live display pipes. */
10818        intel_crtc = to_intel_crtc(crtc);
10819        if (intel_crtc->new_enabled)
10820                *prepare_pipes |= 1 << intel_crtc->pipe;
10821
10822        /*
10823         * For simplicity do a full modeset on any pipe where the output routing
10824         * changed. We could be more clever, but that would require us to be
10825         * more careful with calling the relevant encoder->mode_set functions.
10826         */
10827        if (*prepare_pipes)
10828                *modeset_pipes = *prepare_pipes;
10829
10830        /* ... and mask these out. */
10831        *modeset_pipes &= ~(*disable_pipes);
10832        *prepare_pipes &= ~(*disable_pipes);
10833
10834        /*
10835         * HACK: We don't (yet) fully support global modesets. intel_set_config
10836         * obeys this rule, but the modeset restore mode of
10837         * intel_modeset_setup_hw_state does not.
10838         */
10839        *modeset_pipes &= 1 << intel_crtc->pipe;
10840        *prepare_pipes &= 1 << intel_crtc->pipe;
10841
10842        DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
10843                      *modeset_pipes, *prepare_pipes, *disable_pipes);
10844}
10845
10846static bool intel_crtc_in_use(struct drm_crtc *crtc)
10847{
10848        struct drm_encoder *encoder;
10849        struct drm_device *dev = crtc->dev;
10850
10851        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10852                if (encoder->crtc == crtc)
10853                        return true;
10854
10855        return false;
10856}
10857
10858static void
10859intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10860{
10861        struct drm_i915_private *dev_priv = dev->dev_private;
10862        struct intel_encoder *intel_encoder;
10863        struct intel_crtc *intel_crtc;
10864        struct drm_connector *connector;
10865
10866        intel_shared_dpll_commit(dev_priv);
10867
10868        for_each_intel_encoder(dev, intel_encoder) {
10869                if (!intel_encoder->base.crtc)
10870                        continue;
10871
10872                intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
10873
10874                if (prepare_pipes & (1 << intel_crtc->pipe))
10875                        intel_encoder->connectors_active = false;
10876        }
10877
10878        intel_modeset_commit_output_state(dev);
10879
10880        /* Double check state. */
10881        for_each_intel_crtc(dev, intel_crtc) {
10882                WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
10883                WARN_ON(intel_crtc->new_config &&
10884                        intel_crtc->new_config != intel_crtc->config);
10885                WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
10886        }
10887
10888        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10889                if (!connector->encoder || !connector->encoder->crtc)
10890                        continue;
10891
10892                intel_crtc = to_intel_crtc(connector->encoder->crtc);
10893
10894                if (prepare_pipes & (1 << intel_crtc->pipe)) {
10895                        struct drm_property *dpms_property =
10896                                dev->mode_config.dpms_property;
10897
10898                        connector->dpms = DRM_MODE_DPMS_ON;
10899                        drm_object_property_set_value(&connector->base,
10900                                                         dpms_property,
10901                                                         DRM_MODE_DPMS_ON);
10902
10903                        intel_encoder = to_intel_encoder(connector->encoder);
10904                        intel_encoder->connectors_active = true;
10905                }
10906        }
10907
10908}
10909
10910static bool intel_fuzzy_clock_check(int clock1, int clock2)
10911{
10912        int diff;
10913
10914        if (clock1 == clock2)
10915                return true;
10916
10917        if (!clock1 || !clock2)
10918                return false;
10919
10920        diff = abs(clock1 - clock2);
10921
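        /*
         * Accept the clocks as matching when they differ by less than 5%
         * of their sum: (diff + clock1 + clock2) * 100 / (clock1 + clock2)
         * < 105 is, up to integer rounding, diff < 0.05 * (clock1 + clock2).
         */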
10922        if ((((diff + clock1 + clock2) * 100) / (clock1 + clock2)) < 105)
10923                return true;
10924
10925        return false;
10926}
10927
10928#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
10929        list_for_each_entry((intel_crtc), \
10930                            &(dev)->mode_config.crtc_list, \
10931                            base.head) \
10932                if (mask & (1 <<(intel_crtc)->pipe))
10933
10934static bool
10935intel_pipe_config_compare(struct drm_device *dev,
10936                          struct intel_crtc_state *current_config,
10937                          struct intel_crtc_state *pipe_config)
10938{
10939#define PIPE_CONF_CHECK_X(name) \
10940        if (current_config->name != pipe_config->name) { \
10941                DRM_ERROR("mismatch in " #name " " \
10942                          "(expected 0x%08x, found 0x%08x)\n", \
10943                          current_config->name, \
10944                          pipe_config->name); \
10945                return false; \
10946        }
10947
10948#define PIPE_CONF_CHECK_I(name) \
10949        if (current_config->name != pipe_config->name) { \
10950                DRM_ERROR("mismatch in " #name " " \
10951                          "(expected %i, found %i)\n", \
10952                          current_config->name, \
10953                          pipe_config->name); \
10954                return false; \
10955        }
10956
10957/* This is required for BDW+ where there is only one set of registers for
10958 * switching between high and low RR.
10959 * This macro can be used whenever a comparison has to be made between one
10960 * hw state and multiple sw state variables.
10961 */
10962#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
10963        if ((current_config->name != pipe_config->name) && \
10964                (current_config->alt_name != pipe_config->name)) { \
10965                        DRM_ERROR("mismatch in " #name " " \
10966                                  "(expected %i or %i, found %i)\n", \
10967                                  current_config->name, \
10968                                  current_config->alt_name, \
10969                                  pipe_config->name); \
10970                        return false; \
10971        }
10972
10973#define PIPE_CONF_CHECK_FLAGS(name, mask)       \
10974        if ((current_config->name ^ pipe_config->name) & (mask)) { \
10975                DRM_ERROR("mismatch in " #name "(" #mask ") "      \
10976                          "(expected %i, found %i)\n", \
10977                          current_config->name & (mask), \
10978                          pipe_config->name & (mask)); \
10979                return false; \
10980        }
10981
10982#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
10983        if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10984                DRM_ERROR("mismatch in " #name " " \
10985                          "(expected %i, found %i)\n", \
10986                          current_config->name, \
10987                          pipe_config->name); \
10988                return false; \
10989        }
10990
10991#define PIPE_CONF_QUIRK(quirk)  \
10992        ((current_config->quirks | pipe_config->quirks) & (quirk))
10993
10994        PIPE_CONF_CHECK_I(cpu_transcoder);
10995
10996        PIPE_CONF_CHECK_I(has_pch_encoder);
10997        PIPE_CONF_CHECK_I(fdi_lanes);
10998        PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
10999        PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
11000        PIPE_CONF_CHECK_I(fdi_m_n.link_m);
11001        PIPE_CONF_CHECK_I(fdi_m_n.link_n);
11002        PIPE_CONF_CHECK_I(fdi_m_n.tu);
11003
11004        PIPE_CONF_CHECK_I(has_dp_encoder);
11005
11006        if (INTEL_INFO(dev)->gen < 8) {
11007                PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
11008                PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
11009                PIPE_CONF_CHECK_I(dp_m_n.link_m);
11010                PIPE_CONF_CHECK_I(dp_m_n.link_n);
11011                PIPE_CONF_CHECK_I(dp_m_n.tu);
11012
11013                if (current_config->has_drrs) {
11014                        PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
11015                        PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
11016                        PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
11017                        PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
11018                        PIPE_CONF_CHECK_I(dp_m2_n2.tu);
11019                }
11020        } else {
11021                PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
11022                PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
11023                PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
11024                PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
11025                PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
11026        }
11027
11028        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
11029        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
11030        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
11031        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
11032        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
11033        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
11034
11035        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
11036        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
11037        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
11038        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
11039        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
11040        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
11041
11042        PIPE_CONF_CHECK_I(pixel_multiplier);
11043        PIPE_CONF_CHECK_I(has_hdmi_sink);
11044        if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
11045            IS_VALLEYVIEW(dev))
11046                PIPE_CONF_CHECK_I(limited_color_range);
11047        PIPE_CONF_CHECK_I(has_infoframe);
11048
11049        PIPE_CONF_CHECK_I(has_audio);
11050
11051        PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11052                              DRM_MODE_FLAG_INTERLACE);
11053
11054        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
11055                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11056                                      DRM_MODE_FLAG_PHSYNC);
11057                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11058                                      DRM_MODE_FLAG_NHSYNC);
11059                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11060                                      DRM_MODE_FLAG_PVSYNC);
11061                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11062                                      DRM_MODE_FLAG_NVSYNC);
11063        }
11064
11065        PIPE_CONF_CHECK_I(pipe_src_w);
11066        PIPE_CONF_CHECK_I(pipe_src_h);
11067
11068        /*
11069         * FIXME: BIOS likes to set up a cloned config with lvds+external
11070         * screen. Since we don't yet re-compute the pipe config when moving
11071         * just the lvds port away to another pipe, the sw tracking won't match.
11072         *
11073         * Proper atomic modesets with recomputed global state will fix this.
11074         * Until then just don't check gmch state for inherited modes.
11075         */
11076        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
11077                PIPE_CONF_CHECK_I(gmch_pfit.control);
11078                /* pfit ratios are autocomputed by the hw on gen4+ */
11079                if (INTEL_INFO(dev)->gen < 4)
11080                        PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
11081                PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
11082        }
11083
11084        PIPE_CONF_CHECK_I(pch_pfit.enabled);
11085        if (current_config->pch_pfit.enabled) {
11086                PIPE_CONF_CHECK_I(pch_pfit.pos);
11087                PIPE_CONF_CHECK_I(pch_pfit.size);
11088        }
11089
11090        /* BDW+ don't expose a synchronous way to read the state */
11091        if (IS_HASWELL(dev))
11092                PIPE_CONF_CHECK_I(ips_enabled);
11093
11094        PIPE_CONF_CHECK_I(double_wide);
11095
11096        PIPE_CONF_CHECK_X(ddi_pll_sel);
11097
11098        PIPE_CONF_CHECK_I(shared_dpll);
11099        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
11100        PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
11101        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
11102        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
11103        PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
11104        PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
11105        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
11106        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
11107
11108        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
11109                PIPE_CONF_CHECK_I(pipe_bpp);
11110
11111        PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
11112        PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
11113
11114#undef PIPE_CONF_CHECK_X
11115#undef PIPE_CONF_CHECK_I
11116#undef PIPE_CONF_CHECK_I_ALT
11117#undef PIPE_CONF_CHECK_FLAGS
11118#undef PIPE_CONF_CHECK_CLOCK_FUZZY
11119#undef PIPE_CONF_QUIRK
11120
11121        return true;
11122}
11123
11124static void check_wm_state(struct drm_device *dev)
11125{
11126        struct drm_i915_private *dev_priv = dev->dev_private;
11127        struct skl_ddb_allocation hw_ddb, *sw_ddb;
11128        struct intel_crtc *intel_crtc;
11129        int plane;
11130
11131        if (INTEL_INFO(dev)->gen < 9)
11132                return;
11133
11134        skl_ddb_get_hw_state(dev_priv, &hw_ddb);
11135        sw_ddb = &dev_priv->wm.skl_hw.ddb;
11136
11137        for_each_intel_crtc(dev, intel_crtc) {
11138                struct skl_ddb_entry *hw_entry, *sw_entry;
11139                const enum pipe pipe = intel_crtc->pipe;
11140
11141                if (!intel_crtc->active)
11142                        continue;
11143
11144                /* planes */
11145                for_each_plane(dev_priv, pipe, plane) {
11146                        hw_entry = &hw_ddb.plane[pipe][plane];
11147                        sw_entry = &sw_ddb->plane[pipe][plane];
11148
11149                        if (skl_ddb_entry_equal(hw_entry, sw_entry))
11150                                continue;
11151
11152                        DRM_ERROR("mismatch in DDB state pipe %c plane %d "
11153                                  "(expected (%u,%u), found (%u,%u))\n",
11154                                  pipe_name(pipe), plane + 1,
11155                                  sw_entry->start, sw_entry->end,
11156                                  hw_entry->start, hw_entry->end);
11157                }
11158
11159                /* cursor */
11160                hw_entry = &hw_ddb.cursor[pipe];
11161                sw_entry = &sw_ddb->cursor[pipe];
11162
11163                if (skl_ddb_entry_equal(hw_entry, sw_entry))
11164                        continue;
11165
11166                DRM_ERROR("mismatch in DDB state pipe %c cursor "
11167                          "(expected (%u,%u), found (%u,%u))\n",
11168                          pipe_name(pipe),
11169                          sw_entry->start, sw_entry->end,
11170                          hw_entry->start, hw_entry->end);
11171        }
11172}
11173
11174static void
11175check_connector_state(struct drm_device *dev)
11176{
11177        struct intel_connector *connector;
11178
11179        for_each_intel_connector(dev, connector) {
11180                /* This also checks the encoder/connector hw state with the
11181                 * ->get_hw_state callbacks. */
11182                intel_connector_check_state(connector);
11183
11184                I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
11185                     "connector's staged encoder doesn't match current encoder\n");
11186        }
11187}
11188
11189static void
11190check_encoder_state(struct drm_device *dev)
11191{
11192        struct intel_encoder *encoder;
11193        struct intel_connector *connector;
11194
11195        for_each_intel_encoder(dev, encoder) {
11196                bool enabled = false;
11197                bool active = false;
11198                enum pipe pipe, tracked_pipe;
11199
11200                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
11201                              encoder->base.base.id,
11202                              encoder->base.name);
11203
11204                I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
11205                     "encoder's staged crtc doesn't match current crtc\n");
11206                I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
11207                     "encoder's active_connectors set, but no crtc\n");
11208
11209                for_each_intel_connector(dev, connector) {
11210                        if (connector->base.encoder != &encoder->base)
11211                                continue;
11212                        enabled = true;
11213                        if (connector->base.dpms != DRM_MODE_DPMS_OFF)
11214                                active = true;
11215                }
11216                /*
11217                 * For MST connectors, if we unplug, the connector is gone
11218                 * but the encoder is still connected to a crtc until a
11219                 * modeset happens in response to the hotplug.
11220                 */
11221                if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
11222                        continue;
11223
11224                I915_STATE_WARN(!!encoder->base.crtc != enabled,
11225                     "encoder's enabled state mismatch "
11226                     "(expected %i, found %i)\n",
11227                     !!encoder->base.crtc, enabled);
11228                I915_STATE_WARN(active && !encoder->base.crtc,
11229                     "active encoder with no crtc\n");
11230
11231                I915_STATE_WARN(encoder->connectors_active != active,
11232                     "encoder's computed active state doesn't match tracked active state "
11233                     "(expected %i, found %i)\n", active, encoder->connectors_active);
11234
11235                active = encoder->get_hw_state(encoder, &pipe);
11236                I915_STATE_WARN(active != encoder->connectors_active,
11237                     "encoder's hw state doesn't match sw tracking "
11238                     "(expected %i, found %i)\n",
11239                     encoder->connectors_active, active);
11240
11241                if (!encoder->base.crtc)
11242                        continue;
11243
11244                tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
11245                I915_STATE_WARN(active && pipe != tracked_pipe,
11246                     "active encoder's pipe doesn't match "
11247                     "(expected %i, found %i)\n",
11248                     tracked_pipe, pipe);
11249
11250        }
11251}
11252
11253static void
11254check_crtc_state(struct drm_device *dev)
11255{
11256        struct drm_i915_private *dev_priv = dev->dev_private;
11257        struct intel_crtc *crtc;
11258        struct intel_encoder *encoder;
11259        struct intel_crtc_state pipe_config;
11260
11261        for_each_intel_crtc(dev, crtc) {
11262                bool enabled = false;
11263                bool active = false;
11264
11265                memset(&pipe_config, 0, sizeof(pipe_config));
11266
11267                DRM_DEBUG_KMS("[CRTC:%d]\n",
11268                              crtc->base.base.id);
11269
11270                I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
11271                     "active crtc, but not enabled in sw tracking\n");
11272
11273                for_each_intel_encoder(dev, encoder) {
11274                        if (encoder->base.crtc != &crtc->base)
11275                                continue;
11276                        enabled = true;
11277                        if (encoder->connectors_active)
11278                                active = true;
11279                }
11280
11281                I915_STATE_WARN(active != crtc->active,
11282                     "crtc's computed active state doesn't match tracked active state "
11283                     "(expected %i, found %i)\n", active, crtc->active);
11284                I915_STATE_WARN(enabled != crtc->base.state->enable,
11285                     "crtc's computed enabled state doesn't match tracked enabled state "
11286                     "(expected %i, found %i)\n", enabled,
11287                                crtc->base.state->enable);
11288
11289                active = dev_priv->display.get_pipe_config(crtc,
11290                                                           &pipe_config);
11291
11292                /* hw state is inconsistent with the pipe quirk */
11293                if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
11294                    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
11295                        active = crtc->active;
11296
11297                for_each_intel_encoder(dev, encoder) {
11298                        enum pipe pipe;
11299                        if (encoder->base.crtc != &crtc->base)
11300                                continue;
11301                        if (encoder->get_hw_state(encoder, &pipe))
11302                                encoder->get_config(encoder, &pipe_config);
11303                }
11304
11305                I915_STATE_WARN(crtc->active != active,
11306                     "crtc active state doesn't match with hw state "
11307                     "(expected %i, found %i)\n", crtc->active, active);
11308
11309                if (active &&
11310                    !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
11311                        I915_STATE_WARN(1, "pipe state doesn't match!\n");
11312                        intel_dump_pipe_config(crtc, &pipe_config,
11313                                               "[hw state]");
11314                        intel_dump_pipe_config(crtc, crtc->config,
11315                                               "[sw state]");
11316                }
11317        }
11318}
11319
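/*
 * Cross-check shared DPLL state: reference counts, on/active flags and
 * the saved hw state must match what the crtcs using the PLL and the
 * hardware itself report.
 */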
11320static void
11321check_shared_dpll_state(struct drm_device *dev)
11322{
11323        struct drm_i915_private *dev_priv = dev->dev_private;
11324        struct intel_crtc *crtc;
11325        struct intel_dpll_hw_state dpll_hw_state;
11326        int i;
11327
11328        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11329                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
11330                int enabled_crtcs = 0, active_crtcs = 0;
11331                bool active;
11332
11333                memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
11334
11335                DRM_DEBUG_KMS("%s\n", pll->name);
11336
11337                active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
11338
11339                I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
11340                     "more active pll users than references: %i vs %i\n",
11341                     pll->active, hweight32(pll->config.crtc_mask));
11342                I915_STATE_WARN(pll->active && !pll->on,
11343                     "pll in active use but not on in sw tracking\n");
11344                I915_STATE_WARN(pll->on && !pll->active,
11345                     "pll is on but not in use in sw tracking\n");
11346                I915_STATE_WARN(pll->on != active,
11347                     "pll on state mismatch (expected %i, found %i)\n",
11348                     pll->on, active);
11349
11350                for_each_intel_crtc(dev, crtc) {
11351                        if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
11352                                enabled_crtcs++;
11353                        if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
11354                                active_crtcs++;
11355                }
11356                I915_STATE_WARN(pll->active != active_crtcs,
11357                     "pll active crtcs mismatch (expected %i, found %i)\n",
11358                     pll->active, active_crtcs);
11359                I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
11360                     "pll enabled crtcs mismatch (expected %i, found %i)\n",
11361                     hweight32(pll->config.crtc_mask), enabled_crtcs);
11362
11363                I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
11364                                       sizeof(dpll_hw_state)),
11365                     "pll hw state mismatch\n");
11366        }
11367}
11368
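/* Verify that the software modeset state matches the hardware. */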
11369void
11370intel_modeset_check_state(struct drm_device *dev)
11371{
11372        check_wm_state(dev);
11373        check_connector_state(dev);
11374        check_encoder_state(dev);
11375        check_crtc_state(dev);
11376        check_shared_dpll_state(dev);
11377}
11378
11379void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
11380                                     int dotclock)
11381{
11382        /*
11383         * FDI already provided one idea for the dotclock.
11384         * Yell if the encoder disagrees.
11385         */
11386        WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
11387             "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11388             pipe_config->base.adjusted_mode.crtc_clock, dotclock);
11389}
11390
11391static void update_scanline_offset(struct intel_crtc *crtc)
11392{
11393        struct drm_device *dev = crtc->base.dev;
11394
11395        /*
11396         * The scanline counter increments at the leading edge of hsync.
11397         *
11398         * On most platforms it starts counting from vtotal-1 on the
11399         * first active line. That means the scanline counter value is
11400         * always one less than what we would expect. I.e. just after
11401         * start of vblank, which also occurs at start of hsync (on the
11402         * last active line), the scanline counter will read vblank_start-1.
11403         *
11404         * On gen2 the scanline counter starts counting from 1 instead
11405         * of vtotal-1, so we have to subtract one (or rather add vtotal-1
11406         * to keep the value positive), instead of adding one.
11407         *
11408         * On HSW+ the behaviour of the scanline counter depends on the output
11409         * type. For DP ports it behaves like most other platforms, but on HDMI
11410         * there's an extra 1 line difference. So we need to add two instead of
11411         * one to the value.
11412         */
11413        if (IS_GEN2(dev)) {
11414                const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
11415                int vtotal;
11416
11417                vtotal = mode->crtc_vtotal;
11418                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
11419                        vtotal /= 2;
11420
11421                crtc->scanline_offset = vtotal - 1;
11422        } else if (HAS_DDI(dev) &&
11423                   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
11424                crtc->scanline_offset = 2;
11425        } else
11426                crtc->scanline_offset = 1;
11427}
11428
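/*
 * Compute the new pipe config for a modeset on @crtc: add the affected
 * connectors to the atomic state, classify all pipes into the
 * modeset/prepare/disable masks and mark the to-be-disabled pipes'
 * crtc states as disabled. Returns the crtc state for @crtc or an
 * ERR_PTR.
 */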
11429static struct intel_crtc_state *
11430intel_modeset_compute_config(struct drm_crtc *crtc,
11431                             struct drm_display_mode *mode,
11432                             struct drm_framebuffer *fb,
11433                             struct drm_atomic_state *state,
11434                             unsigned *modeset_pipes,
11435                             unsigned *prepare_pipes,
11436                             unsigned *disable_pipes)
11437{
11438        struct drm_device *dev = crtc->dev;
11439        struct intel_crtc_state *pipe_config = NULL;
11440        struct intel_crtc *intel_crtc;
11441        int ret = 0;
11442
11443        ret = drm_atomic_add_affected_connectors(state, crtc);
11444        if (ret)
11445                return ERR_PTR(ret);
11446
11447        intel_modeset_affected_pipes(crtc, modeset_pipes,
11448                                     prepare_pipes, disable_pipes);
11449
11450        for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) {
11451                pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
11452                if (IS_ERR(pipe_config))
11453                        return pipe_config;
11454
11455                pipe_config->base.enable = false;
11456        }
11457
11458        /*
11459         * Note this needs changes when we start tracking multiple modes
11460         * and crtcs.  At that point we'll need to compute the whole config
11461         * (i.e. one pipe_config for each crtc) rather than just the one
11462         * for this crtc.
11463         */
11464        for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) {
11465                /* FIXME: For now we still expect modeset_pipes to have at most
11466                 * one bit set. */
11467                if (WARN_ON(&intel_crtc->base != crtc))
11468                        continue;
11469
11470                pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state);
11471                if (IS_ERR(pipe_config))
11472                        return pipe_config;
11473
11474                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
11475                                       "[modeset]");
11476        }
11477
11478        return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
11479}
11480
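/*
 * Reserve and compute the shared DPLLs for all pipes doing a full
 * modeset, releasing the PLLs of the pipes being disabled. The whole
 * shared DPLL transaction is aborted if clock computation fails.
 */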
11481static int __intel_set_mode_setup_plls(struct drm_device *dev,
11482                                       unsigned modeset_pipes,
11483                                       unsigned disable_pipes)
11484{
11485        struct drm_i915_private *dev_priv = to_i915(dev);
11486        unsigned clear_pipes = modeset_pipes | disable_pipes;
11487        struct intel_crtc *intel_crtc;
11488        int ret = 0;
11489
11490        if (!dev_priv->display.crtc_compute_clock)
11491                return 0;
11492
11493        ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
11494        if (ret)
11495                goto done;
11496
11497        for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
11498                struct intel_crtc_state *state = intel_crtc->new_config;
11499                ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11500                                                           state);
11501                if (ret) {
11502                        intel_shared_dpll_abort_config(dev_priv);
11503                        goto done;
11504                }
11505        }
11506
11507done:
11508        return ret;
11509}
11510
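/*
 * Core modeset implementation: disable the affected pipes, update the
 * software state and power domains, re-set the primary planes and then
 * re-enable every pipe in @prepare_pipes with the new configuration.
 */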
11511static int __intel_set_mode(struct drm_crtc *crtc,
11512                            struct drm_display_mode *mode,
11513                            int x, int y, struct drm_framebuffer *fb,
11514                            struct intel_crtc_state *pipe_config,
11515                            unsigned modeset_pipes,
11516                            unsigned prepare_pipes,
11517                            unsigned disable_pipes)
11518{
11519        struct drm_device *dev = crtc->dev;
11520        struct drm_i915_private *dev_priv = dev->dev_private;
11521        struct drm_display_mode *saved_mode;
11522        struct intel_crtc_state *crtc_state_copy = NULL;
11523        struct intel_crtc *intel_crtc;
11524        int ret = 0;
11525
11526        saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
11527        if (!saved_mode)
11528                return -ENOMEM;
11529
11530        crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), GFP_KERNEL);
11531        if (!crtc_state_copy) {
11532                ret = -ENOMEM;
11533                goto done;
11534        }
11535
11536        *saved_mode = crtc->mode;
11537
11538        if (modeset_pipes)
11539                to_intel_crtc(crtc)->new_config = pipe_config;
11540
11541        /*
11542         * See if the config requires any additional preparation, e.g.
11543         * to adjust global state with pipes off.  We need to do this
11544         * here so we can get the modeset_pipe updated config for the new
11545         * mode set on this crtc.  For other crtcs we need to use the
11546         * adjusted_mode bits in the crtc directly.
11547         */
11548        if (IS_VALLEYVIEW(dev)) {
11549                valleyview_modeset_global_pipes(dev, &prepare_pipes);
11550
11551                /* may have added more to prepare_pipes than we should */
11552                prepare_pipes &= ~disable_pipes;
11553        }
11554
11555        ret = __intel_set_mode_setup_plls(dev, modeset_pipes, disable_pipes);
11556        if (ret)
11557                goto done;
11558
11559        for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
11560                intel_crtc_disable(&intel_crtc->base);
11561
11562        for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
11563                if (intel_crtc->base.state->enable)
11564                        dev_priv->display.crtc_disable(&intel_crtc->base);
11565        }
11566
11567        /* crtc->mode is already used by the ->mode_set callbacks, hence we need
11568         * to set it here already, even though we also pass it down the callchain.
11569         *
11570         * Note we'll need to fix this up when we start tracking multiple
11571         * pipes; here we assume a single modeset_pipe and only track the
11572         * single crtc and mode.
11573         */
11574        if (modeset_pipes) {
11575                crtc->mode = *mode;
11576                /* mode_set/enable/disable functions rely on a correct pipe
11577                 * config. */
11578                intel_crtc_set_state(to_intel_crtc(crtc), pipe_config);
11579
11580                /*
11581                 * Calculate and store various constants which
11582                 * are later needed by vblank and swap-completion
11583                 * timestamping. They are derived from true hwmode.
11584                 */
11585                drm_calc_timestamping_constants(crtc,
11586                                                &pipe_config->base.adjusted_mode);
11587        }
11588
11589        /* Only after disabling all output pipelines that will be changed can we
11590         * update the output configuration. */
11591        intel_modeset_update_state(dev, prepare_pipes);
11592
11593        modeset_update_crtc_power_domains(pipe_config->base.state);
11594
11595        /* Update the primary plane with the new framebuffer and position for
11596         * every pipe that is doing a full modeset.
11597         */
11598        for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
11599                struct drm_plane *primary = intel_crtc->base.primary;
11600                int vdisplay, hdisplay;
11601
11602                drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
11603                ret = primary->funcs->update_plane(primary, &intel_crtc->base,
11604                                                   fb, 0, 0,
11605                                                   hdisplay, vdisplay,
11606                                                   x << 16, y << 16,
11607                                                   hdisplay << 16, vdisplay << 16);
11608        }
11609
11610        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
11611        for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
11612                update_scanline_offset(intel_crtc);
11613
11614                dev_priv->display.crtc_enable(&intel_crtc->base);
11615        }
11616
11617        /* FIXME: add subpixel order */
11618done:
11619        if (ret && crtc->state->enable)
11620                crtc->mode = *saved_mode;
11621
11622        if (ret == 0 && pipe_config) {
11623                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11624
11625                /* The pipe_config will be freed with the atomic state, so
11626                 * make a copy. */
11627                memcpy(crtc_state_copy, intel_crtc->config,
11628                       sizeof(*crtc_state_copy));
11629                intel_crtc->config = crtc_state_copy;
11630                intel_crtc->base.state = &crtc_state_copy->base;
11631
11632                if (modeset_pipes)
11633                        intel_crtc->new_config = intel_crtc->config;
11634        } else {
11635                kfree(crtc_state_copy);
11636        }
11637
11638        kfree(saved_mode);
11639        return ret;
11640}
11641
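/* Wrapper around __intel_set_mode() that also verifies the resulting state. */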
11642static int intel_set_mode_pipes(struct drm_crtc *crtc,
11643                                struct drm_display_mode *mode,
11644                                int x, int y, struct drm_framebuffer *fb,
11645                                struct intel_crtc_state *pipe_config,
11646                                unsigned modeset_pipes,
11647                                unsigned prepare_pipes,
11648                                unsigned disable_pipes)
11649{
11650        int ret;
11651
11652        ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
11653                               prepare_pipes, disable_pipes);
11654
11655        if (ret == 0)
11656                intel_modeset_check_state(crtc->dev);
11657
11658        return ret;
11659}
11660
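/*
 * Compute the pipe configuration for @crtc from @mode and apply it with
 * intel_set_mode_pipes().
 */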
11661static int intel_set_mode(struct drm_crtc *crtc,
11662                          struct drm_display_mode *mode,
11663                          int x, int y, struct drm_framebuffer *fb,
11664                          struct drm_atomic_state *state)
11665{
11666        struct intel_crtc_state *pipe_config;
11667        unsigned modeset_pipes, prepare_pipes, disable_pipes;
11668        int ret = 0;
11669
11670        pipe_config = intel_modeset_compute_config(crtc, mode, fb, state,
11671                                                   &modeset_pipes,
11672                                                   &prepare_pipes,
11673                                                   &disable_pipes);
11674
11675        if (IS_ERR(pipe_config)) {
11676                ret = PTR_ERR(pipe_config);
11677                goto out;
11678        }
11679
11680        ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
11681                                   modeset_pipes, prepare_pipes,
11682                                   disable_pipes);
11683        if (ret)
11684                goto out;
11685
11686out:
11687        return ret;
11688}
11689
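/*
 * Re-apply the mode currently set on @crtc (used by the force-restore
 * path after hardware state readout) by copying the staged
 * connector/encoder links into a fresh atomic state and running a full
 * modeset with the existing mode and framebuffer.
 */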
11690void intel_crtc_restore_mode(struct drm_crtc *crtc)
11691{
11692        struct drm_device *dev = crtc->dev;
11693        struct drm_atomic_state *state;
11694        struct intel_encoder *encoder;
11695        struct intel_connector *connector;
11696        struct drm_connector_state *connector_state;
11697
11698        state = drm_atomic_state_alloc(dev);
11699        if (!state) {
11700                DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory\n",
11701                              crtc->base.id);
11702                return;
11703        }
11704
11705        state->acquire_ctx = dev->mode_config.acquire_ctx;
11706
11707        /* The force restore path in the HW readout code relies on the staged
11708         * config still keeping the user requested config while the actual
11709         * state has been overwritten by the configuration read from HW. We
11710         * need to copy the staged config to the atomic state, otherwise the
11711         * mode set will just reapply the state the HW is already in. */
11712        for_each_intel_encoder(dev, encoder) {
11713                if (&encoder->new_crtc->base != crtc)
11714                        continue;
11715
11716                for_each_intel_connector(dev, connector) {
11717                        if (connector->new_encoder != encoder)
11718                                continue;
11719
11720                        connector_state = drm_atomic_get_connector_state(state, &connector->base);
11721                        if (IS_ERR(connector_state)) {
11722                                DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
11723                                              connector->base.base.id,
11724                                              connector->base.name,
11725                                              PTR_ERR(connector_state));
11726                                continue;
11727                        }
11728
11729                        connector_state->crtc = crtc;
11730                        connector_state->best_encoder = &encoder->base;
11731                }
11732        }
11733
11734        intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb,
11735                       state);
11736
11737        drm_atomic_state_free(state);
11738}
11739
11740#undef for_each_intel_crtc_masked
11741
11742static void intel_set_config_free(struct intel_set_config *config)
11743{
11744        if (!config)
11745                return;
11746
11747        kfree(config->save_connector_encoders);
11748        kfree(config->save_encoder_crtcs);
11749        kfree(config->save_crtc_enabled);
11750        kfree(config);
11751}
11752
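/*
 * Save the current crtc enabled flags and encoder/connector links so
 * they can be restored if the modeset fails.
 */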
11753static int intel_set_config_save_state(struct drm_device *dev,
11754                                       struct intel_set_config *config)
11755{
11756        struct drm_crtc *crtc;
11757        struct drm_encoder *encoder;
11758        struct drm_connector *connector;
11759        int count;
11760
11761        config->save_crtc_enabled =
11762                kcalloc(dev->mode_config.num_crtc,
11763                        sizeof(bool), GFP_KERNEL);
11764        if (!config->save_crtc_enabled)
11765                return -ENOMEM;
11766
11767        config->save_encoder_crtcs =
11768                kcalloc(dev->mode_config.num_encoder,
11769                        sizeof(struct drm_crtc *), GFP_KERNEL);
11770        if (!config->save_encoder_crtcs)
11771                return -ENOMEM;
11772
11773        config->save_connector_encoders =
11774                kcalloc(dev->mode_config.num_connector,
11775                        sizeof(struct drm_encoder *), GFP_KERNEL);
11776        if (!config->save_connector_encoders)
11777                return -ENOMEM;
11778
11779        /* Copy data. Note that driver private data is not affected.
11780         * Should anything bad happen only the expected state is
11781         * restored, not the driver's personal bookkeeping.
11782         */
11783        count = 0;
11784        for_each_crtc(dev, crtc) {
11785                config->save_crtc_enabled[count++] = crtc->state->enable;
11786        }
11787
11788        count = 0;
11789        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
11790                config->save_encoder_crtcs[count++] = encoder->crtc;
11791        }
11792
11793        count = 0;
11794        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
11795                config->save_connector_encoders[count++] = connector->encoder;
11796        }
11797
11798        return 0;
11799}
11800
11801static void intel_set_config_restore_state(struct drm_device *dev,
11802                                           struct intel_set_config *config)
11803{
11804        struct intel_crtc *crtc;
11805        struct intel_encoder *encoder;
11806        struct intel_connector *connector;
11807        int count;
11808
11809        count = 0;
11810        for_each_intel_crtc(dev, crtc) {
11811                crtc->new_enabled = config->save_crtc_enabled[count++];
11812
11813                if (crtc->new_enabled)
11814                        crtc->new_config = crtc->config;
11815                else
11816                        crtc->new_config = NULL;
11817        }
11818
11819        count = 0;
11820        for_each_intel_encoder(dev, encoder) {
11821                encoder->new_crtc =
11822                        to_intel_crtc(config->save_encoder_crtcs[count++]);
11823        }
11824
11825        count = 0;
11826        for_each_intel_connector(dev, connector) {
11827                connector->new_encoder =
11828                        to_intel_encoder(config->save_connector_encoders[count++]);
11829        }
11830}
11831
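/*
 * Check whether any connector in the set is routed to the target crtc
 * but has its DPMS state set to off.
 */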
11832static bool
11833is_crtc_connector_off(struct drm_mode_set *set)
11834{
11835        int i;
11836
11837        if (set->num_connectors == 0)
11838                return false;
11839
11840        if (WARN_ON(set->connectors == NULL))
11841                return false;
11842
11843        for (i = 0; i < set->num_connectors; i++)
11844                if (set->connectors[i]->encoder &&
11845                    set->connectors[i]->encoder->crtc == set->crtc &&
11846                    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
11847                        return true;
11848
11849        return false;
11850}
11851
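/*
 * Decide whether the requested configuration needs a full modeset or
 * only a framebuffer update, based on DPMS state, framebuffer and mode
 * differences.
 */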
11852static void
11853intel_set_config_compute_mode_changes(struct drm_mode_set *set,
11854                                      struct intel_set_config *config)
11855{
11856
11857        /* We should be able to check here if the fb has the same properties
11858         * and then just flip_or_move it */
11859        if (is_crtc_connector_off(set)) {
11860                config->mode_changed = true;
11861        } else if (set->crtc->primary->fb != set->fb) {
11862                /*
11863                 * If we have no fb, we can only flip as long as the crtc is
11864                 * active, otherwise we need a full mode set.  The crtc may
11865                 * be active if we've only disabled the primary plane, or
11866                 * in fastboot situations.
11867                 */
11868                if (set->crtc->primary->fb == NULL) {
11869                        struct intel_crtc *intel_crtc =
11870                                to_intel_crtc(set->crtc);
11871
11872                        if (intel_crtc->active) {
11873                                DRM_DEBUG_KMS("crtc has no fb, will flip\n");
11874                                config->fb_changed = true;
11875                        } else {
11876                                DRM_DEBUG_KMS("inactive crtc, full mode set\n");
11877                                config->mode_changed = true;
11878                        }
11879                } else if (set->fb == NULL) {
11880                        config->mode_changed = true;
11881                } else if (set->fb->pixel_format !=
11882                           set->crtc->primary->fb->pixel_format) {
11883                        config->mode_changed = true;
11884                } else {
11885                        config->fb_changed = true;
11886                }
11887        }
11888
11889        if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
11890                config->fb_changed = true;
11891
11892        if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
11893                DRM_DEBUG_KMS("modes are different, full mode set\n");
11894                drm_mode_debug_printmodeline(&set->crtc->mode);
11895                drm_mode_debug_printmodeline(set->mode);
11896                config->mode_changed = true;
11897        }
11898
11899        DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
11900                        set->crtc->base.id, config->mode_changed, config->fb_changed);
11901}
11902
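/*
 * Stage the new output routing: update the connector->new_encoder,
 * encoder->new_crtc and crtc->new_enabled links (and the matching
 * atomic connector states), flagging a full modeset whenever the
 * routing changes.
 */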
11903static int
11904intel_modeset_stage_output_state(struct drm_device *dev,
11905                                 struct drm_mode_set *set,
11906                                 struct intel_set_config *config,
11907                                 struct drm_atomic_state *state)
11908{
11909        struct intel_connector *connector;
11910        struct drm_connector_state *connector_state;
11911        struct intel_encoder *encoder;
11912        struct intel_crtc *crtc;
11913        int ro;
11914
11915        /* The upper layers ensure that we either disable a crtc or have a list
11916         * of connectors. For paranoia, double-check this. */
11917        WARN_ON(!set->fb && (set->num_connectors != 0));
11918        WARN_ON(set->fb && (set->num_connectors == 0));
11919
11920        for_each_intel_connector(dev, connector) {
11921                /* Traverse the passed-in connector list and find the new encoder
11922                 * for each connector on it. */
11923                for (ro = 0; ro < set->num_connectors; ro++) {
11924                        if (set->connectors[ro] == &connector->base) {
11925                                connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
11926                                break;
11927                        }
11928                }
11929
11930                /* If we disable the crtc, disable all its connectors. Also, if
11931                 * the connector is on the changing crtc but not on the new
11932                 * connector list, disable it. */
11933                if ((!set->fb || ro == set->num_connectors) &&
11934                    connector->base.encoder &&
11935                    connector->base.encoder->crtc == set->crtc) {
11936                        connector->new_encoder = NULL;
11937
11938                        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
11939                                connector->base.base.id,
11940                                connector->base.name);
11941                }
11942
11944                if (&connector->new_encoder->base != connector->base.encoder) {
11945                        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n",
11946                                      connector->base.base.id,
11947                                      connector->base.name);
11948                        config->mode_changed = true;
11949                }
11950        }
11951        /* connector->new_encoder is now updated for all connectors. */
11952
11953        /* Update crtc of enabled connectors. */
11954        for_each_intel_connector(dev, connector) {
11955                struct drm_crtc *new_crtc;
11956
11957                if (!connector->new_encoder)
11958                        continue;
11959
11960                new_crtc = connector->new_encoder->base.crtc;
11961
11962                for (ro = 0; ro < set->num_connectors; ro++) {
11963                        if (set->connectors[ro] == &connector->base)
11964                                new_crtc = set->crtc;
11965                }
11966
11967                /* Make sure the new CRTC will work with the encoder */
11968                if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
11969                                         new_crtc)) {
11970                        return -EINVAL;
11971                }
11972                connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
11973
11974                connector_state =
11975                        drm_atomic_get_connector_state(state, &connector->base);
11976                if (IS_ERR(connector_state))
11977                        return PTR_ERR(connector_state);
11978
11979                connector_state->crtc = new_crtc;
11980                connector_state->best_encoder = &connector->new_encoder->base;
11981
11982                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
11983                        connector->base.base.id,
11984                        connector->base.name,
11985                        new_crtc->base.id);
11986        }
11987
11988        /* Check for any encoders that need to be disabled. */
11989        for_each_intel_encoder(dev, encoder) {
11990                int num_connectors = 0;
11991                for_each_intel_connector(dev, connector) {
11992                        if (connector->new_encoder == encoder) {
11993                                WARN_ON(!connector->new_encoder->new_crtc);
11994                                num_connectors++;
11995                        }
11996                }
11997
11998                if (num_connectors == 0)
11999                        encoder->new_crtc = NULL;
12000                else if (num_connectors > 1)
12001                        return -EINVAL;
12002
12003                /* Only now check for crtc changes so we don't miss encoders
12004                 * that will be disabled. */
12005                if (&encoder->new_crtc->base != encoder->base.crtc) {
12006                        DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n",
12007                                      encoder->base.base.id,
12008                                      encoder->base.name);
12009                        config->mode_changed = true;
12010                }
12011        }
12012        /* Now we've also updated encoder->new_crtc for all encoders. */
12013        for_each_intel_connector(dev, connector) {
12014                connector_state =
12015                        drm_atomic_get_connector_state(state, &connector->base);
12016                if (IS_ERR(connector_state))
12017                        return PTR_ERR(connector_state);
12018
12019                if (connector->new_encoder) {
12020                        if (connector->new_encoder != connector->encoder)
12021                                connector->encoder = connector->new_encoder;
12022                } else {
12023                        connector_state->crtc = NULL;
12024                }
12025        }
12026        for_each_intel_crtc(dev, crtc) {
12027                crtc->new_enabled = false;
12028
12029                for_each_intel_encoder(dev, encoder) {
12030                        if (encoder->new_crtc == crtc) {
12031                                crtc->new_enabled = true;
12032                                break;
12033                        }
12034                }
12035
12036                if (crtc->new_enabled != crtc->base.state->enable) {
12037                        DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n",
12038                                      crtc->base.base.id,
12039                                      crtc->new_enabled ? "en" : "dis");
12040                        config->mode_changed = true;
12041                }
12042
12043                if (crtc->new_enabled)
12044                        crtc->new_config = crtc->config;
12045                else
12046                        crtc->new_config = NULL;
12047        }
12048
12049        return 0;
12050}
12051
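/*
 * Drop all staged links to @crtc so the restore path can disable it
 * when no framebuffer is available.
 */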
12052static void disable_crtc_nofb(struct intel_crtc *crtc)
12053{
12054        struct drm_device *dev = crtc->base.dev;
12055        struct intel_encoder *encoder;
12056        struct intel_connector *connector;
12057
12058        DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
12059                      pipe_name(crtc->pipe));
12060
12061        for_each_intel_connector(dev, connector) {
12062                if (connector->new_encoder &&
12063                    connector->new_encoder->new_crtc == crtc)
12064                        connector->new_encoder = NULL;
12065        }
12066
12067        for_each_intel_encoder(dev, encoder) {
12068                if (encoder->new_crtc == crtc)
12069                        encoder->new_crtc = NULL;
12070        }
12071
12072        crtc->new_enabled = false;
12073        crtc->new_config = NULL;
12074}
12075
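/*
 * Legacy ->set_config hook: save the current state, stage the new
 * output configuration and either perform a full modeset or just
 * update the primary plane, restoring the old configuration on
 * failure.
 */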
12076static int intel_crtc_set_config(struct drm_mode_set *set)
12077{
12078        struct drm_device *dev;
12079        struct drm_mode_set save_set;
12080        struct drm_atomic_state *state = NULL;
12081        struct intel_set_config *config;
12082        struct intel_crtc_state *pipe_config;
12083        unsigned modeset_pipes, prepare_pipes, disable_pipes;
12084        int ret;
12085
12086        BUG_ON(!set);
12087        BUG_ON(!set->crtc);
12088        BUG_ON(!set->crtc->helper_private);
12089
12090        /* Enforce a sane interface API - it has been abused by the fb helper. */
12091        BUG_ON(!set->mode && set->fb);
12092        BUG_ON(set->fb && set->num_connectors == 0);
12093
12094        if (set->fb) {
12095                DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
12096                                set->crtc->base.id, set->fb->base.id,
12097                                (int)set->num_connectors, set->x, set->y);
12098        } else {
12099                DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
12100        }
12101
12102        dev = set->crtc->dev;
12103
12104        ret = -ENOMEM;
12105        config = kzalloc(sizeof(*config), GFP_KERNEL);
12106        if (!config)
12107                goto out_config;
12108
12109        ret = intel_set_config_save_state(dev, config);
12110        if (ret)
12111                goto out_config;
12112
12113        save_set.crtc = set->crtc;
12114        save_set.mode = &set->crtc->mode;
12115        save_set.x = set->crtc->x;
12116        save_set.y = set->crtc->y;
12117        save_set.fb = set->crtc->primary->fb;
12118
12119        /* Compute whether we need a full modeset, only an fb base update or no
12120         * change at all. In the future we might also check whether only the
12121         * mode changed, e.g. for LVDS where we only change the panel fitter in
12122         * such cases. */
12123        intel_set_config_compute_mode_changes(set, config);
12124
12125        state = drm_atomic_state_alloc(dev);
12126        if (!state) {
12127                ret = -ENOMEM;
12128                goto out_config;
12129        }
12130
12131        state->acquire_ctx = dev->mode_config.acquire_ctx;
12132
12133        ret = intel_modeset_stage_output_state(dev, set, config, state);
12134        if (ret)
12135                goto fail;
12136
12137        pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
12138                                                   set->fb, state,
12139                                                   &modeset_pipes,
12140                                                   &prepare_pipes,
12141                                                   &disable_pipes);
12142        if (IS_ERR(pipe_config)) {
12143                ret = PTR_ERR(pipe_config);
12144                goto fail;
12145        } else if (pipe_config) {
12146                if (pipe_config->has_audio !=
12147                    to_intel_crtc(set->crtc)->config->has_audio)
12148                        config->mode_changed = true;
12149
12150                /*
12151                 * Note we have an issue here with infoframes: current code
12152                 * only updates them on the full mode set path per hw
12153                 * requirements.  So here we should be checking for any
12154                 * required changes and forcing a mode set.
12155                 */
12156        }
12157
12158        intel_update_pipe_size(to_intel_crtc(set->crtc));
12159
12160        if (config->mode_changed) {
12161                ret = intel_set_mode_pipes(set->crtc, set->mode,
12162                                           set->x, set->y, set->fb, pipe_config,
12163                                           modeset_pipes, prepare_pipes,
12164                                           disable_pipes);
12165        } else if (config->fb_changed) {
12166                struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
12167                struct drm_plane *primary = set->crtc->primary;
12168                int vdisplay, hdisplay;
12169
12170                drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
12171                ret = primary->funcs->update_plane(primary, set->crtc, set->fb,
12172                                                   0, 0, hdisplay, vdisplay,
12173                                                   set->x << 16, set->y << 16,
12174                                                   hdisplay << 16, vdisplay << 16);
12175
12176                /*
12177                 * We need to make sure the primary plane is re-enabled if it
12178                 * has previously been turned off.
12179                 */
12180                if (!intel_crtc->primary_enabled && ret == 0) {
12181                        WARN_ON(!intel_crtc->active);
12182                        intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
12183                }
12184
12185                /*
12186                 * In the fastboot case this may be our only check of the
12187                 * state after boot.  It would be better to only do it on
12188                 * the first update, but we don't have a nice way of doing that
12189                 * (and really, set_config isn't used much for high freq page
12190                 * flipping, so increasing its cost here shouldn't be a big
12191                 * deal).
12192                 */
12193                if (i915.fastboot && ret == 0)
12194                        intel_modeset_check_state(set->crtc->dev);
12195        }
12196
12197        if (ret) {
12198                DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
12199                              set->crtc->base.id, ret);
12200fail:
12201                intel_set_config_restore_state(dev, config);
12202
12203                drm_atomic_state_clear(state);
12204
12205                /*
12206                 * HACK: if the pipe was on, but we didn't have a framebuffer,
12207                 * force the pipe off to avoid oopsing in the modeset code
12208                 * due to fb==NULL. This should only happen during boot since
12209                 * we don't yet reconstruct the FB from the hardware state.
12210                 */
12211                if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
12212                        disable_crtc_nofb(to_intel_crtc(save_set.crtc));
12213
12214                /* Try to restore the config */
12215                if (config->mode_changed &&
12216                    intel_set_mode(save_set.crtc, save_set.mode,
12217                                   save_set.x, save_set.y, save_set.fb,
12218                                   state))
12219                        DRM_ERROR("failed to restore config after modeset failure\n");
12220        }
12221
12222out_config:
12223        if (state)
12224                drm_atomic_state_free(state);
12225
12226        intel_set_config_free(config);
12227        return ret;
12228}
12229
12230static const struct drm_crtc_funcs intel_crtc_funcs = {
12231        .gamma_set = intel_crtc_gamma_set,
12232        .set_config = intel_crtc_set_config,
12233        .destroy = intel_crtc_destroy,
12234        .page_flip = intel_crtc_page_flip,
12235        .atomic_duplicate_state = intel_crtc_duplicate_state,
12236        .atomic_destroy_state = intel_crtc_destroy_state,
12237};
12238
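/* Shared DPLL hooks for IBX/CPT PCH platforms. */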
12239static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
12240                                      struct intel_shared_dpll *pll,
12241                                      struct intel_dpll_hw_state *hw_state)
12242{
12243        uint32_t val;
12244
12245        if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
12246                return false;
12247
12248        val = I915_READ(PCH_DPLL(pll->id));
12249        hw_state->dpll = val;
12250        hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
12251        hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
12252
12253        return val & DPLL_VCO_ENABLE;
12254}
12255
12256static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
12257                                  struct intel_shared_dpll *pll)
12258{
12259        I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
12260        I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
12261}
12262
12263static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
12264                                struct intel_shared_dpll *pll)
12265{
12266        /* PCH refclock must be enabled first */
12267        ibx_assert_pch_refclk_enabled(dev_priv);
12268
12269        I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
12270
12271        /* Wait for the clocks to stabilize. */
12272        POSTING_READ(PCH_DPLL(pll->id));
12273        udelay(150);
12274
12275        /* The pixel multiplier can only be updated once the
12276         * DPLL is enabled and the clocks are stable.
12277         *
12278         * So write it again.
12279         */
12280        I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
12281        POSTING_READ(PCH_DPLL(pll->id));
12282        udelay(200);
12283}
12284
12285static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
12286                                 struct intel_shared_dpll *pll)
12287{
12288        struct drm_device *dev = dev_priv->dev;
12289        struct intel_crtc *crtc;
12290
12291        /* Make sure no transcoder is still depending on us. */
12292        for_each_intel_crtc(dev, crtc) {
12293                if (intel_crtc_to_shared_dpll(crtc) == pll)
12294                        assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
12295        }
12296
12297        I915_WRITE(PCH_DPLL(pll->id), 0);
12298        POSTING_READ(PCH_DPLL(pll->id));
12299        udelay(200);
12300}
12301
12302static char *ibx_pch_dpll_names[] = {
12303        "PCH DPLL A",
12304        "PCH DPLL B",
12305};
12306
12307static void ibx_pch_dpll_init(struct drm_device *dev)
12308{
12309        struct drm_i915_private *dev_priv = dev->dev_private;
12310        int i;
12311
12312        dev_priv->num_shared_dpll = 2;
12313
12314        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12315                dev_priv->shared_dplls[i].id = i;
12316                dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
12317                dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
12318                dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
12319                dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
12320                dev_priv->shared_dplls[i].get_hw_state =
12321                        ibx_pch_dpll_get_hw_state;
12322        }
12323}
12324
12325static void intel_shared_dpll_init(struct drm_device *dev)
12326{
12327        struct drm_i915_private *dev_priv = dev->dev_private;
12328
12329        if (HAS_DDI(dev))
12330                intel_ddi_pll_init(dev);
12331        else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
12332                ibx_pch_dpll_init(dev);
12333        else
12334                dev_priv->num_shared_dpll = 0;
12335
12336        BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
12337}
12338
12339/**
12340 * intel_wm_need_update - Check whether watermarks need updating
12341 * @plane: drm plane
12342 * @state: new plane state
12343 *
12344 * Check current plane state versus the new one to determine whether
12345 * watermarks need to be recalculated.
12346 *
12347 * Returns true if the watermarks need to be recalculated, false otherwise.
12348 */
12349bool intel_wm_need_update(struct drm_plane *plane,
12350                          struct drm_plane_state *state)
12351{
12352        /* Update watermarks on tiling or rotation changes, or when an fb is added/removed. */
12353        if (!plane->state->fb || !state->fb ||
12354            plane->state->fb->modifier[0] != state->fb->modifier[0] ||
12355            plane->state->rotation != state->rotation)
12356                return true;
12357
12358        return false;
12359}
12360
12361/**
12362 * intel_prepare_plane_fb - Prepare fb for usage on plane
12363 * @plane: drm plane to prepare for
12364 * @fb: framebuffer to prepare for presentation
 * @new_state: the new plane state being applied
12365 *
12366 * Prepares a framebuffer for usage on a display plane.  Generally this
12367 * involves pinning the underlying object and updating the frontbuffer tracking
12368 * bits.  Some older platforms need special physical address handling for
12369 * cursor planes.
12370 *
12371 * Returns 0 on success, negative error code on failure.
12372 */
12373int
12374intel_prepare_plane_fb(struct drm_plane *plane,
12375                       struct drm_framebuffer *fb,
12376                       const struct drm_plane_state *new_state)
12377{
12378        struct drm_device *dev = plane->dev;
12379        struct intel_plane *intel_plane = to_intel_plane(plane);
12380        enum pipe pipe = intel_plane->pipe;
12381        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12382        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
12383        unsigned frontbuffer_bits = 0;
12384        int ret = 0;
12385
12386        if (!obj)
12387                return 0;
12388
12389        switch (plane->type) {
12390        case DRM_PLANE_TYPE_PRIMARY:
12391                frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
12392                break;
12393        case DRM_PLANE_TYPE_CURSOR:
12394                frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
12395                break;
12396        case DRM_PLANE_TYPE_OVERLAY:
12397                frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
12398                break;
12399        }
12400
12401        mutex_lock(&dev->struct_mutex);
12402
12403        if (plane->type == DRM_PLANE_TYPE_CURSOR &&
12404            INTEL_INFO(dev)->cursor_needs_physical) {
12405                int align = IS_I830(dev) ? 16 * 1024 : 256;
12406                ret = i915_gem_object_attach_phys(obj, align);
12407                if (ret)
12408                        DRM_DEBUG_KMS("failed to attach phys object\n");
12409        } else {
12410                ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
12411        }
12412
12413        if (ret == 0)
12414                i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
12415
12416        mutex_unlock(&dev->struct_mutex);
12417
12418        return ret;
12419}
12420
12421/**
12422 * intel_cleanup_plane_fb - Cleans up an fb after plane use
12423 * @plane: drm plane to clean up for
12424 * @fb: old framebuffer that was on plane
 * @old_state: the previous plane state
12425 *
12426 * Cleans up a framebuffer that has just been removed from a plane.
12427 */
12428void
12429intel_cleanup_plane_fb(struct drm_plane *plane,
12430                       struct drm_framebuffer *fb,
12431                       const struct drm_plane_state *old_state)
12432{
12433        struct drm_device *dev = plane->dev;
12434        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12435
12436        if (WARN_ON(!obj))
12437                return;
12438
12439        if (plane->type != DRM_PLANE_TYPE_CURSOR ||
12440            !INTEL_INFO(dev)->cursor_needs_physical) {
12441                mutex_lock(&dev->struct_mutex);
12442                intel_unpin_fb_obj(fb, old_state);
12443                mutex_unlock(&dev->struct_mutex);
12444        }
12445}
12446
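/*
 * Check a primary plane update: clip it to the pipe and, if the crtc is
 * active, record which follow-up work (flip waits, FBC disable, vblank
 * waits, watermark updates) the commit stage has to perform.
 */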
12447static int
12448intel_check_primary_plane(struct drm_plane *plane,
12449                          struct intel_plane_state *state)
12450{
12451        struct drm_device *dev = plane->dev;
12452        struct drm_i915_private *dev_priv = dev->dev_private;
12453        struct drm_crtc *crtc = state->base.crtc;
12454        struct intel_crtc *intel_crtc;
12455        struct drm_framebuffer *fb = state->base.fb;
12456        struct drm_rect *dest = &state->dst;
12457        struct drm_rect *src = &state->src;
12458        const struct drm_rect *clip = &state->clip;
12459        int ret;
12460
12461        crtc = crtc ? crtc : plane->crtc;
12462        intel_crtc = to_intel_crtc(crtc);
12463
12464        ret = drm_plane_helper_check_update(plane, crtc, fb,
12465                                            src, dest, clip,
12466                                            DRM_PLANE_HELPER_NO_SCALING,
12467                                            DRM_PLANE_HELPER_NO_SCALING,
12468                                            false, true, &state->visible);
12469        if (ret)
12470                return ret;
12471
12472        if (intel_crtc->active) {
12473                intel_crtc->atomic.wait_for_flips = true;
12474
12475                /*
12476                 * FBC does not work on some platforms for rotated
12477                 * planes, so disable it when rotation is not 0 and
12478                 * update it when rotation is set back to 0.
12479                 *
12480                 * FIXME: This is redundant with the fbc update done in
12481                 * the primary plane enable function except that that
12482                 * one is done too late. We eventually need to unify
12483                 * this.
12484                 */
12485                if (intel_crtc->primary_enabled &&
12486                    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
12487                    dev_priv->fbc.crtc == intel_crtc &&
12488                    state->base.rotation != BIT(DRM_ROTATE_0)) {
12489                        intel_crtc->atomic.disable_fbc = true;
12490                }
12491
12492                if (state->visible) {
12493                        /*
12494                         * BDW signals flip done immediately if the plane
12495                         * is disabled, even if the plane enable is already
12496                         * armed to occur at the next vblank :(
12497                         */
12498                        if (IS_BROADWELL(dev) && !intel_crtc->primary_enabled)
12499                                intel_crtc->atomic.wait_vblank = true;
12500                }
12501
12502                intel_crtc->atomic.fb_bits |=
12503                        INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
12504
12505                intel_crtc->atomic.update_fbc = true;
12506
12507                if (intel_wm_need_update(plane, &state->base))
12508                        intel_crtc->atomic.update_wm = true;
12509        }
12510
12511        return 0;
12512}
12513
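/*
 * Commit a primary plane update: program the new framebuffer and
 * offsets, or disable the hardware plane if clipping made it invisible.
 */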
12514static void
12515intel_commit_primary_plane(struct drm_plane *plane,
12516                           struct intel_plane_state *state)
12517{
12518        struct drm_crtc *crtc = state->base.crtc;
12519        struct drm_framebuffer *fb = state->base.fb;
12520        struct drm_device *dev = plane->dev;
12521        struct drm_i915_private *dev_priv = dev->dev_private;
12522        struct intel_crtc *intel_crtc;
12523        struct drm_rect *src = &state->src;
12524
12525        crtc = crtc ? crtc : plane->crtc;
12526        intel_crtc = to_intel_crtc(crtc);
12527
12528        plane->fb = fb;
12529        crtc->x = src->x1 >> 16;
12530        crtc->y = src->y1 >> 16;
12531
12532        if (intel_crtc->active) {
12533                if (state->visible) {
12534                        /* FIXME: kill this fastboot hack */
12535                        intel_update_pipe_size(intel_crtc);
12536
12537                        intel_crtc->primary_enabled = true;
12538
12539                        dev_priv->display.update_primary_plane(crtc, plane->fb,
12540                                        crtc->x, crtc->y);
12541                } else {
12542                        /*
12543                         * If clipping results in a non-visible primary plane,
12544                         * we'll disable the primary plane.  Note that this is
12545                         * a bit different than what happens if userspace
12546                         * explicitly disables the plane by passing fb=0
12547                         * because plane->fb still gets set and pinned.
12548                         */
12549                        intel_disable_primary_hw_plane(plane, crtc);
12550                }
12551        }
12552}
12553
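/*
 * Prepare a crtc for an atomic plane commit: update frontbuffer
 * tracking for planes being disabled, wait for pending flips, disable
 * FBC, run the primary plane pre-disable hook and update watermarks as
 * requested, then start vblank evasion.
 */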
12554static void intel_begin_crtc_commit(struct drm_crtc *crtc)
12555{
12556        struct drm_device *dev = crtc->dev;
12557        struct drm_i915_private *dev_priv = dev->dev_private;
12558        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12559        struct intel_plane *intel_plane;
12560        struct drm_plane *p;
12561        unsigned fb_bits = 0;
12562
12563        /* Track fb's for any planes being disabled */
12564        list_for_each_entry(p, &dev->mode_config.plane_list, head) {
12565                intel_plane = to_intel_plane(p);
12566
12567                if (intel_crtc->atomic.disabled_planes &
12568                    (1 << drm_plane_index(p))) {
12569                        switch (p->type) {
12570                        case DRM_PLANE_TYPE_PRIMARY:
12571                                fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
12572                                break;
12573                        case DRM_PLANE_TYPE_CURSOR:
12574                                fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
12575                                break;
12576                        case DRM_PLANE_TYPE_OVERLAY:
12577                                fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
12578                                break;
12579                        }
12580
12581                        mutex_lock(&dev->struct_mutex);
12582                        i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
12583                        mutex_unlock(&dev->struct_mutex);
12584                }
12585        }
12586
12587        if (intel_crtc->atomic.wait_for_flips)
12588                intel_crtc_wait_for_pending_flips(crtc);
12589
12590        if (intel_crtc->atomic.disable_fbc)
12591                intel_fbc_disable(dev);
12592
12593        if (intel_crtc->atomic.pre_disable_primary)
12594                intel_pre_disable_primary(crtc);
12595
12596        if (intel_crtc->atomic.update_wm)
12597                intel_update_watermarks(crtc);
12598
12599        intel_runtime_pm_get(dev_priv);
12600
12601        /* Perform vblank evasion around commit operation */
12602        if (intel_crtc->active)
12603                intel_crtc->atomic.evade =
12604                        intel_pipe_update_start(intel_crtc,
12605                                                &intel_crtc->atomic.start_vbl_count);
12606}
12607
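     /*
      * Counterpart of intel_begin_crtc_commit(): end vblank evasion and
      * perform the deferred post-commit work (vblank wait, frontbuffer
      * flip, FBC and sprite watermark updates) before clearing the
      * per-commit scratch state.
      */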
12608static void intel_finish_crtc_commit(struct drm_crtc *crtc)
12609{
12610        struct drm_device *dev = crtc->dev;
12611        struct drm_i915_private *dev_priv = dev->dev_private;
12612        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12613        struct drm_plane *p;
12614
12615        if (intel_crtc->atomic.evade)
12616                intel_pipe_update_end(intel_crtc,
12617                                      intel_crtc->atomic.start_vbl_count);
12618
12619        intel_runtime_pm_put(dev_priv);
12620
12621        if (intel_crtc->atomic.wait_vblank)
12622                intel_wait_for_vblank(dev, intel_crtc->pipe);
12623
12624        intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);
12625
12626        if (intel_crtc->atomic.update_fbc) {
12627                mutex_lock(&dev->struct_mutex);
12628                intel_fbc_update(dev);
12629                mutex_unlock(&dev->struct_mutex);
12630        }
12631
12632        if (intel_crtc->atomic.post_enable_primary)
12633                intel_post_enable_primary(crtc);
12634
12635        drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
12636                if (intel_crtc->atomic.update_sprite_watermarks & (1 << drm_plane_index(p)))
12637                        intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
12638                                                       false, false);
12639
12640        memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
12641}
12642
12643/**
12644 * intel_plane_destroy - destroy a plane
12645 * @plane: plane to destroy
12646 *
12647 * Common destruction function for all types of planes (primary, cursor,
12648 * sprite).
12649 */
12650void intel_plane_destroy(struct drm_plane *plane)
12651{
12652        struct intel_plane *intel_plane = to_intel_plane(plane);
12653        drm_plane_cleanup(plane);
12654        kfree(intel_plane);
12655}
12656
12657const struct drm_plane_funcs intel_plane_funcs = {
12658        .update_plane = drm_plane_helper_update,
12659        .disable_plane = drm_plane_helper_disable,
12660        .destroy = intel_plane_destroy,
12661        .set_property = drm_atomic_helper_plane_set_property,
12662        .atomic_get_property = intel_plane_atomic_get_property,
12663        .atomic_set_property = intel_plane_atomic_set_property,
12664        .atomic_duplicate_state = intel_plane_duplicate_state,
12665        .atomic_destroy_state = intel_plane_destroy_state,
12666
12667};
12668
12669static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
12670                                                    int pipe)
12671{
12672        struct intel_plane *primary;
12673        struct intel_plane_state *state;
12674        const uint32_t *intel_primary_formats;
12675        int num_formats;
12676
12677        primary = kzalloc(sizeof(*primary), GFP_KERNEL);
12678        if (primary == NULL)
12679                return NULL;
12680
12681        state = intel_create_plane_state(&primary->base);
12682        if (!state) {
12683                kfree(primary);
12684                return NULL;
12685        }
12686        primary->base.state = &state->base;
12687
12688        primary->can_scale = false;
12689        primary->max_downscale = 1;
12690        primary->pipe = pipe;
12691        primary->plane = pipe;
12692        primary->check_plane = intel_check_primary_plane;
12693        primary->commit_plane = intel_commit_primary_plane;
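             /*
              * gen2/3 FBC only works on plane A; mirror the pipe/plane
              * swap done in intel_crtc_init() for those platforms.
              */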
12694        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
12695                primary->plane = !pipe;
12696
12697        if (INTEL_INFO(dev)->gen <= 3) {
12698                intel_primary_formats = intel_primary_formats_gen2;
12699                num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
12700        } else {
12701                intel_primary_formats = intel_primary_formats_gen4;
12702                num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
12703        }
12704
12705        drm_universal_plane_init(dev, &primary->base, 0,
12706                                 &intel_plane_funcs,
12707                                 intel_primary_formats, num_formats,
12708                                 DRM_PLANE_TYPE_PRIMARY);
12709
12710        if (INTEL_INFO(dev)->gen >= 4) {
12711                if (!dev->mode_config.rotation_property)
12712                        dev->mode_config.rotation_property =
12713                                drm_mode_create_rotation_property(dev,
12714                                                        BIT(DRM_ROTATE_0) |
12715                                                        BIT(DRM_ROTATE_180));
12716                if (dev->mode_config.rotation_property)
12717                        drm_object_attach_property(&primary->base.base,
12718                                dev->mode_config.rotation_property,
12719                                state->base.rotation);
12720        }
12721
12722        drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
12723
12724        return &primary->base;
12725}
12726
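     /*
      * Validate a cursor update: the cursor cannot be scaled or tiled and
      * must use a supported size with a large enough backing object.  Also
      * note the watermark/frontbuffer updates the commit phase will need.
      */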
12727static int
12728intel_check_cursor_plane(struct drm_plane *plane,
12729                         struct intel_plane_state *state)
12730{
12731        struct drm_crtc *crtc = state->base.crtc;
12732        struct drm_device *dev = plane->dev;
12733        struct drm_framebuffer *fb = state->base.fb;
12734        struct drm_rect *dest = &state->dst;
12735        struct drm_rect *src = &state->src;
12736        const struct drm_rect *clip = &state->clip;
12737        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12738        struct intel_crtc *intel_crtc;
12739        unsigned stride;
12740        int ret;
12741
12742        crtc = crtc ? crtc : plane->crtc;
12743        intel_crtc = to_intel_crtc(crtc);
12744
12745        ret = drm_plane_helper_check_update(plane, crtc, fb,
12746                                            src, dest, clip,
12747                                            DRM_PLANE_HELPER_NO_SCALING,
12748                                            DRM_PLANE_HELPER_NO_SCALING,
12749                                            true, true, &state->visible);
12750        if (ret)
12751                return ret;
12752
12753
12754        /* if we want to turn off the cursor, ignore width and height */
12755        if (!obj)
12756                goto finish;
12757
12758        /* Check for which cursor types we support */
12759        if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
12760                DRM_DEBUG("Cursor dimension %dx%d not supported\n",
12761                          state->base.crtc_w, state->base.crtc_h);
12762                return -EINVAL;
12763        }
12764
12765        stride = roundup_pow_of_two(state->base.crtc_w) * 4;
12766        if (obj->base.size < stride * state->base.crtc_h) {
12767                DRM_DEBUG_KMS("buffer is too small\n");
12768                return -ENOMEM;
12769        }
12770
12771        if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
12772                DRM_DEBUG_KMS("cursor cannot be tiled\n");
12773                ret = -EINVAL;
12774        }
12775
12776finish:
12777        if (intel_crtc->active) {
12778                if (plane->state->crtc_w != state->base.crtc_w)
12779                        intel_crtc->atomic.update_wm = true;
12780
12781                intel_crtc->atomic.fb_bits |=
12782                        INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
12783        }
12784
12785        return ret;
12786}
12787
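     /*
      * Latch the new cursor position and, when the backing object changes,
      * its GGTT offset (or physical address on platforms that need it),
      * then write the cursor registers if the pipe is active.
      */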
12788static void
12789intel_commit_cursor_plane(struct drm_plane *plane,
12790                          struct intel_plane_state *state)
12791{
12792        struct drm_crtc *crtc = state->base.crtc;
12793        struct drm_device *dev = plane->dev;
12794        struct intel_crtc *intel_crtc;
12795        struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
12796        uint32_t addr;
12797
12798        crtc = crtc ? crtc : plane->crtc;
12799        intel_crtc = to_intel_crtc(crtc);
12800
12801        plane->fb = state->base.fb;
12802        crtc->cursor_x = state->base.crtc_x;
12803        crtc->cursor_y = state->base.crtc_y;
12804
12805        if (intel_crtc->cursor_bo == obj)
12806                goto update;
12807
12808        if (!obj)
12809                addr = 0;
12810        else if (!INTEL_INFO(dev)->cursor_needs_physical)
12811                addr = i915_gem_obj_ggtt_offset(obj);
12812        else
12813                addr = obj->phys_handle->busaddr;
12814
12815        intel_crtc->cursor_addr = addr;
12816        intel_crtc->cursor_bo = obj;
12817update:
12818
12819        if (intel_crtc->active)
12820                intel_crtc_update_cursor(crtc, state->visible);
12821}
12822
12823static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
12824                                                   int pipe)
12825{
12826        struct intel_plane *cursor;
12827        struct intel_plane_state *state;
12828
12829        cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
12830        if (cursor == NULL)
12831                return NULL;
12832
12833        state = intel_create_plane_state(&cursor->base);
12834        if (!state) {
12835                kfree(cursor);
12836                return NULL;
12837        }
12838        cursor->base.state = &state->base;
12839
12840        cursor->can_scale = false;
12841        cursor->max_downscale = 1;
12842        cursor->pipe = pipe;
12843        cursor->plane = pipe;
12844        cursor->check_plane = intel_check_cursor_plane;
12845        cursor->commit_plane = intel_commit_cursor_plane;
12846
12847        drm_universal_plane_init(dev, &cursor->base, 0,
12848                                 &intel_plane_funcs,
12849                                 intel_cursor_formats,
12850                                 ARRAY_SIZE(intel_cursor_formats),
12851                                 DRM_PLANE_TYPE_CURSOR);
12852
12853        if (INTEL_INFO(dev)->gen >= 4) {
12854                if (!dev->mode_config.rotation_property)
12855                        dev->mode_config.rotation_property =
12856                                drm_mode_create_rotation_property(dev,
12857                                                        BIT(DRM_ROTATE_0) |
12858                                                        BIT(DRM_ROTATE_180));
12859                if (dev->mode_config.rotation_property)
12860                        drm_object_attach_property(&cursor->base.base,
12861                                dev->mode_config.rotation_property,
12862                                state->base.rotation);
12863        }
12864
12865        drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
12866
12867        return &cursor->base;
12868}
12869
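     /*
      * Allocate and register the crtc for the given pipe together with its
      * primary and cursor planes, and record it in the pipe/plane -> crtc
      * lookup tables.
      */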
12870static void intel_crtc_init(struct drm_device *dev, int pipe)
12871{
12872        struct drm_i915_private *dev_priv = dev->dev_private;
12873        struct intel_crtc *intel_crtc;
12874        struct intel_crtc_state *crtc_state = NULL;
12875        struct drm_plane *primary = NULL;
12876        struct drm_plane *cursor = NULL;
12877        int i, ret;
12878
12879        intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
12880        if (intel_crtc == NULL)
12881                return;
12882
12883        crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
12884        if (!crtc_state)
12885                goto fail;
12886        intel_crtc_set_state(intel_crtc, crtc_state);
12887        crtc_state->base.crtc = &intel_crtc->base;
12888
12889        primary = intel_primary_plane_create(dev, pipe);
12890        if (!primary)
12891                goto fail;
12892
12893        cursor = intel_cursor_plane_create(dev, pipe);
12894        if (!cursor)
12895                goto fail;
12896
12897        ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
12898                                        cursor, &intel_crtc_funcs);
12899        if (ret)
12900                goto fail;
12901
12902        drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
12903        for (i = 0; i < 256; i++) {
12904                intel_crtc->lut_r[i] = i;
12905                intel_crtc->lut_g[i] = i;
12906                intel_crtc->lut_b[i] = i;
12907        }
12908
12909        /*
12910         * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
12911         * are hooked to pipe B. Hence we want plane A feeding pipe B.
12912         */
12913        intel_crtc->pipe = pipe;
12914        intel_crtc->plane = pipe;
12915        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
12916                DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
12917                intel_crtc->plane = !pipe;
12918        }
12919
12920        intel_crtc->cursor_base = ~0;
12921        intel_crtc->cursor_cntl = ~0;
12922        intel_crtc->cursor_size = ~0;
12923
12924        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
12925               dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
12926        dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
12927        dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
12928
12929        INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
12930
12931        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
12932
12933        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
12934        return;
12935
12936fail:
12937        if (primary)
12938                drm_plane_cleanup(primary);
12939        if (cursor)
12940                drm_plane_cleanup(cursor);
12941        kfree(crtc_state);
12942        kfree(intel_crtc);
12943}
12944
12945enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
12946{
12947        struct drm_encoder *encoder = connector->base.encoder;
12948        struct drm_device *dev = connector->base.dev;
12949
12950        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
12951
12952        if (!encoder || WARN_ON(!encoder->crtc))
12953                return INVALID_PIPE;
12954
12955        return to_intel_crtc(encoder->crtc)->pipe;
12956}
12957
12958int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
12959                                struct drm_file *file)
12960{
12961        struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
12962        struct drm_crtc *drmmode_crtc;
12963        struct intel_crtc *crtc;
12964
12965        drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
12966
12967        if (!drmmode_crtc) {
12968                DRM_ERROR("no such CRTC id\n");
12969                return -ENOENT;
12970        }
12971
12972        crtc = to_intel_crtc(drmmode_crtc);
12973        pipe_from_crtc_id->pipe = crtc->pipe;
12974
12975        return 0;
12976}
12977
12978static int intel_encoder_clones(struct intel_encoder *encoder)
12979{
12980        struct drm_device *dev = encoder->base.dev;
12981        struct intel_encoder *source_encoder;
12982        int index_mask = 0;
12983        int entry = 0;
12984
12985        for_each_intel_encoder(dev, source_encoder) {
12986                if (encoders_cloneable(encoder, source_encoder))
12987                        index_mask |= (1 << entry);
12988
12989                entry++;
12990        }
12991
12992        return index_mask;
12993}
12994
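     /*
      * eDP port A is only present on mobile parts with the DP_A strap set;
      * on Ironlake it can additionally be fused off via FUSE_STRAP.
      */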
12995static bool has_edp_a(struct drm_device *dev)
12996{
12997        struct drm_i915_private *dev_priv = dev->dev_private;
12998
12999        if (!IS_MOBILE(dev))
13000                return false;
13001
13002        if ((I915_READ(DP_A) & DP_DETECTED) == 0)
13003                return false;
13004
13005        if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
13006                return false;
13007
13008        return true;
13009}
13010
13011static bool intel_crt_present(struct drm_device *dev)
13012{
13013        struct drm_i915_private *dev_priv = dev->dev_private;
13014
13015        if (INTEL_INFO(dev)->gen >= 9)
13016                return false;
13017
13018        if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
13019                return false;
13020
13021        if (IS_CHERRYVIEW(dev))
13022                return false;
13023
13024        if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
13025                return false;
13026
13027        return true;
13028}
13029
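     /*
      * Probe the platform specific straps/detect registers and register an
      * encoder for every output found (DDI, PCH, VLV/CHV, SDVO/DVO, ...),
      * then fill in each encoder's possible_crtcs/possible_clones masks.
      */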
13030static void intel_setup_outputs(struct drm_device *dev)
13031{
13032        struct drm_i915_private *dev_priv = dev->dev_private;
13033        struct intel_encoder *encoder;
13034        bool dpd_is_edp = false;
13035
13036        intel_lvds_init(dev);
13037
13038        if (intel_crt_present(dev))
13039                intel_crt_init(dev);
13040
13041        if (HAS_DDI(dev)) {
13042                int found;
13043
13044                /*
13045                 * Haswell uses DDI functions to detect digital outputs.
13046                 * On SKL pre-D0 the strap isn't connected, so we assume
13047                 * it's there.
13048                 */
13049                found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
13050                /* WaIgnoreDDIAStrap: skl */
13051                if (found ||
13052                    (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
13053                        intel_ddi_init(dev, PORT_A);
13054
13055                /* DDI B, C and D detection is indicated by the SFUSE_STRAP
13056                 * register */
13057                found = I915_READ(SFUSE_STRAP);
13058
13059                if (found & SFUSE_STRAP_DDIB_DETECTED)
13060                        intel_ddi_init(dev, PORT_B);
13061                if (found & SFUSE_STRAP_DDIC_DETECTED)
13062                        intel_ddi_init(dev, PORT_C);
13063                if (found & SFUSE_STRAP_DDID_DETECTED)
13064                        intel_ddi_init(dev, PORT_D);
13065        } else if (HAS_PCH_SPLIT(dev)) {
13066                int found;
13067                dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
13068
13069                if (has_edp_a(dev))
13070                        intel_dp_init(dev, DP_A, PORT_A);
13071
13072                if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
13073                        /* PCH SDVOB multiplex with HDMIB */
13074                        found = intel_sdvo_init(dev, PCH_SDVOB, true);
13075                        if (!found)
13076                                intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
13077                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
13078                                intel_dp_init(dev, PCH_DP_B, PORT_B);
13079                }
13080
13081                if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
13082                        intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
13083
13084                if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
13085                        intel_hdmi_init(dev, PCH_HDMID, PORT_D);
13086
13087                if (I915_READ(PCH_DP_C) & DP_DETECTED)
13088                        intel_dp_init(dev, PCH_DP_C, PORT_C);
13089
13090                if (I915_READ(PCH_DP_D) & DP_DETECTED)
13091                        intel_dp_init(dev, PCH_DP_D, PORT_D);
13092        } else if (IS_VALLEYVIEW(dev)) {
13093                /*
13094                 * The DP_DETECTED bit is the latched state of the DDC
13095                 * SDA pin at boot. However since eDP doesn't require DDC
13096                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
13097                 * eDP ports may have been muxed to an alternate function.
13098                 * Thus we can't rely on the DP_DETECTED bit alone to detect
13099                 * eDP ports. Consult the VBT as well as DP_DETECTED to
13100                 * detect eDP ports.
13101                 */
13102                if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED &&
13103                    !intel_dp_is_edp(dev, PORT_B))
13104                        intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
13105                                        PORT_B);
13106                if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
13107                    intel_dp_is_edp(dev, PORT_B))
13108                        intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
13109
13110                if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED &&
13111                    !intel_dp_is_edp(dev, PORT_C))
13112                        intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
13113                                        PORT_C);
13114                if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
13115                    intel_dp_is_edp(dev, PORT_C))
13116                        intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
13117
13118                if (IS_CHERRYVIEW(dev)) {
13119                        if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
13120                                intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
13121                                                PORT_D);
13122                        /* eDP not supported on port D, so don't check VBT */
13123                        if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
13124                                intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
13125                }
13126
13127                intel_dsi_init(dev);
13128        } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
13129                bool found = false;
13130
13131                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
13132                        DRM_DEBUG_KMS("probing SDVOB\n");
13133                        found = intel_sdvo_init(dev, GEN3_SDVOB, true);
13134                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
13135                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
13136                                intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
13137                        }
13138
13139                        if (!found && SUPPORTS_INTEGRATED_DP(dev))
13140                                intel_dp_init(dev, DP_B, PORT_B);
13141                }
13142
13143                /* Before G4X, SDVOC doesn't have its own detect register */
13144
13145                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
13146                        DRM_DEBUG_KMS("probing SDVOC\n");
13147                        found = intel_sdvo_init(dev, GEN3_SDVOC, false);
13148                }
13149
13150                if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
13151
13152                        if (SUPPORTS_INTEGRATED_HDMI(dev)) {
13153                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
13154                                intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
13155                        }
13156                        if (SUPPORTS_INTEGRATED_DP(dev))
13157                                intel_dp_init(dev, DP_C, PORT_C);
13158                }
13159
13160                if (SUPPORTS_INTEGRATED_DP(dev) &&
13161                    (I915_READ(DP_D) & DP_DETECTED))
13162                        intel_dp_init(dev, DP_D, PORT_D);
13163        } else if (IS_GEN2(dev))
13164                intel_dvo_init(dev);
13165
13166        if (SUPPORTS_TV(dev))
13167                intel_tv_init(dev);
13168
13169        intel_psr_init(dev);
13170
13171        for_each_intel_encoder(dev, encoder) {
13172                encoder->base.possible_crtcs = encoder->crtc_mask;
13173                encoder->base.possible_clones =
13174                        intel_encoder_clones(encoder);
13175        }
13176
13177        intel_init_pch_refclk(dev);
13178
13179        drm_helper_move_panel_connectors_to_head(dev);
13180}
13181
13182static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
13183{
13184        struct drm_device *dev = fb->dev;
13185        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
13186
13187        drm_framebuffer_cleanup(fb);
13188        mutex_lock(&dev->struct_mutex);
13189        WARN_ON(!intel_fb->obj->framebuffer_references--);
13190        drm_gem_object_unreference(&intel_fb->obj->base);
13191        mutex_unlock(&dev->struct_mutex);
13192        kfree(intel_fb);
13193}
13194
13195static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
13196                                                struct drm_file *file,
13197                                                unsigned int *handle)
13198{
13199        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
13200        struct drm_i915_gem_object *obj = intel_fb->obj;
13201
13202        return drm_gem_handle_create(file, &obj->base, handle);
13203}
13204
13205static const struct drm_framebuffer_funcs intel_fb_funcs = {
13206        .destroy = intel_user_framebuffer_destroy,
13207        .create_handle = intel_user_framebuffer_create_handle,
13208};
13209
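     /*
      * Maximum allowed fb pitch in bytes for the given tiling and format:
      *   gen9+:              min(8K pixels, 32K bytes)
      *   gen5-8 (not VLV):   32K
      *   gen4 and VLV:       16K X-tiled, 32K linear
      *   gen3:                8K X-tiled, 16K linear
      *   gen2:                8K
      */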
13210static
13211u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
13212                         uint32_t pixel_format)
13213{
13214        u32 gen = INTEL_INFO(dev)->gen;
13215
13216        if (gen >= 9) {
13217                /* "The stride in bytes must not exceed the size of 8K
13218                 *  pixels and 32K bytes."
13219                 */
13220                return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
13221        } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
13222                return 32*1024;
13223        } else if (gen >= 4) {
13224                if (fb_modifier == I915_FORMAT_MOD_X_TILED)
13225                        return 16*1024;
13226                else
13227                        return 32*1024;
13228        } else if (gen >= 3) {
13229                if (fb_modifier == I915_FORMAT_MOD_X_TILED)
13230                        return 8*1024;
13231                else
13232                        return 16*1024;
13233        } else {
13234                /* XXX DSPC is limited to 4k tiled */
13235                return 8*1024;
13236        }
13237}
13238
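     /*
      * Validate the addfb request against the hardware's tiling, stride and
      * pixel format limits and, if everything checks out, wrap the GEM
      * object in the intel_framebuffer and register it with the core.
      */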
13239static int intel_framebuffer_init(struct drm_device *dev,
13240                                  struct intel_framebuffer *intel_fb,
13241                                  struct drm_mode_fb_cmd2 *mode_cmd,
13242                                  struct drm_i915_gem_object *obj)
13243{
13244        unsigned int aligned_height;
13245        int ret;
13246        u32 pitch_limit, stride_alignment;
13247
13248        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
13249
13250        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
13251                /* Enforce that fb modifier and tiling mode match, but only for
13252                 * X-tiled. This is needed for FBC. */
13253                if (!!(obj->tiling_mode == I915_TILING_X) !=
13254                    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
13255                        DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
13256                        return -EINVAL;
13257                }
13258        } else {
13259                if (obj->tiling_mode == I915_TILING_X)
13260                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
13261                else if (obj->tiling_mode == I915_TILING_Y) {
13262                        DRM_DEBUG("No Y tiling for legacy addfb\n");
13263                        return -EINVAL;
13264                }
13265        }
13266
13267        /* Passed in modifier sanity checking. */
13268        switch (mode_cmd->modifier[0]) {
13269        case I915_FORMAT_MOD_Y_TILED:
13270        case I915_FORMAT_MOD_Yf_TILED:
13271                if (INTEL_INFO(dev)->gen < 9) {
13272                        DRM_DEBUG("Unsupported tiling 0x%llx!\n",
13273                                  mode_cmd->modifier[0]);
13274                        return -EINVAL;
13275                }
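                     /* fall through - Y/Yf tiling is allowed on gen9+ */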
13276        case DRM_FORMAT_MOD_NONE:
13277        case I915_FORMAT_MOD_X_TILED:
13278                break;
13279        default:
13280                DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
13281                          mode_cmd->modifier[0]);
13282                return -EINVAL;
13283        }
13284
13285        stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
13286                                                     mode_cmd->pixel_format);
13287        if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
13288                DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
13289                          mode_cmd->pitches[0], stride_alignment);
13290                return -EINVAL;
13291        }
13292
13293        pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
13294                                           mode_cmd->pixel_format);
13295        if (mode_cmd->pitches[0] > pitch_limit) {
13296                DRM_DEBUG("%s pitch (%u) must be less than %d\n",
13297                          mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
13298                          "tiled" : "linear",
13299                          mode_cmd->pitches[0], pitch_limit);
13300                return -EINVAL;
13301        }
13302
13303        if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
13304            mode_cmd->pitches[0] != obj->stride) {
13305                DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
13306                          mode_cmd->pitches[0], obj->stride);
13307                return -EINVAL;
13308        }
13309
13310        /* Reject formats not supported by any plane early. */
13311        switch (mode_cmd->pixel_format) {
13312        case DRM_FORMAT_C8:
13313        case DRM_FORMAT_RGB565:
13314        case DRM_FORMAT_XRGB8888:
13315        case DRM_FORMAT_ARGB8888:
13316                break;
13317        case DRM_FORMAT_XRGB1555:
13318        case DRM_FORMAT_ARGB1555:
13319                if (INTEL_INFO(dev)->gen > 3) {
13320                        DRM_DEBUG("unsupported pixel format: %s\n",
13321                                  drm_get_format_name(mode_cmd->pixel_format));
13322                        return -EINVAL;
13323                }
13324                break;
13325        case DRM_FORMAT_XBGR8888:
13326        case DRM_FORMAT_ABGR8888:
13327        case DRM_FORMAT_XRGB2101010:
13328        case DRM_FORMAT_ARGB2101010:
13329        case DRM_FORMAT_XBGR2101010:
13330        case DRM_FORMAT_ABGR2101010:
13331                if (INTEL_INFO(dev)->gen < 4) {
13332                        DRM_DEBUG("unsupported pixel format: %s\n",
13333                                  drm_get_format_name(mode_cmd->pixel_format));
13334                        return -EINVAL;
13335                }
13336                break;
13337        case DRM_FORMAT_YUYV:
13338        case DRM_FORMAT_UYVY:
13339        case DRM_FORMAT_YVYU:
13340        case DRM_FORMAT_VYUY:
13341                if (INTEL_INFO(dev)->gen < 5) {
13342                        DRM_DEBUG("unsupported pixel format: %s\n",
13343                                  drm_get_format_name(mode_cmd->pixel_format));
13344                        return -EINVAL;
13345                }
13346                break;
13347        default:
13348                DRM_DEBUG("unsupported pixel format: %s\n",
13349                          drm_get_format_name(mode_cmd->pixel_format));
13350                return -EINVAL;
13351        }
13352
13353        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
13354        if (mode_cmd->offsets[0] != 0)
13355                return -EINVAL;
13356
13357        aligned_height = intel_fb_align_height(dev, mode_cmd->height,
13358                                               mode_cmd->pixel_format,
13359                                               mode_cmd->modifier[0]);
13360        /* FIXME drm helper for size checks (especially planar formats)? */
13361        if (obj->base.size < aligned_height * mode_cmd->pitches[0])
13362                return -EINVAL;
13363
13364        drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
13365        intel_fb->obj = obj;
13366        intel_fb->obj->framebuffer_references++;
13367
13368        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
13369        if (ret) {
13370                DRM_ERROR("framebuffer init failed %d\n", ret);
13371                return ret;
13372        }
13373
13374        return 0;
13375}
13376
13377static struct drm_framebuffer *
13378intel_user_framebuffer_create(struct drm_device *dev,
13379                              struct drm_file *filp,
13380                              struct drm_mode_fb_cmd2 *mode_cmd)
13381{
13382        struct drm_i915_gem_object *obj;
13383
13384        obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
13385                                                mode_cmd->handles[0]));
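             /* drm_gem_object_lookup() returns NULL for an unknown handle */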
13386        if (&obj->base == NULL)
13387                return ERR_PTR(-ENOENT);
13388
13389        return intel_framebuffer_create(dev, mode_cmd, obj);
13390}
13391
13392#ifndef CONFIG_DRM_I915_FBDEV
13393static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
13394{
13395}
13396#endif
13397
13398static const struct drm_mode_config_funcs intel_mode_funcs = {
13399        .fb_create = intel_user_framebuffer_create,
13400        .output_poll_changed = intel_fbdev_output_poll_changed,
13401        .atomic_check = intel_atomic_check,
13402        .atomic_commit = intel_atomic_commit,
13403};
13404
13405/* Set up chip specific display functions */
13406static void intel_init_display(struct drm_device *dev)
13407{
13408        struct drm_i915_private *dev_priv = dev->dev_private;
13409
13410        if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
13411                dev_priv->display.find_dpll = g4x_find_best_dpll;
13412        else if (IS_CHERRYVIEW(dev))
13413                dev_priv->display.find_dpll = chv_find_best_dpll;
13414        else if (IS_VALLEYVIEW(dev))
13415                dev_priv->display.find_dpll = vlv_find_best_dpll;
13416        else if (IS_PINEVIEW(dev))
13417                dev_priv->display.find_dpll = pnv_find_best_dpll;
13418        else
13419                dev_priv->display.find_dpll = i9xx_find_best_dpll;
13420
13421        if (INTEL_INFO(dev)->gen >= 9) {
13422                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
13423                dev_priv->display.get_initial_plane_config =
13424                        skylake_get_initial_plane_config;
13425                dev_priv->display.crtc_compute_clock =
13426                        haswell_crtc_compute_clock;
13427                dev_priv->display.crtc_enable = haswell_crtc_enable;
13428                dev_priv->display.crtc_disable = haswell_crtc_disable;
13429                dev_priv->display.off = ironlake_crtc_off;
13430                dev_priv->display.update_primary_plane =
13431                        skylake_update_primary_plane;
13432        } else if (HAS_DDI(dev)) {
13433                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
13434                dev_priv->display.get_initial_plane_config =
13435                        ironlake_get_initial_plane_config;
13436                dev_priv->display.crtc_compute_clock =
13437                        haswell_crtc_compute_clock;
13438                dev_priv->display.crtc_enable = haswell_crtc_enable;
13439                dev_priv->display.crtc_disable = haswell_crtc_disable;
13440                dev_priv->display.off = ironlake_crtc_off;
13441                dev_priv->display.update_primary_plane =
13442                        ironlake_update_primary_plane;
13443        } else if (HAS_PCH_SPLIT(dev)) {
13444                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
13445                dev_priv->display.get_initial_plane_config =
13446                        ironlake_get_initial_plane_config;
13447                dev_priv->display.crtc_compute_clock =
13448                        ironlake_crtc_compute_clock;
13449                dev_priv->display.crtc_enable = ironlake_crtc_enable;
13450                dev_priv->display.crtc_disable = ironlake_crtc_disable;
13451                dev_priv->display.off = ironlake_crtc_off;
13452                dev_priv->display.update_primary_plane =
13453                        ironlake_update_primary_plane;
13454        } else if (IS_VALLEYVIEW(dev)) {
13455                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
13456                dev_priv->display.get_initial_plane_config =
13457                        i9xx_get_initial_plane_config;
13458                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
13459                dev_priv->display.crtc_enable = valleyview_crtc_enable;
13460                dev_priv->display.crtc_disable = i9xx_crtc_disable;
13461                dev_priv->display.off = i9xx_crtc_off;
13462                dev_priv->display.update_primary_plane =
13463                        i9xx_update_primary_plane;
13464        } else {
13465                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
13466                dev_priv->display.get_initial_plane_config =
13467                        i9xx_get_initial_plane_config;
13468                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
13469                dev_priv->display.crtc_enable = i9xx_crtc_enable;
13470                dev_priv->display.crtc_disable = i9xx_crtc_disable;
13471                dev_priv->display.off = i9xx_crtc_off;
13472                dev_priv->display.update_primary_plane =
13473                        i9xx_update_primary_plane;
13474        }
13475
13476        /* Returns the core display clock speed */
13477        if (IS_VALLEYVIEW(dev))
13478                dev_priv->display.get_display_clock_speed =
13479                        valleyview_get_display_clock_speed;
13480        else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
13481                dev_priv->display.get_display_clock_speed =
13482                        i945_get_display_clock_speed;
13483        else if (IS_I915G(dev))
13484                dev_priv->display.get_display_clock_speed =
13485                        i915_get_display_clock_speed;
13486        else if (IS_I945GM(dev) || IS_845G(dev))
13487                dev_priv->display.get_display_clock_speed =
13488                        i9xx_misc_get_display_clock_speed;
13489        else if (IS_PINEVIEW(dev))
13490                dev_priv->display.get_display_clock_speed =
13491                        pnv_get_display_clock_speed;
13492        else if (IS_I915GM(dev))
13493                dev_priv->display.get_display_clock_speed =
13494                        i915gm_get_display_clock_speed;
13495        else if (IS_I865G(dev))
13496                dev_priv->display.get_display_clock_speed =
13497                        i865_get_display_clock_speed;
13498        else if (IS_I85X(dev))
13499                dev_priv->display.get_display_clock_speed =
13500                        i855_get_display_clock_speed;
13501        else /* 852, 830 */
13502                dev_priv->display.get_display_clock_speed =
13503                        i830_get_display_clock_speed;
13504
13505        if (IS_GEN5(dev)) {
13506                dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
13507        } else if (IS_GEN6(dev)) {
13508                dev_priv->display.fdi_link_train = gen6_fdi_link_train;
13509        } else if (IS_IVYBRIDGE(dev)) {
13510                /* FIXME: detect B0+ stepping and use auto training */
13511                dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
13512        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
13513                dev_priv->display.fdi_link_train = hsw_fdi_link_train;
13514        } else if (IS_VALLEYVIEW(dev)) {
13515                dev_priv->display.modeset_global_resources =
13516                        valleyview_modeset_global_resources;
13517        }
13518
13519        switch (INTEL_INFO(dev)->gen) {
13520        case 2:
13521                dev_priv->display.queue_flip = intel_gen2_queue_flip;
13522                break;
13523
13524        case 3:
13525                dev_priv->display.queue_flip = intel_gen3_queue_flip;
13526                break;
13527
13528        case 4:
13529        case 5:
13530                dev_priv->display.queue_flip = intel_gen4_queue_flip;
13531                break;
13532
13533        case 6:
13534                dev_priv->display.queue_flip = intel_gen6_queue_flip;
13535                break;
13536        case 7:
13537        case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
13538                dev_priv->display.queue_flip = intel_gen7_queue_flip;
13539                break;
13540        case 9:
13541                /* Drop through - unsupported since execlist only. */
13542        default:
13543                /* Default just returns -ENODEV to indicate unsupported */
13544                dev_priv->display.queue_flip = intel_default_queue_flip;
13545        }
13546
13547        intel_panel_init_backlight_funcs(dev);
13548
13549        mutex_init(&dev_priv->pps_mutex);
13550}
13551
13552/*
13553 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
13554 * resume, or other times.  This quirk makes sure that's the case for
13555 * affected systems.
13556 */
13557static void quirk_pipea_force(struct drm_device *dev)
13558{
13559        struct drm_i915_private *dev_priv = dev->dev_private;
13560
13561        dev_priv->quirks |= QUIRK_PIPEA_FORCE;
13562        DRM_INFO("applying pipe a force quirk\n");
13563}
13564
13565static void quirk_pipeb_force(struct drm_device *dev)
13566{
13567        struct drm_i915_private *dev_priv = dev->dev_private;
13568
13569        dev_priv->quirks |= QUIRK_PIPEB_FORCE;
13570        DRM_INFO("applying pipe b force quirk\n");
13571}
13572
13573/*
13574 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
13575 */
13576static void quirk_ssc_force_disable(struct drm_device *dev)
13577{
13578        struct drm_i915_private *dev_priv = dev->dev_private;
13579        dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
13580        DRM_INFO("applying lvds SSC disable quirk\n");
13581}
13582
13583/*
13584 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
13585 * brightness value
13586 */
13587static void quirk_invert_brightness(struct drm_device *dev)
13588{
13589        struct drm_i915_private *dev_priv = dev->dev_private;
13590        dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
13591        DRM_INFO("applying inverted panel brightness quirk\n");
13592}
13593
13594/* Some VBT's incorrectly indicate no backlight is present */
13595static void quirk_backlight_present(struct drm_device *dev)
13596{
13597        struct drm_i915_private *dev_priv = dev->dev_private;
13598        dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
13599        DRM_INFO("applying backlight present quirk\n");
13600}
13601
13602struct intel_quirk {
13603        int device;
13604        int subsystem_vendor;
13605        int subsystem_device;
13606        void (*hook)(struct drm_device *dev);
13607};
13608
13609/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
13610struct intel_dmi_quirk {
13611        void (*hook)(struct drm_device *dev);
13612        const struct dmi_system_id (*dmi_id_list)[];
13613};
13614
13615static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
13616{
13617        DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
13618        return 1;
13619}
13620
13621static const struct intel_dmi_quirk intel_dmi_quirks[] = {
13622        {
13623                .dmi_id_list = &(const struct dmi_system_id[]) {
13624                        {
13625                                .callback = intel_dmi_reverse_brightness,
13626                                .ident = "NCR Corporation",
13627                                .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
13628                                            DMI_MATCH(DMI_PRODUCT_NAME, ""),
13629                                },
13630                        },
13631                        { }  /* terminating entry */
13632                },
13633                .hook = quirk_invert_brightness,
13634        },
13635};
13636
13637static struct intel_quirk intel_quirks[] = {
13638        /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
13639        { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
13640
13641        /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
13642        { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
13643
13644        /* 830 needs to leave pipe A & dpll A up */
13645        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
13646
13647        /* 830 needs to leave pipe B & dpll B up */
13648        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
13649
13650        /* Lenovo U160 cannot use SSC on LVDS */
13651        { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
13652
13653        /* Sony Vaio Y cannot use SSC on LVDS */
13654        { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
13655
13656        /* Acer Aspire 5734Z must invert backlight brightness */
13657        { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
13658
13659        /* Acer/eMachines G725 */
13660        { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
13661
13662        /* Acer/eMachines e725 */
13663        { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
13664
13665        /* Acer/Packard Bell NCL20 */
13666        { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
13667
13668        /* Acer Aspire 4736Z */
13669        { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
13670
13671        /* Acer Aspire 5336 */
13672        { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
13673
13674        /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
13675        { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
13676
13677        /* Acer C720 Chromebook (Core i3 4005U) */
13678        { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
13679
13680        /* Apple Macbook 2,1 (Core 2 T7400) */
13681        { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
13682
13683        /* Toshiba CB35 Chromebook (Celeron 2955U) */
13684        { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
13685
13686        /* HP Chromebook 14 (Celeron 2955U) */
13687        { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
13688
13689        /* Dell Chromebook 11 */
13690        { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
13691};
13692
13693static void intel_init_quirks(struct drm_device *dev)
13694{
13695        struct pci_dev *d = dev->pdev;
13696        int i;
13697
13698        for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
13699                struct intel_quirk *q = &intel_quirks[i];
13700
13701                if (d->device == q->device &&
13702                    (d->subsystem_vendor == q->subsystem_vendor ||
13703                     q->subsystem_vendor == PCI_ANY_ID) &&
13704                    (d->subsystem_device == q->subsystem_device ||
13705                     q->subsystem_device == PCI_ANY_ID))
13706                        q->hook(dev);
13707        }
13708        for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
13709                if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
13710                        intel_dmi_quirks[i].hook(dev);
13711        }
13712}
13713
13714/* Disable the VGA plane that we never use */
13715static void i915_disable_vga(struct drm_device *dev)
13716{
13717        struct drm_i915_private *dev_priv = dev->dev_private;
13718        u8 sr1;
13719        u32 vga_reg = i915_vgacntrl_reg(dev);
13720
13721        /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
13722        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
13723        outb(SR01, VGA_SR_INDEX);
13724        sr1 = inb(VGA_SR_DATA);
13725        outb(sr1 | 1<<5, VGA_SR_DATA);
13726        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
13727        udelay(300);
13728
13729        I915_WRITE(vga_reg, VGA_DISP_DISABLE);
13730        POSTING_READ(vga_reg);
13731}
13732
13733void intel_modeset_init_hw(struct drm_device *dev)
13734{
13735        intel_prepare_ddi(dev);
13736
13737        if (IS_VALLEYVIEW(dev))
13738                vlv_update_cdclk(dev);
13739
13740        intel_init_clock_gating(dev);
13741
13742        intel_enable_gt_powersave(dev);
13743}
13744
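     /*
      * One-time modeset initialization: set up the mode_config limits and
      * hooks for this platform, create a crtc (plus sprite planes) per
      * pipe, register the outputs and then take over the state (and any
      * framebuffer) left behind by the BIOS.
      */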
13745void intel_modeset_init(struct drm_device *dev)
13746{
13747        struct drm_i915_private *dev_priv = dev->dev_private;
13748        int sprite, ret;
13749        enum pipe pipe;
13750        struct intel_crtc *crtc;
13751
13752        drm_mode_config_init(dev);
13753
13754        dev->mode_config.min_width = 0;
13755        dev->mode_config.min_height = 0;
13756
13757        dev->mode_config.preferred_depth = 24;
13758        dev->mode_config.prefer_shadow = 1;
13759
13760        dev->mode_config.allow_fb_modifiers = true;
13761
13762        dev->mode_config.funcs = &intel_mode_funcs;
13763
13764        intel_init_quirks(dev);
13765
13766        intel_init_pm(dev);
13767
13768        if (INTEL_INFO(dev)->num_pipes == 0)
13769                return;
13770
13771        intel_init_display(dev);
13772        intel_init_audio(dev);
13773
13774        if (IS_GEN2(dev)) {
13775                dev->mode_config.max_width = 2048;
13776                dev->mode_config.max_height = 2048;
13777        } else if (IS_GEN3(dev)) {
13778                dev->mode_config.max_width = 4096;
13779                dev->mode_config.max_height = 4096;
13780        } else {
13781                dev->mode_config.max_width = 8192;
13782                dev->mode_config.max_height = 8192;
13783        }
13784
13785        if (IS_845G(dev) || IS_I865G(dev)) {
13786                dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
13787                dev->mode_config.cursor_height = 1023;
13788        } else if (IS_GEN2(dev)) {
13789                dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
13790                dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
13791        } else {
13792                dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
13793                dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
13794        }
13795
13796        dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
13797
13798        DRM_DEBUG_KMS("%d display pipe%s available.\n",
13799                      INTEL_INFO(dev)->num_pipes,
13800                      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
13801
13802        for_each_pipe(dev_priv, pipe) {
13803                intel_crtc_init(dev, pipe);
13804                for_each_sprite(dev_priv, pipe, sprite) {
13805                        ret = intel_plane_init(dev, pipe, sprite);
13806                        if (ret)
13807                                DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
13808                                              pipe_name(pipe), sprite_name(pipe, sprite), ret);
13809                }
13810        }
13811
13812        intel_init_dpio(dev);
13813
13814        intel_shared_dpll_init(dev);
13815
13816        /* Just disable it once at startup */
13817        i915_disable_vga(dev);
13818        intel_setup_outputs(dev);
13819
13820        /* Just in case the BIOS is doing something questionable. */
13821        intel_fbc_disable(dev);
13822
13823        drm_modeset_lock_all(dev);
13824        intel_modeset_setup_hw_state(dev, false);
13825        drm_modeset_unlock_all(dev);
13826
13827        for_each_intel_crtc(dev, crtc) {
13828                if (!crtc->active)
13829                        continue;
13830
13831                /*
13832                 * Note that reserving the BIOS fb up front prevents us
13833                 * from stuffing other stolen allocations like the ring
13834                 * on top.  This prevents some ugliness at boot time, and
13835                 * can even allow for smooth boot transitions if the BIOS
13836                 * fb is large enough for the active pipe configuration.
13837                 */
13838                if (dev_priv->display.get_initial_plane_config) {
13839                        dev_priv->display.get_initial_plane_config(crtc,
13840                                                           &crtc->plane_config);
13841                        /*
13842                         * If the fb is shared between multiple heads, we'll
13843                         * just get the first one.
13844                         */
13845                        intel_find_initial_plane_obj(crtc, &crtc->plane_config);
13846                }
13847        }
13848}
13849
13850static void intel_enable_pipe_a(struct drm_device *dev)
13851{
13852        struct intel_connector *connector;
13853        struct drm_connector *crt = NULL;
13854        struct intel_load_detect_pipe load_detect_temp;
13855        struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
13856
13857        /* We can't just switch on pipe A; we need to set things up with a
13858         * proper mode and output configuration. As a gross hack, enable pipe A
13859         * by enabling the load detect pipe once. */
13860        for_each_intel_connector(dev, connector) {
13861                if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
13862                        crt = &connector->base;
13863                        break;
13864                }
13865        }
13866
13867        if (!crt)
13868                return;
13869
13870        if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
13871                intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
13872}
13873
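     /*
      * Returns false when the other primary plane is enabled and selects
      * this crtc's pipe, i.e. the BIOS left a crossed plane -> pipe mapping
      * behind; only relevant on gen2/3 where the mapping is programmable.
      */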
13874static bool
13875intel_check_plane_mapping(struct intel_crtc *crtc)
13876{
13877        struct drm_device *dev = crtc->base.dev;
13878        struct drm_i915_private *dev_priv = dev->dev_private;
13879        u32 reg, val;
13880
13881        if (INTEL_INFO(dev)->num_pipes == 1)
13882                return true;
13883
13884        reg = DSPCNTR(!crtc->plane);
13885        val = I915_READ(reg);
13886
13887        if ((val & DISPLAY_PLANE_ENABLE) &&
13888            (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
13889                return false;
13890
13891        return true;
13892}
13893
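     /*
      * Fix up inconsistent crtc state left behind by the BIOS: clear frame
      * start delays, resync vblank bookkeeping, repair a crossed plane ->
      * pipe mapping, apply the pipe A force quirk and bring the DPMS/
      * encoder/connector links in line with what the pipe is really doing.
      */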
13894static void intel_sanitize_crtc(struct intel_crtc *crtc)
13895{
13896        struct drm_device *dev = crtc->base.dev;
13897        struct drm_i915_private *dev_priv = dev->dev_private;
13898        u32 reg;
13899
13900        /* Clear any frame start delays (used for debugging) left by the BIOS */
13901        reg = PIPECONF(crtc->config->cpu_transcoder);
13902        I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
13903
13904        /* restore vblank interrupts to correct state */
13905        drm_crtc_vblank_reset(&crtc->base);
13906        if (crtc->active) {
13907                update_scanline_offset(crtc);
13908                drm_crtc_vblank_on(&crtc->base);
13909        }
13910
13911        /* We need to sanitize the plane -> pipe mapping first because this will
13912         * disable the crtc (and hence change the state) if it is wrong. Note
13913         * that gen4+ has a fixed plane -> pipe mapping.  */
13914        if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
13915                struct intel_connector *connector;
13916                bool plane;
13917
13918                DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
13919                              crtc->base.base.id);
13920
13921                /* Pipe has the wrong plane attached and the plane is active.
13922                 * Temporarily change the plane mapping and disable everything
13923                 * ...  */
13924                plane = crtc->plane;
13925                crtc->plane = !plane;
13926                crtc->primary_enabled = true;
13927                dev_priv->display.crtc_disable(&crtc->base);
13928                crtc->plane = plane;
13929
13930                /* ... and break all links. */
13931                for_each_intel_connector(dev, connector) {
13932                        if (connector->encoder->base.crtc != &crtc->base)
13933                                continue;
13934
13935                        connector->base.dpms = DRM_MODE_DPMS_OFF;
13936                        connector->base.encoder = NULL;
13937                }
13938                /* Multiple connectors may share the same encoder:
13939                 * handle them and break the crtc link separately. */
13940                for_each_intel_connector(dev, connector)
13941                        if (connector->encoder->base.crtc == &crtc->base) {
13942                                connector->encoder->base.crtc = NULL;
13943                                connector->encoder->connectors_active = false;
13944                        }
13945
13946                WARN_ON(crtc->active);
13947                crtc->base.state->enable = false;
13948                crtc->base.enabled = false;
13949        }
13950
13951        if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
13952            crtc->pipe == PIPE_A && !crtc->active) {
13953                /* The BIOS forgot to enable pipe A; this mostly happens after
13954                 * resume. Force-enable the pipe to fix this; in the update_dpms
13955                 * call below we restore the pipe to the right state, but leave
13956                 * the required bits on. */
13957                intel_enable_pipe_a(dev);
13958        }
13959
13960        /* Adjust the state of the output pipe according to whether we
13961         * have active connectors/encoders. */
13962        intel_crtc_update_dpms(&crtc->base);
13963
13964        if (crtc->active != crtc->base.state->enable) {
13965                struct intel_encoder *encoder;
13966
13967                /* This can happen either due to bugs in the get_hw_state
13968                 * functions or because the pipe is force-enabled due to the
13969                 * pipe A quirk. */
13970                DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
13971                              crtc->base.base.id,
13972                              crtc->base.state->enable ? "enabled" : "disabled",
13973                              crtc->active ? "enabled" : "disabled");
13974
13975                crtc->base.state->enable = crtc->active;
13976                crtc->base.enabled = crtc->active;
13977
13978                /* Because we only establish the connector -> encoder ->
13979                 * crtc links if something is active, this means the
13980                 * crtc is now deactivated. Break the links. Connector
13981                 * -> encoder links are only established when things are
13982                 * actually up, hence there is no need to break them. */
13983                WARN_ON(crtc->active);
13984
13985                for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
13986                        WARN_ON(encoder->connectors_active);
13987                        encoder->base.crtc = NULL;
13988                }
13989        }
13990
13991        if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
13992                /*
13993                 * We start out with underrun reporting disabled to avoid races.
13994                 * For correct bookkeeping mark this on active crtcs.
13995                 *
13996                 * Also, on gmch platforms we don't have any hardware bits to
13997                 * disable the underrun reporting, which means we need to start
13998                 * out with underrun reporting disabled on inactive pipes as well,
13999                 * since otherwise we'll complain about the garbage we read when
14000                 * e.g. coming up after runtime pm.
14001                 *
14002                 * No protection against concurrent access is required - at
14003                 * worst a fifo underrun happens which also sets this to false.
14004                 */
14005                crtc->cpu_fifo_underrun_disabled = true;
14006                crtc->pch_fifo_underrun_disabled = true;
14007        }
14008}
14009
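/*
 * Fix up an encoder whose hardware state is inconsistent, e.g. one that still
 * claims active connectors while no active pipe is feeding it (typically
 * fallout from the register restore on resume): disable it and clamp the
 * attached connectors to off.
 */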
14010static void intel_sanitize_encoder(struct intel_encoder *encoder)
14011{
14012        struct intel_connector *connector;
14013        struct drm_device *dev = encoder->base.dev;
14014
14015        /* We need to check both for a crtc link (meaning that the
14016         * encoder is active and trying to read from a pipe) and the
14017         * pipe itself being active. */
14018        bool has_active_crtc = encoder->base.crtc &&
14019                to_intel_crtc(encoder->base.crtc)->active;
14020
14021        if (encoder->connectors_active && !has_active_crtc) {
14022                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
14023                              encoder->base.base.id,
14024                              encoder->base.name);
14025
14026                /* Connector is active, but has no active pipe. This is
14027                 * fallout from our resume register restoring. Disable
14028                 * the encoder manually again. */
14029                if (encoder->base.crtc) {
14030                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
14031                                      encoder->base.base.id,
14032                                      encoder->base.name);
14033                        encoder->disable(encoder);
14034                        if (encoder->post_disable)
14035                                encoder->post_disable(encoder);
14036                }
14037                encoder->base.crtc = NULL;
14038                encoder->connectors_active = false;
14039
14040                /* Inconsistent output/port/pipe state is presumably due to a bug
14041                 * in one of the get_hw_state functions, or to someplace else in
14042                 * our code, like the register restore mess on resume. Clamp
14043                 * things to off as a safer default. */
14044                for_each_intel_connector(dev, connector) {
14045                        if (connector->encoder != encoder)
14046                                continue;
14047                        connector->base.dpms = DRM_MODE_DPMS_OFF;
14048                        connector->base.encoder = NULL;
14049                }
14050        }
14051        /* Enabled encoders without active connectors will be fixed in
14052         * the crtc fixup. */
14053}
14054
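/*
 * Turn the legacy VGA plane off again if something re-enabled it behind our
 * back; callers are expected to ensure the VGA power domain is already on.
 */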
14055void i915_redisable_vga_power_on(struct drm_device *dev)
14056{
14057        struct drm_i915_private *dev_priv = dev->dev_private;
14058        u32 vga_reg = i915_vgacntrl_reg(dev);
14059
14060        if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
14061                DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
14062                i915_disable_vga(dev);
14063        }
14064}
14065
14066void i915_redisable_vga(struct drm_device *dev)
14067{
14068        struct drm_i915_private *dev_priv = dev->dev_private;
14069
14070        /* This function can be called either from intel_modeset_setup_hw_state or
14071         * at a very early point in our resume sequence, where the power well
14072         * structures are not yet restored. Since this function is at a very
14073         * paranoid "someone might have enabled VGA while we were not looking"
14074         * level, just check if the power well is enabled instead of trying to
14075         * follow the "don't touch the power well if we don't need it" policy
14076         * the rest of the driver uses. */
14077        if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
14078                return;
14079
14080        i915_redisable_vga_power_on(dev);
14081}
14082
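/* Read back whether the primary plane of an (active) crtc is enabled in hw. */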
14083static bool primary_get_hw_state(struct intel_crtc *crtc)
14084{
14085        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
14086
14087        if (!crtc->active)
14088                return false;
14089
14090        return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
14091}
14092
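/*
 * Read the current hardware state of all crtcs, shared DPLLs, encoders and
 * connectors into our software tracking structures, without touching the
 * hardware itself.
 */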
14093static void intel_modeset_readout_hw_state(struct drm_device *dev)
14094{
14095        struct drm_i915_private *dev_priv = dev->dev_private;
14096        enum pipe pipe;
14097        struct intel_crtc *crtc;
14098        struct intel_encoder *encoder;
14099        struct intel_connector *connector;
14100        int i;
14101
14102        for_each_intel_crtc(dev, crtc) {
14103                memset(crtc->config, 0, sizeof(*crtc->config));
14104
14105                crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
14106
14107                crtc->active = dev_priv->display.get_pipe_config(crtc,
14108                                                                 crtc->config);
14109
14110                crtc->base.state->enable = crtc->active;
14111                crtc->base.enabled = crtc->active;
14112                crtc->primary_enabled = primary_get_hw_state(crtc);
14113
14114                DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
14115                              crtc->base.base.id,
14116                              crtc->active ? "enabled" : "disabled");
14117        }
14118
14119        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
14120                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
14121
14122                pll->on = pll->get_hw_state(dev_priv, pll,
14123                                            &pll->config.hw_state);
14124                pll->active = 0;
14125                pll->config.crtc_mask = 0;
14126                for_each_intel_crtc(dev, crtc) {
14127                        if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
14128                                pll->active++;
14129                                pll->config.crtc_mask |= 1 << crtc->pipe;
14130                        }
14131                }
14132
14133                DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
14134                              pll->name, pll->config.crtc_mask, pll->on);
14135
14136                if (pll->config.crtc_mask)
14137                        intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
14138        }
14139
14140        for_each_intel_encoder(dev, encoder) {
14141                pipe = 0;
14142
14143                if (encoder->get_hw_state(encoder, &pipe)) {
14144                        crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
14145                        encoder->base.crtc = &crtc->base;
14146                        encoder->get_config(encoder, crtc->config);
14147                } else {
14148                        encoder->base.crtc = NULL;
14149                }
14150
14151                encoder->connectors_active = false;
14152                DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
14153                              encoder->base.base.id,
14154                              encoder->base.name,
14155                              encoder->base.crtc ? "enabled" : "disabled",
14156                              pipe_name(pipe));
14157        }
14158
14159        for_each_intel_connector(dev, connector) {
14160                if (connector->get_hw_state(connector)) {
14161                        connector->base.dpms = DRM_MODE_DPMS_ON;
14162                        connector->encoder->connectors_active = true;
14163                        connector->base.encoder = &connector->encoder->base;
14164                } else {
14165                        connector->base.dpms = DRM_MODE_DPMS_OFF;
14166                        connector->base.encoder = NULL;
14167                }
14168                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
14169                              connector->base.base.id,
14170                              connector->base.name,
14171                              connector->base.encoder ? "enabled" : "disabled");
14172        }
14173}
14174
14175/* Scans out the current hw modeset state, sanitizes it and maps it into the
14176 * drm and i915 state tracking structures. */
14177void intel_modeset_setup_hw_state(struct drm_device *dev,
14178                                  bool force_restore)
14179{
14180        struct drm_i915_private *dev_priv = dev->dev_private;
14181        enum pipe pipe;
14182        struct intel_crtc *crtc;
14183        struct intel_encoder *encoder;
14184        int i;
14185
14186        intel_modeset_readout_hw_state(dev);
14187
14188        /*
14189         * Now that we have the config, copy it to each CRTC struct.
14190         * Note that this could go away if we move to using crtc_config
14191         * checking everywhere.
14192         */
14193        for_each_intel_crtc(dev, crtc) {
14194                if (crtc->active && i915.fastboot) {
14195                        intel_mode_from_pipe_config(&crtc->base.mode,
14196                                                    crtc->config);
14197                        DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
14198                                      crtc->base.base.id);
14199                        drm_mode_debug_printmodeline(&crtc->base.mode);
14200                }
14201        }
14202
14203        /* HW state is read out, now we need to sanitize this mess. */
14204        for_each_intel_encoder(dev, encoder) {
14205                intel_sanitize_encoder(encoder);
14206        }
14207
14208        for_each_pipe(dev_priv, pipe) {
14209                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
14210                intel_sanitize_crtc(crtc);
14211                intel_dump_pipe_config(crtc, crtc->config,
14212                                       "[setup_hw_state]");
14213        }
14214
14215        intel_modeset_update_connector_atomic_state(dev);
14216
14217        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
14218                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
14219
14220                if (!pll->on || pll->active)
14221                        continue;
14222
14223                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
14224
14225                pll->disable(dev_priv, pll);
14226                pll->on = false;
14227        }
14228
14229        if (IS_GEN9(dev))
14230                skl_wm_get_hw_state(dev);
14231        else if (HAS_PCH_SPLIT(dev))
14232                ilk_wm_get_hw_state(dev);
14233
14234        if (force_restore) {
14235                i915_redisable_vga(dev);
14236
14237                /*
14238                 * We need to use raw interfaces for restoring state to avoid
14239                 * checking (bogus) intermediate states.
14240                 */
14241                for_each_pipe(dev_priv, pipe) {
14242                        struct drm_crtc *crtc =
14243                                dev_priv->pipe_to_crtc_mapping[pipe];
14244
14245                        intel_crtc_restore_mode(crtc);
14246                }
14247        } else {
14248                intel_modeset_update_staged_output_state(dev);
14249        }
14250
14251        intel_modeset_check_state(dev);
14252}
14253
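/*
 * Late display initialisation that has to wait until GEM is up: enable the GT
 * power-saving features, pick up the SSC state left by the BIOS, pin the
 * framebuffer(s) inherited from the BIOS and register the backlight.
 */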
14254void intel_modeset_gem_init(struct drm_device *dev)
14255{
14256        struct drm_i915_private *dev_priv = dev->dev_private;
14257        struct drm_crtc *c;
14258        struct drm_i915_gem_object *obj;
14259        int ret;
14260
14261        mutex_lock(&dev->struct_mutex);
14262        intel_init_gt_powersave(dev);
14263        mutex_unlock(&dev->struct_mutex);
14264
14265        /*
14266         * There may be no VBT; if the BIOS enabled SSC we can just keep
14267         * using it to avoid unnecessary flicker. Conversely, if the BIOS
14268         * isn't using it, don't assume it will work even if the VBT
14269         * indicates as much.
14270         */
14271        if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
14272                dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
14273                                                DREF_SSC1_ENABLE);
14274
14275        intel_modeset_init_hw(dev);
14276
14277        intel_setup_overlay(dev);
14278
14279        /*
14280         * Make sure any fbs we allocated at startup are properly
14281         * pinned & fenced.  When we do the allocation it's too early
14282         * for this.
14283         */
14284        for_each_crtc(dev, c) {
14285                obj = intel_fb_obj(c->primary->fb);
14286                if (obj == NULL)
14287                        continue;
14288
14289                mutex_lock(&dev->struct_mutex);
14290                ret = intel_pin_and_fence_fb_obj(c->primary,
14291                                                 c->primary->fb,
14292                                                 c->primary->state,
14293                                                 NULL);
14294                mutex_unlock(&dev->struct_mutex);
14295                if (ret) {
14296                        DRM_ERROR("failed to pin boot fb on pipe %d\n",
14297                                  to_intel_crtc(c)->pipe);
14298                        drm_framebuffer_unreference(c->primary->fb);
14299                        c->primary->fb = NULL;
14300                        update_state_fb(c->primary);
14301                }
14302        }
14303
14304        intel_backlight_register(dev);
14305}
14306
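/* Tear down a connector's backlight and sysfs entries before it goes away. */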
14307void intel_connector_unregister(struct intel_connector *intel_connector)
14308{
14309        struct drm_connector *connector = &intel_connector->base;
14310
14311        intel_panel_destroy_backlight(connector);
14312        drm_connector_unregister(connector);
14313}
14314
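/*
 * Tear down the display side of the driver: interrupts and polling are shut
 * down first to avoid races with still-running work, then the connectors, the
 * overlay and the GT power-saving state are cleaned up.
 */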
14315void intel_modeset_cleanup(struct drm_device *dev)
14316{
14317        struct drm_i915_private *dev_priv = dev->dev_private;
14318        struct drm_connector *connector;
14319
14320        intel_disable_gt_powersave(dev);
14321
14322        intel_backlight_unregister(dev);
14323
14324        /*
14325         * Shut down interrupts and polling first to avoid creating havoc.
14326         * Too much stuff here (turning off connectors, ...) would
14327         * otherwise experience fancy races.
14328         */
14329        intel_irq_uninstall(dev_priv);
14330
14331        /*
14332         * Due to the hpd irq storm handling, the hotplug work can re-arm the
14333         * poll handlers. Hence disable polling only after hpd handling is shut down.
14334         */
14335        drm_kms_helper_poll_fini(dev);
14336
14337        mutex_lock(&dev->struct_mutex);
14338
14339        intel_unregister_dsm_handler();
14340
14341        intel_fbc_disable(dev);
14342
14343        mutex_unlock(&dev->struct_mutex);
14344
14345        /* flush any delayed tasks or pending work */
14346        flush_scheduled_work();
14347
14348        /* destroy the backlight and sysfs files before encoders/connectors */
14349        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
14350                struct intel_connector *intel_connector;
14351
14352                intel_connector = to_intel_connector(connector);
14353                intel_connector->unregister(intel_connector);
14354        }
14355
14356        drm_mode_config_cleanup(dev);
14357
14358        intel_cleanup_overlay(dev);
14359
14360        mutex_lock(&dev->struct_mutex);
14361        intel_cleanup_gt_powersave(dev);
14362        mutex_unlock(&dev->struct_mutex);
14363}
14364
14365/*
14366 * Return which encoder is currently attached to the connector.
14367 */
14368struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
14369{
14370        return &intel_attached_encoder(connector)->base;
14371}
14372
14373void intel_connector_attach_encoder(struct intel_connector *connector,
14374                                    struct intel_encoder *encoder)
14375{
14376        connector->encoder = encoder;
14377        drm_mode_connector_attach_encoder(&connector->base,
14378                                          &encoder->base);
14379}
14380
14381/*
14382 * Set the VGA decode state - true == enable VGA decode.
14383 */
14384int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
14385{
14386        struct drm_i915_private *dev_priv = dev->dev_private;
14387        unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
14388        u16 gmch_ctrl;
14389
14390        if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
14391                DRM_ERROR("failed to read control word\n");
14392                return -EIO;
14393        }
14394
14395        if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
14396                return 0;
14397
14398        if (state)
14399                gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
14400        else
14401                gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
14402
14403        if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
14404                DRM_ERROR("failed to write control word\n");
14405                return -EIO;
14406        }
14407
14408        return 0;
14409}
14410
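/*
 * Snapshot of display-related registers taken for error reporting; filled in
 * by intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state() below.
 */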
14411struct intel_display_error_state {
14412
14413        u32 power_well_driver;
14414
14415        int num_transcoders;
14416
14417        struct intel_cursor_error_state {
14418                u32 control;
14419                u32 position;
14420                u32 base;
14421                u32 size;
14422        } cursor[I915_MAX_PIPES];
14423
14424        struct intel_pipe_error_state {
14425                bool power_domain_on;
14426                u32 source;
14427                u32 stat;
14428        } pipe[I915_MAX_PIPES];
14429
14430        struct intel_plane_error_state {
14431                u32 control;
14432                u32 stride;
14433                u32 size;
14434                u32 pos;
14435                u32 addr;
14436                u32 surface;
14437                u32 tile_offset;
14438        } plane[I915_MAX_PIPES];
14439
14440        struct intel_transcoder_error_state {
14441                bool power_domain_on;
14442                enum transcoder cpu_transcoder;
14443
14444                u32 conf;
14445
14446                u32 htotal;
14447                u32 hblank;
14448                u32 hsync;
14449                u32 vtotal;
14450                u32 vblank;
14451                u32 vsync;
14452        } transcoder[4];
14453};
14454
14455struct intel_display_error_state *
14456intel_display_capture_error_state(struct drm_device *dev)
14457{
14458        struct drm_i915_private *dev_priv = dev->dev_private;
14459        struct intel_display_error_state *error;
14460        int transcoders[] = {
14461                TRANSCODER_A,
14462                TRANSCODER_B,
14463                TRANSCODER_C,
14464                TRANSCODER_EDP,
14465        };
14466        int i;
14467
14468        if (INTEL_INFO(dev)->num_pipes == 0)
14469                return NULL;
14470
14471        error = kzalloc(sizeof(*error), GFP_ATOMIC);
14472        if (error == NULL)
14473                return NULL;
14474
14475        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
14476                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
14477
14478        for_each_pipe(dev_priv, i) {
14479                error->pipe[i].power_domain_on =
14480                        __intel_display_power_is_enabled(dev_priv,
14481                                                         POWER_DOMAIN_PIPE(i));
14482                if (!error->pipe[i].power_domain_on)
14483                        continue;
14484
14485                error->cursor[i].control = I915_READ(CURCNTR(i));
14486                error->cursor[i].position = I915_READ(CURPOS(i));
14487                error->cursor[i].base = I915_READ(CURBASE(i));
14488
14489                error->plane[i].control = I915_READ(DSPCNTR(i));
14490                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
14491                if (INTEL_INFO(dev)->gen <= 3) {
14492                        error->plane[i].size = I915_READ(DSPSIZE(i));
14493                        error->plane[i].pos = I915_READ(DSPPOS(i));
14494                }
14495                if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
14496                        error->plane[i].addr = I915_READ(DSPADDR(i));
14497                if (INTEL_INFO(dev)->gen >= 4) {
14498                        error->plane[i].surface = I915_READ(DSPSURF(i));
14499                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
14500                }
14501
14502                error->pipe[i].source = I915_READ(PIPESRC(i));
14503
14504                if (HAS_GMCH_DISPLAY(dev))
14505                        error->pipe[i].stat = I915_READ(PIPESTAT(i));
14506        }
14507
14508        error->num_transcoders = INTEL_INFO(dev)->num_pipes;
14509        if (HAS_DDI(dev_priv->dev))
14510                error->num_transcoders++; /* Account for eDP. */
14511
14512        for (i = 0; i < error->num_transcoders; i++) {
14513                enum transcoder cpu_transcoder = transcoders[i];
14514
14515                error->transcoder[i].power_domain_on =
14516                        __intel_display_power_is_enabled(dev_priv,
14517                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
14518                if (!error->transcoder[i].power_domain_on)
14519                        continue;
14520
14521                error->transcoder[i].cpu_transcoder = cpu_transcoder;
14522
14523                error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
14524                error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
14525                error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
14526                error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
14527                error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
14528                error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
14529                error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
14530        }
14531
14532        return error;
14533}
14534
14535#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
14536
14537void
14538intel_display_print_error_state(struct drm_i915_error_state_buf *m,
14539                                struct drm_device *dev,
14540                                struct intel_display_error_state *error)
14541{
14542        struct drm_i915_private *dev_priv = dev->dev_private;
14543        int i;
14544
14545        if (!error)
14546                return;
14547
14548        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
14549        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
14550                err_printf(m, "PWR_WELL_CTL2: %08x\n",
14551                           error->power_well_driver);
14552        for_each_pipe(dev_priv, i) {
14553                err_printf(m, "Pipe [%d]:\n", i);
14554                err_printf(m, "  Power: %s\n",
14555                           error->pipe[i].power_domain_on ? "on" : "off");
14556                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
14557                err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
14558
14559                err_printf(m, "Plane [%d]:\n", i);
14560                err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
14561                err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
14562                if (INTEL_INFO(dev)->gen <= 3) {
14563                        err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
14564                        err_printf(m, "  POS: %08x\n", error->plane[i].pos);
14565                }
14566                if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
14567                        err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
14568                if (INTEL_INFO(dev)->gen >= 4) {
14569                        err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
14570                        err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
14571                }
14572
14573                err_printf(m, "Cursor [%d]:\n", i);
14574                err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
14575                err_printf(m, "  POS: %08x\n", error->cursor[i].position);
14576                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
14577        }
14578
14579        for (i = 0; i < error->num_transcoders; i++) {
14580                err_printf(m, "CPU transcoder: %c\n",
14581                           transcoder_name(error->transcoder[i].cpu_transcoder));
14582                err_printf(m, "  Power: %s\n",
14583                           error->transcoder[i].power_domain_on ? "on" : "off");
14584                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
14585                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
14586                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
14587                err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
14588                err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
14589                err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
14590                err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
14591        }
14592}
14593
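/*
 * Called when a drm file is being closed: drop any pending page flip
 * completion events destined for that file so they are never delivered to a
 * client that no longer exists.
 */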
14594void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
14595{
14596        struct intel_crtc *crtc;
14597
14598        for_each_intel_crtc(dev, crtc) {
14599                struct intel_unpin_work *work;
14600
14601                spin_lock_irq(&dev->event_lock);
14602
14603                work = crtc->unpin_work;
14604
14605                if (work && work->event &&
14606                    work->event->base.file_priv == file) {
14607                        kfree(work->event);
14608                        work->event = NULL;
14609                }
14610
14611                spin_unlock_irq(&dev->event_lock);
14612        }
14613}
14614