linux/drivers/gpu/drm/i915/display/intel_display.c
   1/*
   2 * Copyright © 2006-2007 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *      Eric Anholt <eric@anholt.net>
  25 */
  26
  27#include <acpi/video.h>
  28#include <linux/i2c.h>
  29#include <linux/input.h>
  30#include <linux/intel-iommu.h>
  31#include <linux/kernel.h>
  32#include <linux/module.h>
  33#include <linux/dma-resv.h>
  34#include <linux/slab.h>
  35#include <linux/vga_switcheroo.h>
  36
  37#include <drm/drm_atomic.h>
  38#include <drm/drm_atomic_helper.h>
  39#include <drm/drm_atomic_uapi.h>
  40#include <drm/drm_damage_helper.h>
  41#include <drm/drm_dp_helper.h>
  42#include <drm/drm_edid.h>
  43#include <drm/drm_fourcc.h>
  44#include <drm/drm_plane_helper.h>
  45#include <drm/drm_privacy_screen_consumer.h>
  46#include <drm/drm_probe_helper.h>
  47#include <drm/drm_rect.h>
  48
  49#include "display/intel_audio.h"
  50#include "display/intel_crt.h"
  51#include "display/intel_ddi.h"
  52#include "display/intel_display_debugfs.h"
  53#include "display/intel_dp.h"
  54#include "display/intel_dp_mst.h"
  55#include "display/intel_dpll.h"
  56#include "display/intel_dpll_mgr.h"
  57#include "display/intel_drrs.h"
  58#include "display/intel_dsi.h"
  59#include "display/intel_dvo.h"
  60#include "display/intel_fb.h"
  61#include "display/intel_gmbus.h"
  62#include "display/intel_hdmi.h"
  63#include "display/intel_lvds.h"
  64#include "display/intel_sdvo.h"
  65#include "display/intel_snps_phy.h"
  66#include "display/intel_tv.h"
  67#include "display/intel_vdsc.h"
  68#include "display/intel_vrr.h"
  69
  70#include "gem/i915_gem_lmem.h"
  71#include "gem/i915_gem_object.h"
  72
  73#include "gt/gen8_ppgtt.h"
  74
  75#include "g4x_dp.h"
  76#include "g4x_hdmi.h"
  77#include "i915_drv.h"
  78#include "icl_dsi.h"
  79#include "intel_acpi.h"
  80#include "intel_atomic.h"
  81#include "intel_atomic_plane.h"
  82#include "intel_bw.h"
  83#include "intel_cdclk.h"
  84#include "intel_color.h"
  85#include "intel_crtc.h"
  86#include "intel_de.h"
  87#include "intel_display_types.h"
  88#include "intel_dmc.h"
  89#include "intel_dp_link_training.h"
  90#include "intel_dpt.h"
  91#include "intel_fbc.h"
  92#include "intel_fbdev.h"
  93#include "intel_fdi.h"
  94#include "intel_fifo_underrun.h"
  95#include "intel_frontbuffer.h"
  96#include "intel_hdcp.h"
  97#include "intel_hotplug.h"
  98#include "intel_overlay.h"
  99#include "intel_panel.h"
 100#include "intel_pch_display.h"
 101#include "intel_pch_refclk.h"
 102#include "intel_pcode.h"
 103#include "intel_pipe_crc.h"
 104#include "intel_plane_initial.h"
 105#include "intel_pm.h"
 106#include "intel_pps.h"
 107#include "intel_psr.h"
 108#include "intel_quirks.h"
 109#include "intel_sprite.h"
 110#include "intel_tc.h"
 111#include "intel_vga.h"
 112#include "i9xx_plane.h"
 113#include "skl_scaler.h"
 114#include "skl_universal_plane.h"
 115#include "vlv_dsi_pll.h"
 116#include "vlv_sideband.h"
 117#include "vlv_dsi.h"
 118
 119static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
 120static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
 121static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
 122                                         const struct intel_link_m_n *m_n,
 123                                         const struct intel_link_m_n *m2_n2);
 124static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
 125static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
 126static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
 127static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
 128static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
 129static void intel_modeset_setup_hw_state(struct drm_device *dev,
 130                                         struct drm_modeset_acquire_ctx *ctx);
 131
 132/**
 133 * intel_update_watermarks - update FIFO watermark values based on current modes
 134 * @dev_priv: i915 device
 135 *
 136 * Calculate watermark values for the various WM regs based on current mode
 137 * and plane configuration.
 138 *
 139 * There are several cases to deal with here:
 140 *   - normal (i.e. non-self-refresh)
 141 *   - self-refresh (SR) mode
  142 *   - lines are large relative to FIFO size (buffer can hold up to 2 lines)
 143 *   - lines are small relative to FIFO size (buffer can hold more than 2
 144 *     lines), so need to account for TLB latency
 145 *
 146 *   The normal calculation is:
 147 *     watermark = dotclock * bytes per pixel * latency
 148 *   where latency is platform & configuration dependent (we assume pessimal
 149 *   values here).
 150 *
 151 *   The SR calculation is:
 152 *     watermark = (trunc(latency/line time)+1) * surface width *
 153 *       bytes per pixel
 154 *   where
 155 *     line time = htotal / dotclock
 156 *     surface width = hdisplay for normal plane and 64 for cursor
 157 *   and latency is assumed to be high, as above.
 158 *
 159 * The final value programmed to the register should always be rounded up,
 160 * and include an extra 2 entries to account for clock crossings.
 161 *
 162 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 163 * to set the non-SR watermarks to 8.
 164 */
 165static void intel_update_watermarks(struct drm_i915_private *dev_priv)
 166{
 167        if (dev_priv->wm_disp->update_wm)
 168                dev_priv->wm_disp->update_wm(dev_priv);
 169}
 170
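/*
 * Editorial worked example (not part of the original source): the formulas
 * documented above, evaluated with hypothetical numbers rather than values
 * taken from any particular platform.
 *
 * Normal mode, assuming a 148.5 MHz dotclock, 4 bytes per pixel and a 12 us
 * latency:
 *   watermark = 148,500,000 px/s * 4 B/px * 12 us = 7128 bytes in flight,
 * which is then rounded up and padded by the extra 2 entries noted above.
 *
 * Self-refresh mode, assuming htotal = 2200, hdisplay = 1920 (surface width),
 * the same dotclock and a 40 us latency:
 *   line time = 2200 / 148,500,000 Hz ~= 14.8 us
 *   watermark = (trunc(40 us / 14.8 us) + 1) * 1920 * 4 = 3 * 1920 * 4
 *             = 23040 bytes
 */
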
 171static int intel_compute_pipe_wm(struct intel_atomic_state *state,
 172                                 struct intel_crtc *crtc)
 173{
 174        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 175        if (dev_priv->wm_disp->compute_pipe_wm)
 176                return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
 177        return 0;
 178}
 179
 180static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
 181                                         struct intel_crtc *crtc)
 182{
 183        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 184        if (!dev_priv->wm_disp->compute_intermediate_wm)
 185                return 0;
 186        if (drm_WARN_ON(&dev_priv->drm,
 187                        !dev_priv->wm_disp->compute_pipe_wm))
 188                return 0;
 189        return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
 190}
 191
 192static bool intel_initial_watermarks(struct intel_atomic_state *state,
 193                                     struct intel_crtc *crtc)
 194{
 195        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 196        if (dev_priv->wm_disp->initial_watermarks) {
 197                dev_priv->wm_disp->initial_watermarks(state, crtc);
 198                return true;
 199        }
 200        return false;
 201}
 202
 203static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
 204                                           struct intel_crtc *crtc)
 205{
 206        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 207        if (dev_priv->wm_disp->atomic_update_watermarks)
 208                dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
 209}
 210
 211static void intel_optimize_watermarks(struct intel_atomic_state *state,
 212                                      struct intel_crtc *crtc)
 213{
 214        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 215        if (dev_priv->wm_disp->optimize_watermarks)
 216                dev_priv->wm_disp->optimize_watermarks(state, crtc);
 217}
 218
 219static int intel_compute_global_watermarks(struct intel_atomic_state *state)
 220{
 221        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 222        if (dev_priv->wm_disp->compute_global_watermarks)
 223                return dev_priv->wm_disp->compute_global_watermarks(state);
 224        return 0;
 225}
 226
 227/* returns HPLL frequency in kHz */
 228int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
 229{
 230        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
 231
 232        /* Obtain SKU information */
 233        hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
 234                CCK_FUSE_HPLL_FREQ_MASK;
 235
 236        return vco_freq[hpll_freq] * 1000;
 237}
 238
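/*
 * Editorial worked example (not part of the original source), using a
 * hypothetical fuse value: if the masked CCK_FUSE_HPLL_FREQ_MASK field above
 * reads 2, vco_freq[2] = 2000 is selected and the function returns
 * 2000 * 1000 = 2,000,000 kHz, i.e. a 2 GHz HPLL VCO.
 */
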
 239int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
 240                      const char *name, u32 reg, int ref_freq)
 241{
 242        u32 val;
 243        int divider;
 244
 245        val = vlv_cck_read(dev_priv, reg);
 246        divider = val & CCK_FREQUENCY_VALUES;
 247
 248        drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
 249                 (divider << CCK_FREQUENCY_STATUS_SHIFT),
 250                 "%s change in progress\n", name);
 251
 252        return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
 253}
 254
 255int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
 256                           const char *name, u32 reg)
 257{
 258        int hpll;
 259
 260        vlv_cck_get(dev_priv);
 261
 262        if (dev_priv->hpll_freq == 0)
 263                dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
 264
 265        hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
 266
 267        vlv_cck_put(dev_priv);
 268
 269        return hpll;
 270}
 271
 272static void intel_update_czclk(struct drm_i915_private *dev_priv)
 273{
 274        if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
 275                return;
 276
 277        dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
 278                                                      CCK_CZ_CLOCK_CONTROL);
 279
 280        drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
 281                dev_priv->czclk_freq);
 282}
 283
 284static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
 285{
 286        return (crtc_state->active_planes &
 287                ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
 288}
 289
 290/* WA Display #0827: Gen9:all */
 291static void
 292skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
 293{
 294        if (enable)
 295                intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
 296                               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
 297        else
 298                intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
 299                               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
 300}
 301
 302/* Wa_2006604312:icl,ehl */
 303static void
 304icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
 305                       bool enable)
 306{
 307        if (enable)
 308                intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
 309                               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
 310        else
 311                intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
 312                               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
 313}
 314
 315/* Wa_1604331009:icl,jsl,ehl */
 316static void
 317icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
 318                       bool enable)
 319{
 320        intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
 321                     enable ? CURSOR_GATING_DIS : 0);
 322}
 323
 324static bool
 325is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
 326{
 327        return crtc_state->master_transcoder != INVALID_TRANSCODER;
 328}
 329
 330static bool
 331is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
 332{
 333        return crtc_state->sync_mode_slaves_mask != 0;
 334}
 335
 336bool
 337is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
 338{
 339        return is_trans_port_sync_master(crtc_state) ||
 340                is_trans_port_sync_slave(crtc_state);
 341}
 342
 343static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
 344{
 345        if (crtc_state->bigjoiner_slave)
 346                return crtc_state->bigjoiner_linked_crtc;
 347        else
 348                return to_intel_crtc(crtc_state->uapi.crtc);
 349}
 350
 351static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
 352                                    enum pipe pipe)
 353{
 354        i915_reg_t reg = PIPEDSL(pipe);
 355        u32 line1, line2;
 356        u32 line_mask;
 357
 358        if (DISPLAY_VER(dev_priv) == 2)
 359                line_mask = DSL_LINEMASK_GEN2;
 360        else
 361                line_mask = DSL_LINEMASK_GEN3;
 362
 363        line1 = intel_de_read(dev_priv, reg) & line_mask;
 364        msleep(5);
 365        line2 = intel_de_read(dev_priv, reg) & line_mask;
 366
 367        return line1 != line2;
 368}
 369
 370static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
 371{
 372        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 373        enum pipe pipe = crtc->pipe;
 374
 375        /* Wait for the display line to settle/start moving */
 376        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
 377                drm_err(&dev_priv->drm,
 378                        "pipe %c scanline %s wait timed out\n",
 379                        pipe_name(pipe), onoff(state));
 380}
 381
 382static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
 383{
 384        wait_for_pipe_scanline_moving(crtc, false);
 385}
 386
 387static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
 388{
 389        wait_for_pipe_scanline_moving(crtc, true);
 390}
 391
 392static void
 393intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
 394{
 395        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 396        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 397
 398        if (DISPLAY_VER(dev_priv) >= 4) {
 399                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
 400                i915_reg_t reg = PIPECONF(cpu_transcoder);
 401
 402                /* Wait for the Pipe State to go off */
 403                if (intel_de_wait_for_clear(dev_priv, reg,
 404                                            I965_PIPECONF_ACTIVE, 100))
 405                        drm_WARN(&dev_priv->drm, 1,
 406                                 "pipe_off wait timed out\n");
 407        } else {
 408                intel_wait_for_pipe_scanline_stopped(crtc);
 409        }
 410}
 411
 412void assert_transcoder(struct drm_i915_private *dev_priv,
 413                       enum transcoder cpu_transcoder, bool state)
 414{
 415        bool cur_state;
 416        enum intel_display_power_domain power_domain;
 417        intel_wakeref_t wakeref;
 418
 419        /* we keep both pipes enabled on 830 */
 420        if (IS_I830(dev_priv))
 421                state = true;
 422
 423        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
 424        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
 425        if (wakeref) {
 426                u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
 427                cur_state = !!(val & PIPECONF_ENABLE);
 428
 429                intel_display_power_put(dev_priv, power_domain, wakeref);
 430        } else {
 431                cur_state = false;
 432        }
 433
 434        I915_STATE_WARN(cur_state != state,
 435                        "transcoder %s assertion failure (expected %s, current %s)\n",
 436                        transcoder_name(cpu_transcoder),
 437                        onoff(state), onoff(cur_state));
 438}
 439
 440static void assert_plane(struct intel_plane *plane, bool state)
 441{
 442        enum pipe pipe;
 443        bool cur_state;
 444
 445        cur_state = plane->get_hw_state(plane, &pipe);
 446
 447        I915_STATE_WARN(cur_state != state,
 448                        "%s assertion failure (expected %s, current %s)\n",
 449                        plane->base.name, onoff(state), onoff(cur_state));
 450}
 451
 452#define assert_plane_enabled(p) assert_plane(p, true)
 453#define assert_plane_disabled(p) assert_plane(p, false)
 454
 455static void assert_planes_disabled(struct intel_crtc *crtc)
 456{
 457        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 458        struct intel_plane *plane;
 459
 460        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
 461                assert_plane_disabled(plane);
 462}
 463
 464void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 465                         struct intel_digital_port *dig_port,
 466                         unsigned int expected_mask)
 467{
 468        u32 port_mask;
 469        i915_reg_t dpll_reg;
 470
 471        switch (dig_port->base.port) {
 472        case PORT_B:
 473                port_mask = DPLL_PORTB_READY_MASK;
 474                dpll_reg = DPLL(0);
 475                break;
 476        case PORT_C:
 477                port_mask = DPLL_PORTC_READY_MASK;
 478                dpll_reg = DPLL(0);
 479                expected_mask <<= 4;
 480                break;
 481        case PORT_D:
 482                port_mask = DPLL_PORTD_READY_MASK;
 483                dpll_reg = DPIO_PHY_STATUS;
 484                break;
 485        default:
 486                BUG();
 487        }
 488
 489        if (intel_de_wait_for_register(dev_priv, dpll_reg,
 490                                       port_mask, expected_mask, 1000))
 491                drm_WARN(&dev_priv->drm, 1,
 492                         "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
 493                         dig_port->base.base.base.id, dig_port->base.base.name,
 494                         intel_de_read(dev_priv, dpll_reg) & port_mask,
 495                         expected_mask);
 496}
 497
 498enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
 499{
 500        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 501
 502        if (HAS_PCH_LPT(dev_priv))
 503                return PIPE_A;
 504        else
 505                return crtc->pipe;
 506}
 507
 508void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
 509{
 510        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 511        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 512        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
 513        enum pipe pipe = crtc->pipe;
 514        i915_reg_t reg;
 515        u32 val;
 516
 517        drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
 518
 519        assert_planes_disabled(crtc);
 520
 521        /*
 522         * A pipe without a PLL won't actually be able to drive bits from
 523         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
 524         * need the check.
 525         */
 526        if (HAS_GMCH(dev_priv)) {
 527                if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
 528                        assert_dsi_pll_enabled(dev_priv);
 529                else
 530                        assert_pll_enabled(dev_priv, pipe);
 531        } else {
 532                if (new_crtc_state->has_pch_encoder) {
 533                        /* if driving the PCH, we need FDI enabled */
 534                        assert_fdi_rx_pll_enabled(dev_priv,
 535                                                  intel_crtc_pch_transcoder(crtc));
 536                        assert_fdi_tx_pll_enabled(dev_priv,
 537                                                  (enum pipe) cpu_transcoder);
 538                }
 539                /* FIXME: assert CPU port conditions for SNB+ */
 540        }
 541
 542        /* Wa_22012358565:adl-p */
 543        if (DISPLAY_VER(dev_priv) == 13)
 544                intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
 545                             0, PIPE_ARB_USE_PROG_SLOTS);
 546
 547        reg = PIPECONF(cpu_transcoder);
 548        val = intel_de_read(dev_priv, reg);
 549        if (val & PIPECONF_ENABLE) {
 550                /* we keep both pipes enabled on 830 */
 551                drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
 552                return;
 553        }
 554
 555        intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
 556        intel_de_posting_read(dev_priv, reg);
 557
 558        /*
  559         * Until the pipe starts, PIPEDSL reads will return a stale value,
 560         * which causes an apparent vblank timestamp jump when PIPEDSL
 561         * resets to its proper value. That also messes up the frame count
 562         * when it's derived from the timestamps. So let's wait for the
 563         * pipe to start properly before we call drm_crtc_vblank_on()
 564         */
 565        if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
 566                intel_wait_for_pipe_scanline_moving(crtc);
 567}
 568
 569void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
 570{
 571        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 572        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 573        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
 574        enum pipe pipe = crtc->pipe;
 575        i915_reg_t reg;
 576        u32 val;
 577
 578        drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
 579
 580        /*
 581         * Make sure planes won't keep trying to pump pixels to us,
 582         * or we might hang the display.
 583         */
 584        assert_planes_disabled(crtc);
 585
 586        reg = PIPECONF(cpu_transcoder);
 587        val = intel_de_read(dev_priv, reg);
 588        if ((val & PIPECONF_ENABLE) == 0)
 589                return;
 590
 591        /*
 592         * Double wide has implications for planes
 593         * so best keep it disabled when not needed.
 594         */
 595        if (old_crtc_state->double_wide)
 596                val &= ~PIPECONF_DOUBLE_WIDE;
 597
  598        /* Don't disable the pipe or pipe PLLs if they're still needed (830) */
 599        if (!IS_I830(dev_priv))
 600                val &= ~PIPECONF_ENABLE;
 601
 602        if (DISPLAY_VER(dev_priv) >= 12)
 603                intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
 604                             FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
 605
 606        intel_de_write(dev_priv, reg, val);
 607        if ((val & PIPECONF_ENABLE) == 0)
 608                intel_wait_for_pipe_off(old_crtc_state);
 609}
 610
 611unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
 612{
 613        unsigned int size = 0;
 614        int i;
 615
 616        for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
 617                size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
 618
 619        return size;
 620}
 621
 622unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
 623{
 624        unsigned int size = 0;
 625        int i;
 626
 627        for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
 628                unsigned int plane_size;
 629
 630                if (rem_info->plane[i].linear)
 631                        plane_size = rem_info->plane[i].size;
 632                else
 633                        plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
 634
 635                if (plane_size == 0)
 636                        continue;
 637
 638                if (rem_info->plane_alignment)
 639                        size = ALIGN(size, rem_info->plane_alignment);
 640
 641                size += plane_size;
 642        }
 643
 644        return size;
 645}
 646
 647bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
 648{
 649        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 650        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 651
 652        return DISPLAY_VER(dev_priv) < 4 ||
 653                (plane->fbc &&
 654                 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
 655}
 656
 657/*
 658 * Convert the x/y offsets into a linear offset.
 659 * Only valid with 0/180 degree rotation, which is fine since linear
 660 * offset is only used with linear buffers on pre-hsw and tiled buffers
  661 * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
 662 */
 663u32 intel_fb_xy_to_linear(int x, int y,
 664                          const struct intel_plane_state *state,
 665                          int color_plane)
 666{
 667        const struct drm_framebuffer *fb = state->hw.fb;
 668        unsigned int cpp = fb->format->cpp[color_plane];
 669        unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
 670
 671        return y * pitch + x * cpp;
 672}
 673
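/*
 * Editorial worked example (not part of the original source), with
 * hypothetical values: for x = 100, y = 50, a 4 byte-per-pixel format and a
 * 4096 byte mapping stride, the function above returns
 * 50 * 4096 + 100 * 4 = 205200 bytes from the start of the mapping.
 */
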
 674/*
 675 * Add the x/y offsets derived from fb->offsets[] to the user
 676 * specified plane src x/y offsets. The resulting x/y offsets
 677 * specify the start of scanout from the beginning of the gtt mapping.
 678 */
 679void intel_add_fb_offsets(int *x, int *y,
 680                          const struct intel_plane_state *state,
 681                          int color_plane)
 682
 683{
 684        *x += state->view.color_plane[color_plane].x;
 685        *y += state->view.color_plane[color_plane].y;
 686}
 687
 688u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
 689                              u32 pixel_format, u64 modifier)
 690{
 691        struct intel_crtc *crtc;
 692        struct intel_plane *plane;
 693
 694        if (!HAS_DISPLAY(dev_priv))
 695                return 0;
 696
 697        /*
 698         * We assume the primary plane for pipe A has
  699         * the highest stride limits of them all;
  700         * if pipe A happens to be disabled, use the first pipe from pipe_mask instead.
 701         */
 702        crtc = intel_first_crtc(dev_priv);
 703        if (!crtc)
 704                return 0;
 705
 706        plane = to_intel_plane(crtc->base.primary);
 707
 708        return plane->max_stride(plane, pixel_format, modifier,
 709                                 DRM_MODE_ROTATE_0);
 710}
 711
 712static void
 713intel_set_plane_visible(struct intel_crtc_state *crtc_state,
 714                        struct intel_plane_state *plane_state,
 715                        bool visible)
 716{
 717        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 718
 719        plane_state->uapi.visible = visible;
 720
 721        if (visible)
 722                crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
 723        else
 724                crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
 725}
 726
 727static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
 728{
 729        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 730        struct drm_plane *plane;
 731
 732        /*
 733         * Active_planes aliases if multiple "primary" or cursor planes
 734         * have been used on the same (or wrong) pipe. plane_mask uses
 735         * unique ids, hence we can use that to reconstruct active_planes.
 736         */
 737        crtc_state->enabled_planes = 0;
 738        crtc_state->active_planes = 0;
 739
 740        drm_for_each_plane_mask(plane, &dev_priv->drm,
 741                                crtc_state->uapi.plane_mask) {
 742                crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
 743                crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
 744        }
 745}
 746
 747void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 748                                  struct intel_plane *plane)
 749{
 750        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 751        struct intel_crtc_state *crtc_state =
 752                to_intel_crtc_state(crtc->base.state);
 753        struct intel_plane_state *plane_state =
 754                to_intel_plane_state(plane->base.state);
 755
 756        drm_dbg_kms(&dev_priv->drm,
 757                    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
 758                    plane->base.base.id, plane->base.name,
 759                    crtc->base.base.id, crtc->base.name);
 760
 761        intel_set_plane_visible(crtc_state, plane_state, false);
 762        fixup_plane_bitmasks(crtc_state);
 763        crtc_state->data_rate[plane->id] = 0;
 764        crtc_state->min_cdclk[plane->id] = 0;
 765
 766        if (plane->id == PLANE_PRIMARY)
 767                hsw_disable_ips(crtc_state);
 768
 769        /*
 770         * Vblank time updates from the shadow to live plane control register
 771         * are blocked if the memory self-refresh mode is active at that
 772         * moment. So to make sure the plane gets truly disabled, disable
 773         * first the self-refresh mode. The self-refresh enable bit in turn
 774         * will be checked/applied by the HW only at the next frame start
 775         * event which is after the vblank start event, so we need to have a
 776         * wait-for-vblank between disabling the plane and the pipe.
 777         */
 778        if (HAS_GMCH(dev_priv) &&
 779            intel_set_memory_cxsr(dev_priv, false))
 780                intel_crtc_wait_for_next_vblank(crtc);
 781
 782        /*
 783         * Gen2 reports pipe underruns whenever all planes are disabled.
 784         * So disable underrun reporting before all the planes get disabled.
 785         */
 786        if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
 787                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
 788
 789        intel_plane_disable_arm(plane, crtc_state);
 790        intel_crtc_wait_for_next_vblank(crtc);
 791}
 792
 793unsigned int
 794intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
 795{
 796        int x = 0, y = 0;
 797
 798        intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
 799                                          plane_state->view.color_plane[0].offset, 0);
 800
 801        return y;
 802}
 803
 804static int
 805__intel_display_resume(struct drm_device *dev,
 806                       struct drm_atomic_state *state,
 807                       struct drm_modeset_acquire_ctx *ctx)
 808{
 809        struct drm_crtc_state *crtc_state;
 810        struct drm_crtc *crtc;
 811        int i, ret;
 812
 813        intel_modeset_setup_hw_state(dev, ctx);
 814        intel_vga_redisable(to_i915(dev));
 815
 816        if (!state)
 817                return 0;
 818
 819        /*
  820         * We've duplicated the state; pointers to the old state are now invalid.
 821         *
 822         * Don't attempt to use the old state until we commit the duplicated state.
 823         */
 824        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 825                /*
 826                 * Force recalculation even if we restore
 827                 * current state. With fast modeset this may not result
 828                 * in a modeset when the state is compatible.
 829                 */
 830                crtc_state->mode_changed = true;
 831        }
 832
 833        /* ignore any reset values/BIOS leftovers in the WM registers */
 834        if (!HAS_GMCH(to_i915(dev)))
 835                to_intel_atomic_state(state)->skip_intermediate_wm = true;
 836
 837        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
 838
 839        drm_WARN_ON(dev, ret == -EDEADLK);
 840        return ret;
 841}
 842
 843static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
 844{
 845        return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
 846                intel_has_gpu_reset(to_gt(dev_priv)));
 847}
 848
 849void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
 850{
 851        struct drm_device *dev = &dev_priv->drm;
 852        struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
 853        struct drm_atomic_state *state;
 854        int ret;
 855
 856        if (!HAS_DISPLAY(dev_priv))
 857                return;
 858
 859        /* reset doesn't touch the display */
 860        if (!dev_priv->params.force_reset_modeset_test &&
 861            !gpu_reset_clobbers_display(dev_priv))
 862                return;
 863
 864        /* We have a modeset vs reset deadlock, defensively unbreak it. */
 865        set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
 866        smp_mb__after_atomic();
 867        wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
 868
 869        if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
 870                drm_dbg_kms(&dev_priv->drm,
 871                            "Modeset potentially stuck, unbreaking through wedging\n");
 872                intel_gt_set_wedged(to_gt(dev_priv));
 873        }
 874
 875        /*
 876         * Need mode_config.mutex so that we don't
 877         * trample ongoing ->detect() and whatnot.
 878         */
 879        mutex_lock(&dev->mode_config.mutex);
 880        drm_modeset_acquire_init(ctx, 0);
 881        while (1) {
 882                ret = drm_modeset_lock_all_ctx(dev, ctx);
 883                if (ret != -EDEADLK)
 884                        break;
 885
 886                drm_modeset_backoff(ctx);
 887        }
 888        /*
 889         * Disabling the crtcs gracefully seems nicer. Also the
 890         * g33 docs say we should at least disable all the planes.
 891         */
 892        state = drm_atomic_helper_duplicate_state(dev, ctx);
 893        if (IS_ERR(state)) {
 894                ret = PTR_ERR(state);
 895                drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
 896                        ret);
 897                return;
 898        }
 899
 900        ret = drm_atomic_helper_disable_all(dev, ctx);
 901        if (ret) {
  902                drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n",
 903                        ret);
 904                drm_atomic_state_put(state);
 905                return;
 906        }
 907
 908        dev_priv->modeset_restore_state = state;
 909        state->acquire_ctx = ctx;
 910}
 911
 912void intel_display_finish_reset(struct drm_i915_private *dev_priv)
 913{
 914        struct drm_device *dev = &dev_priv->drm;
 915        struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
 916        struct drm_atomic_state *state;
 917        int ret;
 918
 919        if (!HAS_DISPLAY(dev_priv))
 920                return;
 921
 922        /* reset doesn't touch the display */
 923        if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
 924                return;
 925
 926        state = fetch_and_zero(&dev_priv->modeset_restore_state);
 927        if (!state)
 928                goto unlock;
 929
 930        /* reset doesn't touch the display */
 931        if (!gpu_reset_clobbers_display(dev_priv)) {
  932                /* for testing: only restore the display */
 933                ret = __intel_display_resume(dev, state, ctx);
 934                if (ret)
 935                        drm_err(&dev_priv->drm,
 936                                "Restoring old state failed with %i\n", ret);
 937        } else {
 938                /*
 939                 * The display has been reset as well,
 940                 * so need a full re-initialization.
  941                 * so we need a full re-initialization.
 942                intel_pps_unlock_regs_wa(dev_priv);
 943                intel_modeset_init_hw(dev_priv);
 944                intel_init_clock_gating(dev_priv);
 945                intel_hpd_init(dev_priv);
 946
 947                ret = __intel_display_resume(dev, state, ctx);
 948                if (ret)
 949                        drm_err(&dev_priv->drm,
 950                                "Restoring old state failed with %i\n", ret);
 951
 952                intel_hpd_poll_disable(dev_priv);
 953        }
 954
 955        drm_atomic_state_put(state);
 956unlock:
 957        drm_modeset_drop_locks(ctx);
 958        drm_modeset_acquire_fini(ctx);
 959        mutex_unlock(&dev->mode_config.mutex);
 960
 961        clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
 962}
 963
 964static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
 965{
 966        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 967        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 968        enum pipe pipe = crtc->pipe;
 969        u32 tmp;
 970
 971        tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
 972
 973        /*
 974         * Display WA #1153: icl
  975         * Enable the hardware to bypass the alpha math
  976         * and rounding for per-pixel values 0x00 and 0xff
 977         */
 978        tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
 979        /*
  980         * Display WA #1605353570: icl
  981         * Set the pixel rounding bit to 1 to allow
  982         * passthrough of frame buffer pixels unmodified
  983         * across the pipe
 984         */
 985        tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
 986
 987        /*
 988         * Underrun recovery must always be disabled on display 13+.
 989         * DG2 chicken bit meaning is inverted compared to other platforms.
 990         */
 991        if (IS_DG2(dev_priv))
 992                tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
 993        else if (DISPLAY_VER(dev_priv) >= 13)
 994                tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
 995
 996        /* Wa_14010547955:dg2 */
 997        if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
 998                tmp |= DG2_RENDER_CCSTAG_4_3_EN;
 999
1000        intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1001}
1002
1003bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
1004{
1005        struct drm_crtc *crtc;
1006        bool cleanup_done;
1007
1008        drm_for_each_crtc(crtc, &dev_priv->drm) {
1009                struct drm_crtc_commit *commit;
1010                spin_lock(&crtc->commit_lock);
1011                commit = list_first_entry_or_null(&crtc->commit_list,
1012                                                  struct drm_crtc_commit, commit_entry);
1013                cleanup_done = commit ?
1014                        try_wait_for_completion(&commit->cleanup_done) : true;
1015                spin_unlock(&crtc->commit_lock);
1016
1017                if (cleanup_done)
1018                        continue;
1019
1020                intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
1021
1022                return true;
1023        }
1024
1025        return false;
1026}
1027
1028/*
1029 * Finds the encoder associated with the given CRTC. This can only be
1030 * used when we know that the CRTC isn't feeding multiple encoders!
1031 */
1032struct intel_encoder *
1033intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
1034                           const struct intel_crtc_state *crtc_state)
1035{
1036        const struct drm_connector_state *connector_state;
1037        const struct drm_connector *connector;
1038        struct intel_encoder *encoder = NULL;
1039        struct intel_crtc *master_crtc;
1040        int num_encoders = 0;
1041        int i;
1042
1043        master_crtc = intel_master_crtc(crtc_state);
1044
1045        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
1046                if (connector_state->crtc != &master_crtc->base)
1047                        continue;
1048
1049                encoder = to_intel_encoder(connector_state->best_encoder);
1050                num_encoders++;
1051        }
1052
1053        drm_WARN(encoder->base.dev, num_encoders != 1,
1054                 "%d encoders for pipe %c\n",
1055                 num_encoders, pipe_name(master_crtc->pipe));
1056
1057        return encoder;
1058}
1059
1060static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
1061                               enum pipe pipe)
1062{
1063        i915_reg_t dslreg = PIPEDSL(pipe);
1064        u32 temp;
1065
1066        temp = intel_de_read(dev_priv, dslreg);
1067        udelay(500);
1068        if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
1069                if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
1070                        drm_err(&dev_priv->drm,
1071                                "mode set failed: pipe %c stuck\n",
1072                                pipe_name(pipe));
1073        }
1074}
1075
1076static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
1077{
1078        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1079        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1080        const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
1081        enum pipe pipe = crtc->pipe;
1082        int width = drm_rect_width(dst);
1083        int height = drm_rect_height(dst);
1084        int x = dst->x1;
1085        int y = dst->y1;
1086
1087        if (!crtc_state->pch_pfit.enabled)
1088                return;
1089
1090        /* Force use of hard-coded filter coefficients
1091         * as some pre-programmed values are broken,
1092         * e.g. x201.
1093         */
1094        if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
1095                intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1096                               PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
1097        else
1098                intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1099                               PF_FILTER_MED_3x3);
1100        intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
1101        intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
1102}
1103
1104void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
1105{
1106        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1107        struct drm_device *dev = crtc->base.dev;
1108        struct drm_i915_private *dev_priv = to_i915(dev);
1109
1110        if (!crtc_state->ips_enabled)
1111                return;
1112
1113        /*
 1114         * We can only enable IPS after we enable a plane and wait for a vblank.
1115         * This function is called from post_plane_update, which is run after
1116         * a vblank wait.
1117         */
1118        drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
1119
1120        if (IS_BROADWELL(dev_priv)) {
1121                drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
1122                                                         IPS_ENABLE | IPS_PCODE_CONTROL));
 1123                /* Quoting Art Runyan: "it's not safe to expect any particular
1124                 * value in IPS_CTL bit 31 after enabling IPS through the
1125                 * mailbox." Moreover, the mailbox may return a bogus state,
1126                 * so we need to just enable it and continue on.
1127                 */
1128        } else {
1129                intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
1130                /* The bit only becomes 1 in the next vblank, so this wait here
1131                 * is essentially intel_wait_for_vblank. If we don't have this
1132                 * and don't wait for vblanks until the end of crtc_enable, then
1133                 * the HW state readout code will complain that the expected
1134                 * IPS_CTL value is not the one we read. */
1135                if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
1136                        drm_err(&dev_priv->drm,
1137                                "Timed out waiting for IPS enable\n");
1138        }
1139}
1140
1141void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
1142{
1143        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1144        struct drm_device *dev = crtc->base.dev;
1145        struct drm_i915_private *dev_priv = to_i915(dev);
1146
1147        if (!crtc_state->ips_enabled)
1148                return;
1149
1150        if (IS_BROADWELL(dev_priv)) {
1151                drm_WARN_ON(dev,
1152                            sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
1153                /*
 1154                 * Wait for PCODE to finish disabling IPS. The BSpec-specified
 1155                 * 42ms timeout value leads to occasional timeouts, so use 100ms
1156                 * instead.
1157                 */
1158                if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
1159                        drm_err(&dev_priv->drm,
1160                                "Timed out waiting for IPS disable\n");
1161        } else {
1162                intel_de_write(dev_priv, IPS_CTL, 0);
1163                intel_de_posting_read(dev_priv, IPS_CTL);
1164        }
1165
1166        /* We need to wait for a vblank before we can disable the plane. */
1167        intel_crtc_wait_for_next_vblank(crtc);
1168}
1169
1170static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
1171{
1172        if (crtc->overlay)
1173                (void) intel_overlay_switch_off(crtc->overlay);
1174
1175        /* Let userspace switch the overlay on again. In most cases userspace
1176         * has to recompute where to put it anyway.
1177         */
1178}
1179
1180static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
1181                                       const struct intel_crtc_state *new_crtc_state)
1182{
1183        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1184        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1185
1186        if (!old_crtc_state->ips_enabled)
1187                return false;
1188
1189        if (intel_crtc_needs_modeset(new_crtc_state))
1190                return true;
1191
1192        /*
 1193         * Workaround: Do not read or write the pipe palette/gamma data while
1194         * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1195         *
1196         * Disable IPS before we program the LUT.
1197         */
1198        if (IS_HASWELL(dev_priv) &&
1199            (new_crtc_state->uapi.color_mgmt_changed ||
1200             new_crtc_state->update_pipe) &&
1201            new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1202                return true;
1203
1204        return !new_crtc_state->ips_enabled;
1205}
1206
1207static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
1208                                       const struct intel_crtc_state *new_crtc_state)
1209{
1210        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1211        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1212
1213        if (!new_crtc_state->ips_enabled)
1214                return false;
1215
1216        if (intel_crtc_needs_modeset(new_crtc_state))
1217                return true;
1218
1219        /*
 1220         * Workaround: Do not read or write the pipe palette/gamma data while
1221         * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1222         *
1223         * Re-enable IPS after the LUT has been programmed.
1224         */
1225        if (IS_HASWELL(dev_priv) &&
1226            (new_crtc_state->uapi.color_mgmt_changed ||
1227             new_crtc_state->update_pipe) &&
1228            new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1229                return true;
1230
1231        /*
 1232         * We can't read out IPS on Broadwell, so assume the worst and
1233         * forcibly enable IPS on the first fastset.
1234         */
1235        if (new_crtc_state->update_pipe && old_crtc_state->inherited)
1236                return true;
1237
1238        return !old_crtc_state->ips_enabled;
1239}
1240
1241static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1242{
1243        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1244
1245        if (!crtc_state->nv12_planes)
1246                return false;
1247
1248        /* WA Display #0827: Gen9:all */
1249        if (DISPLAY_VER(dev_priv) == 9)
1250                return true;
1251
1252        return false;
1253}
1254
1255static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1256{
1257        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1258
1259        /* Wa_2006604312:icl,ehl */
1260        if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
1261                return true;
1262
1263        return false;
1264}
1265
1266static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1267{
1268        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1269
1270        /* Wa_1604331009:icl,jsl,ehl */
1271        if (is_hdr_mode(crtc_state) &&
1272            crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1273            DISPLAY_VER(dev_priv) == 11)
1274                return true;
1275
1276        return false;
1277}
1278
1279static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
1280                                    enum pipe pipe, bool enable)
1281{
1282        if (DISPLAY_VER(i915) == 9) {
1283                /*
 1284                 * "Plane N stretch max must be programmed to 11b (x1)
1285                 *  when Async flips are enabled on that plane."
1286                 */
1287                intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1288                             SKL_PLANE1_STRETCH_MAX_MASK,
1289                             enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
1290        } else {
1291                /* Also needed on HSW/BDW albeit undocumented */
1292                intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1293                             HSW_PRI_STRETCH_MAX_MASK,
1294                             enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
1295        }
1296}
1297
1298static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1299{
1300        struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1301
1302        return crtc_state->uapi.async_flip && intel_vtd_active(i915) &&
1303                (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
1304}
1305
1306static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1307                            const struct intel_crtc_state *new_crtc_state)
1308{
1309        return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1310                new_crtc_state->active_planes;
1311}
1312
1313static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1314                             const struct intel_crtc_state *new_crtc_state)
1315{
1316        return old_crtc_state->active_planes &&
1317                (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
1318}
1319
1320static void intel_post_plane_update(struct intel_atomic_state *state,
1321                                    struct intel_crtc *crtc)
1322{
1323        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1324        const struct intel_crtc_state *old_crtc_state =
1325                intel_atomic_get_old_crtc_state(state, crtc);
1326        const struct intel_crtc_state *new_crtc_state =
1327                intel_atomic_get_new_crtc_state(state, crtc);
1328        enum pipe pipe = crtc->pipe;
1329
1330        intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
1331
1332        if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
1333                intel_update_watermarks(dev_priv);
1334
1335        if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
1336                hsw_enable_ips(new_crtc_state);
1337
1338        intel_fbc_post_update(state, crtc);
1339        intel_drrs_page_flip(state, crtc);
1340
1341        if (needs_async_flip_vtd_wa(old_crtc_state) &&
1342            !needs_async_flip_vtd_wa(new_crtc_state))
1343                intel_async_flip_vtd_wa(dev_priv, pipe, false);
1344
1345        if (needs_nv12_wa(old_crtc_state) &&
1346            !needs_nv12_wa(new_crtc_state))
1347                skl_wa_827(dev_priv, pipe, false);
1348
1349        if (needs_scalerclk_wa(old_crtc_state) &&
1350            !needs_scalerclk_wa(new_crtc_state))
1351                icl_wa_scalerclkgating(dev_priv, pipe, false);
1352
1353        if (needs_cursorclk_wa(old_crtc_state) &&
1354            !needs_cursorclk_wa(new_crtc_state))
1355                icl_wa_cursorclkgating(dev_priv, pipe, false);
1356
1358
1359static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1360                                        struct intel_crtc *crtc)
1361{
1362        const struct intel_crtc_state *crtc_state =
1363                intel_atomic_get_new_crtc_state(state, crtc);
1364        u8 update_planes = crtc_state->update_planes;
1365        const struct intel_plane_state *plane_state;
1366        struct intel_plane *plane;
1367        int i;
1368
1369        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1370                if (plane->enable_flip_done &&
1371                    plane->pipe == crtc->pipe &&
1372                    update_planes & BIT(plane->id))
1373                        plane->enable_flip_done(plane);
1374        }
1375}
1376
1377static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1378                                         struct intel_crtc *crtc)
1379{
1380        const struct intel_crtc_state *crtc_state =
1381                intel_atomic_get_new_crtc_state(state, crtc);
1382        u8 update_planes = crtc_state->update_planes;
1383        const struct intel_plane_state *plane_state;
1384        struct intel_plane *plane;
1385        int i;
1386
1387        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1388                if (plane->disable_flip_done &&
1389                    plane->pipe == crtc->pipe &&
1390                    update_planes & BIT(plane->id))
1391                        plane->disable_flip_done(plane);
1392        }
1393}
1394
1395static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
1396                                             struct intel_crtc *crtc)
1397{
1398        const struct intel_crtc_state *old_crtc_state =
1399                intel_atomic_get_old_crtc_state(state, crtc);
1400        const struct intel_crtc_state *new_crtc_state =
1401                intel_atomic_get_new_crtc_state(state, crtc);
1402        u8 update_planes = new_crtc_state->update_planes;
1403        const struct intel_plane_state *old_plane_state;
1404        struct intel_plane *plane;
1405        bool need_vbl_wait = false;
1406        int i;
1407
1408        for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1409                if (plane->need_async_flip_disable_wa &&
1410                    plane->pipe == crtc->pipe &&
1411                    update_planes & BIT(plane->id)) {
1412                        /*
1413                         * Apart from the async flip bit we want to
1414                         * preserve the old state for the plane.
1415                         */
1416                        plane->async_flip(plane, old_crtc_state,
1417                                          old_plane_state, false);
1418                        need_vbl_wait = true;
1419                }
1420        }
1421
1422        if (need_vbl_wait)
1423                intel_crtc_wait_for_next_vblank(crtc);
1424}
1425
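/*
 * Prepare @crtc for an upcoming plane update: run the PSR/IPS/FBC pre-update
 * work, switch on the display workarounds that must be active before the
 * planes change, program the pre-vblank watermarks, and on gen2 suppress
 * FIFO underrun reporting while planes are being disabled.
 */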
1426static void intel_pre_plane_update(struct intel_atomic_state *state,
1427                                   struct intel_crtc *crtc)
1428{
1429        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1430        const struct intel_crtc_state *old_crtc_state =
1431                intel_atomic_get_old_crtc_state(state, crtc);
1432        const struct intel_crtc_state *new_crtc_state =
1433                intel_atomic_get_new_crtc_state(state, crtc);
1434        enum pipe pipe = crtc->pipe;
1435
1436        intel_psr_pre_plane_update(state, crtc);
1437
1438        if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
1439                hsw_disable_ips(old_crtc_state);
1440
1441        if (intel_fbc_pre_update(state, crtc))
1442                intel_crtc_wait_for_next_vblank(crtc);
1443
1444        if (!needs_async_flip_vtd_wa(old_crtc_state) &&
1445            needs_async_flip_vtd_wa(new_crtc_state))
1446                intel_async_flip_vtd_wa(dev_priv, pipe, true);
1447
1448        /* Display WA 827 */
1449        if (!needs_nv12_wa(old_crtc_state) &&
1450            needs_nv12_wa(new_crtc_state))
1451                skl_wa_827(dev_priv, pipe, true);
1452
1453        /* Wa_2006604312:icl,ehl */
1454        if (!needs_scalerclk_wa(old_crtc_state) &&
1455            needs_scalerclk_wa(new_crtc_state))
1456                icl_wa_scalerclkgating(dev_priv, pipe, true);
1457
1458        /* Wa_1604331009:icl,jsl,ehl */
1459        if (!needs_cursorclk_wa(old_crtc_state) &&
1460            needs_cursorclk_wa(new_crtc_state))
1461                icl_wa_cursorclkgating(dev_priv, pipe, true);
1462
1463        /*
1464         * Vblank time updates from the shadow to live plane control register
1465         * are blocked if the memory self-refresh mode is active at that
1466         * moment. So to make sure the plane gets truly disabled, disable
1467         * first the self-refresh mode. The self-refresh enable bit in turn
1468         * will be checked/applied by the HW only at the next frame start
1469         * event which is after the vblank start event, so we need to have a
1470         * wait-for-vblank between disabling the plane and the pipe.
1471         */
1472        if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
1473            new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
1474                intel_crtc_wait_for_next_vblank(crtc);
1475
1476        /*
1477         * IVB workaround: must disable low power watermarks for at least
1478         * one frame before enabling scaling.  LP watermarks can be re-enabled
1479         * when scaling is disabled.
1480         *
1481         * WaCxSRDisabledForSpriteScaling:ivb
1482         */
1483        if (old_crtc_state->hw.active &&
1484            new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
1485                intel_crtc_wait_for_next_vblank(crtc);
1486
1487        /*
1488         * If we're doing a modeset we don't need to do any
1489         * pre-vblank watermark programming here.
1490         */
1491        if (!intel_crtc_needs_modeset(new_crtc_state)) {
1492                /*
1493                 * For platforms that support atomic watermarks, program the
1494                 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
1495                 * will be the intermediate values that are safe for both pre- and
1496                 * post- vblank; when vblank happens, the 'active' values will be set
1497                 * to the final 'target' values and we'll do this again to get the
1498                 * optimal watermarks.  For gen9+ platforms, the values we program here
1499                 * will be the final target values which will get automatically latched
1500                 * at vblank time; no further programming will be necessary.
1501                 *
1502                 * If a platform hasn't been transitioned to atomic watermarks yet,
1503                 * we'll continue to update watermarks the old way, if flags tell
1504                 * us to.
1505                 */
1506                if (!intel_initial_watermarks(state, crtc))
1507                        if (new_crtc_state->update_wm_pre)
1508                                intel_update_watermarks(dev_priv);
1509        }
1510
1511        /*
1512         * Gen2 reports pipe underruns whenever all planes are disabled.
1513         * So disable underrun reporting before all the planes get disabled.
1514         *
1515         * We do this after .initial_watermarks() so that we have a
1516         * chance of catching underruns with the intermediate watermarks
1517         * vs. the old plane configuration.
1518         */
1519        if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
1520                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1521
1522        /*
1523         * WA for platforms where async address update enable bit
1524         * is double buffered and only latched at start of vblank.
1525         */
1526        if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
1527                intel_crtc_async_flip_disable_wa(state, crtc);
1528}
1529
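/*
 * Disable every plane in @crtc's update_planes mask and flush the
 * frontbuffer bits of the planes that were visible via
 * intel_frontbuffer_flip().
 */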
1530static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1531                                      struct intel_crtc *crtc)
1532{
1533        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1534        const struct intel_crtc_state *new_crtc_state =
1535                intel_atomic_get_new_crtc_state(state, crtc);
1536        unsigned int update_mask = new_crtc_state->update_planes;
1537        const struct intel_plane_state *old_plane_state;
1538        struct intel_plane *plane;
1539        unsigned fb_bits = 0;
1540        int i;
1541
1542        intel_crtc_dpms_overlay_disable(crtc);
1543
1544        for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1545                if (crtc->pipe != plane->pipe ||
1546                    !(update_mask & BIT(plane->id)))
1547                        continue;
1548
1549                intel_plane_disable_arm(plane, new_crtc_state);
1550
1551                if (old_plane_state->uapi.visible)
1552                        fb_bits |= plane->frontbuffer_bit;
1553        }
1554
1555        intel_frontbuffer_flip(dev_priv, fb_bits);
1556}
1557
1558/*
1559 * intel_connector_primary_encoder - get the primary encoder for a connector
1560 * @connector: connector for which to return the encoder
1561 *
1562 * Returns the primary encoder for a connector. There is a 1:1 mapping from
1563 * all connectors to their encoder, except for DP-MST connectors which have
1564 * both a virtual and a primary encoder. These DP-MST primary encoders can be
1565 * pointed to by as many DP-MST connectors as there are pipes.
1566 */
1567static struct intel_encoder *
1568intel_connector_primary_encoder(struct intel_connector *connector)
1569{
1570        struct intel_encoder *encoder;
1571
1572        if (connector->mst_port)
1573                return &dp_to_dig_port(connector->mst_port)->base;
1574
1575        encoder = intel_attached_encoder(connector);
1576        drm_WARN_ON(connector->base.dev, !encoder);
1577
1578        return encoder;
1579}
1580
1581static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1582{
1583        struct drm_i915_private *i915 = to_i915(state->base.dev);
1584        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1585        struct intel_crtc *crtc;
1586        struct drm_connector_state *new_conn_state;
1587        struct drm_connector *connector;
1588        int i;
1589
1590        /*
1591         * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1592         * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1593         */
1594        if (i915->dpll.mgr) {
1595                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1596                        if (intel_crtc_needs_modeset(new_crtc_state))
1597                                continue;
1598
1599                        new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1600                        new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
1601                }
1602        }
1603
1604        if (!state->modeset)
1605                return;
1606
1607        for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1608                                        i) {
1609                struct intel_connector *intel_connector;
1610                struct intel_encoder *encoder;
1611                struct intel_crtc *crtc;
1612
1613                if (!intel_connector_needs_modeset(state, connector))
1614                        continue;
1615
1616                intel_connector = to_intel_connector(connector);
1617                encoder = intel_connector_primary_encoder(intel_connector);
1618                if (!encoder->update_prepare)
1619                        continue;
1620
1621                crtc = new_conn_state->crtc ?
1622                        to_intel_crtc(new_conn_state->crtc) : NULL;
1623                encoder->update_prepare(state, encoder, crtc);
1624        }
1625}
1626
1627static void intel_encoders_update_complete(struct intel_atomic_state *state)
1628{
1629        struct drm_connector_state *new_conn_state;
1630        struct drm_connector *connector;
1631        int i;
1632
1633        if (!state->modeset)
1634                return;
1635
1636        for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1637                                        i) {
1638                struct intel_connector *intel_connector;
1639                struct intel_encoder *encoder;
1640                struct intel_crtc *crtc;
1641
1642                if (!intel_connector_needs_modeset(state, connector))
1643                        continue;
1644
1645                intel_connector = to_intel_connector(connector);
1646                encoder = intel_connector_primary_encoder(intel_connector);
1647                if (!encoder->update_complete)
1648                        continue;
1649
1650                crtc = new_conn_state->crtc ?
1651                        to_intel_crtc(new_conn_state->crtc) : NULL;
1652                encoder->update_complete(state, encoder, crtc);
1653        }
1654}
1655
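/*
 * The intel_encoders_*() helpers below each invoke one optional encoder hook
 * (pre_pll_enable, pre_enable, enable, disable, post_disable,
 * post_pll_disable or update_pipe) for every connector in @state that is
 * bound to @crtc.
 */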
1656static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1657                                          struct intel_crtc *crtc)
1658{
1659        const struct intel_crtc_state *crtc_state =
1660                intel_atomic_get_new_crtc_state(state, crtc);
1661        const struct drm_connector_state *conn_state;
1662        struct drm_connector *conn;
1663        int i;
1664
1665        for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1666                struct intel_encoder *encoder =
1667                        to_intel_encoder(conn_state->best_encoder);
1668
1669                if (conn_state->crtc != &crtc->base)
1670                        continue;
1671
1672                if (encoder->pre_pll_enable)
1673                        encoder->pre_pll_enable(state, encoder,
1674                                                crtc_state, conn_state);
1675        }
1676}
1677
1678static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1679                                      struct intel_crtc *crtc)
1680{
1681        const struct intel_crtc_state *crtc_state =
1682                intel_atomic_get_new_crtc_state(state, crtc);
1683        const struct drm_connector_state *conn_state;
1684        struct drm_connector *conn;
1685        int i;
1686
1687        for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1688                struct intel_encoder *encoder =
1689                        to_intel_encoder(conn_state->best_encoder);
1690
1691                if (conn_state->crtc != &crtc->base)
1692                        continue;
1693
1694                if (encoder->pre_enable)
1695                        encoder->pre_enable(state, encoder,
1696                                            crtc_state, conn_state);
1697        }
1698}
1699
1700static void intel_encoders_enable(struct intel_atomic_state *state,
1701                                  struct intel_crtc *crtc)
1702{
1703        const struct intel_crtc_state *crtc_state =
1704                intel_atomic_get_new_crtc_state(state, crtc);
1705        const struct drm_connector_state *conn_state;
1706        struct drm_connector *conn;
1707        int i;
1708
1709        for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1710                struct intel_encoder *encoder =
1711                        to_intel_encoder(conn_state->best_encoder);
1712
1713                if (conn_state->crtc != &crtc->base)
1714                        continue;
1715
1716                if (encoder->enable)
1717                        encoder->enable(state, encoder,
1718                                        crtc_state, conn_state);
1719                intel_opregion_notify_encoder(encoder, true);
1720        }
1721}
1722
1723static void intel_encoders_disable(struct intel_atomic_state *state,
1724                                   struct intel_crtc *crtc)
1725{
1726        const struct intel_crtc_state *old_crtc_state =
1727                intel_atomic_get_old_crtc_state(state, crtc);
1728        const struct drm_connector_state *old_conn_state;
1729        struct drm_connector *conn;
1730        int i;
1731
1732        for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1733                struct intel_encoder *encoder =
1734                        to_intel_encoder(old_conn_state->best_encoder);
1735
1736                if (old_conn_state->crtc != &crtc->base)
1737                        continue;
1738
1739                intel_opregion_notify_encoder(encoder, false);
1740                if (encoder->disable)
1741                        encoder->disable(state, encoder,
1742                                         old_crtc_state, old_conn_state);
1743        }
1744}
1745
1746static void intel_encoders_post_disable(struct intel_atomic_state *state,
1747                                        struct intel_crtc *crtc)
1748{
1749        const struct intel_crtc_state *old_crtc_state =
1750                intel_atomic_get_old_crtc_state(state, crtc);
1751        const struct drm_connector_state *old_conn_state;
1752        struct drm_connector *conn;
1753        int i;
1754
1755        for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1756                struct intel_encoder *encoder =
1757                        to_intel_encoder(old_conn_state->best_encoder);
1758
1759                if (old_conn_state->crtc != &crtc->base)
1760                        continue;
1761
1762                if (encoder->post_disable)
1763                        encoder->post_disable(state, encoder,
1764                                              old_crtc_state, old_conn_state);
1765        }
1766}
1767
1768static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1769                                            struct intel_crtc *crtc)
1770{
1771        const struct intel_crtc_state *old_crtc_state =
1772                intel_atomic_get_old_crtc_state(state, crtc);
1773        const struct drm_connector_state *old_conn_state;
1774        struct drm_connector *conn;
1775        int i;
1776
1777        for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1778                struct intel_encoder *encoder =
1779                        to_intel_encoder(old_conn_state->best_encoder);
1780
1781                if (old_conn_state->crtc != &crtc->base)
1782                        continue;
1783
1784                if (encoder->post_pll_disable)
1785                        encoder->post_pll_disable(state, encoder,
1786                                                  old_crtc_state, old_conn_state);
1787        }
1788}
1789
1790static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1791                                       struct intel_crtc *crtc)
1792{
1793        const struct intel_crtc_state *crtc_state =
1794                intel_atomic_get_new_crtc_state(state, crtc);
1795        const struct drm_connector_state *conn_state;
1796        struct drm_connector *conn;
1797        int i;
1798
1799        for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1800                struct intel_encoder *encoder =
1801                        to_intel_encoder(conn_state->best_encoder);
1802
1803                if (conn_state->crtc != &crtc->base)
1804                        continue;
1805
1806                if (encoder->update_pipe)
1807                        encoder->update_pipe(state, encoder,
1808                                             crtc_state, conn_state);
1809        }
1810}
1811
1812static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1813{
1814        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1815        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1816
1817        plane->disable_arm(plane, crtc_state);
1818}
1819
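/*
 * ILK-style pipe enable sequence: program the transcoder timings, M/N values
 * and pipeconf, run the encoder pre_enable hooks, bring up FDI/PCH where
 * needed, load the LUTs, enable the transcoder and finally the encoders,
 * with FIFO underrun reporting suppressed until the pipe has settled.
 */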
1820static void ilk_crtc_enable(struct intel_atomic_state *state,
1821                            struct intel_crtc *crtc)
1822{
1823        const struct intel_crtc_state *new_crtc_state =
1824                intel_atomic_get_new_crtc_state(state, crtc);
1825        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1826        enum pipe pipe = crtc->pipe;
1827
1828        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1829                return;
1830
1831        /*
1832         * Sometimes spurious CPU pipe underruns happen during FDI
1833         * training, at least with VGA+HDMI cloning. Suppress them.
1834         *
1835         * On ILK we get occasional spurious CPU pipe underruns
1836         * between eDP port A enable and vdd enable. Also PCH port
1837         * enable seems to result in the occasional CPU pipe underrun.
1838         *
1839         * Spurious PCH underruns also occur during PCH enabling.
1840         */
1841        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1842        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1843
1844        if (intel_crtc_has_dp_encoder(new_crtc_state))
1845                intel_dp_set_m_n(new_crtc_state, M1_N1);
1846
1847        intel_set_transcoder_timings(new_crtc_state);
1848        intel_set_pipe_src_size(new_crtc_state);
1849
1850        if (new_crtc_state->has_pch_encoder)
1851                intel_cpu_transcoder_set_m_n(new_crtc_state,
1852                                             &new_crtc_state->fdi_m_n, NULL);
1853
1854        ilk_set_pipeconf(new_crtc_state);
1855
1856        crtc->active = true;
1857
1858        intel_encoders_pre_enable(state, crtc);
1859
1860        if (new_crtc_state->has_pch_encoder) {
1861                /* Note: FDI PLL enabling _must_ be done before we enable the
1862                 * cpu pipes, hence this is separate from all the other fdi/pch
1863                 * enabling. */
1864                ilk_fdi_pll_enable(new_crtc_state);
1865        } else {
1866                assert_fdi_tx_disabled(dev_priv, pipe);
1867                assert_fdi_rx_disabled(dev_priv, pipe);
1868        }
1869
1870        ilk_pfit_enable(new_crtc_state);
1871
1872        /*
1873         * On ILK+ LUT must be loaded before the pipe is running but with
1874         * clocks enabled
1875         */
1876        intel_color_load_luts(new_crtc_state);
1877        intel_color_commit(new_crtc_state);
1878        /* update DSPCNTR to configure gamma for pipe bottom color */
1879        intel_disable_primary_plane(new_crtc_state);
1880
1881        intel_initial_watermarks(state, crtc);
1882        intel_enable_transcoder(new_crtc_state);
1883
1884        if (new_crtc_state->has_pch_encoder)
1885                ilk_pch_enable(state, crtc);
1886
1887        intel_crtc_vblank_on(new_crtc_state);
1888
1889        intel_encoders_enable(state, crtc);
1890
1891        if (HAS_PCH_CPT(dev_priv))
1892                cpt_verify_modeset(dev_priv, pipe);
1893
1894        /*
1895         * Must wait for vblank to avoid spurious PCH FIFO underruns.
1896         * And a second vblank wait is needed at least on ILK with
1897         * some interlaced HDMI modes. Let's do the double wait always
1898         * in case there are more corner cases we don't know about.
1899         */
1900        if (new_crtc_state->has_pch_encoder) {
1901                intel_crtc_wait_for_next_vblank(crtc);
1902                intel_crtc_wait_for_next_vblank(crtc);
1903        }
1904        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1905        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1906}
1907
1908/* IPS only exists on ULT machines and is tied to pipe A. */
1909static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
1910{
1911        return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
1912}
1913
1914static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1915                                            enum pipe pipe, bool apply)
1916{
1917        u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1918        u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1919
1920        if (apply)
1921                val |= mask;
1922        else
1923                val &= ~mask;
1924
1925        intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1926}
1927
1928static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
1929{
1930        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1931        enum pipe pipe = crtc->pipe;
1932        u32 val;
1933
1934        /* Wa_22010947358:adl-p */
1935        if (IS_ALDERLAKE_P(dev_priv))
1936                val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
1937        else
1938                val = MBUS_DBOX_A_CREDIT(2);
1939
1940        if (DISPLAY_VER(dev_priv) >= 12) {
1941                val |= MBUS_DBOX_BW_CREDIT(2);
1942                val |= MBUS_DBOX_B_CREDIT(12);
1943        } else {
1944                val |= MBUS_DBOX_BW_CREDIT(1);
1945                val |= MBUS_DBOX_B_CREDIT(8);
1946        }
1947
1948        intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
1949}
1950
1951static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1952{
1953        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1954        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1955
1956        intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1957                       HSW_LINETIME(crtc_state->linetime) |
1958                       HSW_IPS_LINETIME(crtc_state->ips_linetime));
1959}
1960
1961static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1962{
1963        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1964        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1965        i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
1966        u32 val;
1967
1968        val = intel_de_read(dev_priv, reg);
1969        val &= ~HSW_FRAME_START_DELAY_MASK;
1970        val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
1971        intel_de_write(dev_priv, reg, val);
1972}
1973
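/*
 * Run the bits of the enable sequence that were skipped earlier for
 * bigjoiner pipes: the encoder pre_pll_enable/pre_enable hooks on the master
 * CRTC (when enabling a slave), the shared DPLL, VDSC, and on
 * DISPLAY_VER >= 13 the uncompressed joiner.
 */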
1974static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1975                                         const struct intel_crtc_state *crtc_state)
1976{
1977        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1978        struct intel_crtc_state *master_crtc_state;
1979        struct intel_crtc *master_crtc;
1980        struct drm_connector_state *conn_state;
1981        struct drm_connector *conn;
1982        struct intel_encoder *encoder = NULL;
1983        int i;
1984
1985        master_crtc = intel_master_crtc(crtc_state);
1986        master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
1987
1988        for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1989                if (conn_state->crtc != &master_crtc->base)
1990                        continue;
1991
1992                encoder = to_intel_encoder(conn_state->best_encoder);
1993                break;
1994        }
1995
1996        /*
1997         * Enable sequence steps 1-7 on bigjoiner master
1998         */
1999        if (crtc_state->bigjoiner_slave)
2000                intel_encoders_pre_pll_enable(state, master_crtc);
2001
2002        if (crtc_state->shared_dpll)
2003                intel_enable_shared_dpll(crtc_state);
2004
2005        if (crtc_state->bigjoiner_slave)
2006                intel_encoders_pre_enable(state, master_crtc);
2007
2008        /* need to enable VDSC, which we skipped in pre-enable */
2009        intel_dsc_enable(crtc_state);
2010
2011        if (DISPLAY_VER(dev_priv) >= 13)
2012                intel_uncompressed_joiner_enable(crtc_state);
2013}
2014
2015static void hsw_crtc_enable(struct intel_atomic_state *state,
2016                            struct intel_crtc *crtc)
2017{
2018        const struct intel_crtc_state *new_crtc_state =
2019                intel_atomic_get_new_crtc_state(state, crtc);
2020        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2021        enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
2022        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
2023        bool psl_clkgate_wa;
2024
2025        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2026                return;
2027
2028        if (!new_crtc_state->bigjoiner) {
2029                intel_encoders_pre_pll_enable(state, crtc);
2030
2031                if (new_crtc_state->shared_dpll)
2032                        intel_enable_shared_dpll(new_crtc_state);
2033
2034                intel_encoders_pre_enable(state, crtc);
2035        } else {
2036                icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
2037        }
2038
2039        intel_set_pipe_src_size(new_crtc_state);
2040        if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
2041                bdw_set_pipemisc(new_crtc_state);
2042
2043        if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
2044                intel_set_transcoder_timings(new_crtc_state);
2045
2046                if (cpu_transcoder != TRANSCODER_EDP)
2047                        intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
2048                                       new_crtc_state->pixel_multiplier - 1);
2049
2050                if (new_crtc_state->has_pch_encoder)
2051                        intel_cpu_transcoder_set_m_n(new_crtc_state,
2052                                                     &new_crtc_state->fdi_m_n, NULL);
2053
2054                hsw_set_frame_start_delay(new_crtc_state);
2055
2056                hsw_set_transconf(new_crtc_state);
2057        }
2058
2059        crtc->active = true;
2060
2061        /* Display WA #1180: WaDisableScalarClockGating: glk */
2062        psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
2063                new_crtc_state->pch_pfit.enabled;
2064        if (psl_clkgate_wa)
2065                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
2066
2067        if (DISPLAY_VER(dev_priv) >= 9)
2068                skl_pfit_enable(new_crtc_state);
2069        else
2070                ilk_pfit_enable(new_crtc_state);
2071
2072        /*
2073         * On ILK+ LUT must be loaded before the pipe is running but with
2074         * clocks enabled
2075         */
2076        intel_color_load_luts(new_crtc_state);
2077        intel_color_commit(new_crtc_state);
2078        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
2079        if (DISPLAY_VER(dev_priv) < 9)
2080                intel_disable_primary_plane(new_crtc_state);
2081
2082        hsw_set_linetime_wm(new_crtc_state);
2083
2084        if (DISPLAY_VER(dev_priv) >= 11)
2085                icl_set_pipe_chicken(new_crtc_state);
2086
2087        intel_initial_watermarks(state, crtc);
2088
2089        if (DISPLAY_VER(dev_priv) >= 11) {
2090                const struct intel_dbuf_state *dbuf_state =
2091                                intel_atomic_get_new_dbuf_state(state);
2092
2093                icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
2094        }
2095
2096        if (new_crtc_state->bigjoiner_slave)
2097                intel_crtc_vblank_on(new_crtc_state);
2098
2099        intel_encoders_enable(state, crtc);
2100
2101        if (psl_clkgate_wa) {
2102                intel_crtc_wait_for_next_vblank(crtc);
2103                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
2104        }
2105
2106        /* If we change the relative order between pipe/planes enabling, we need
2107         * to change the workaround. */
2108        hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
2109        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
2110                struct intel_crtc *wa_crtc;
2111
2112                wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
2113
2114                intel_crtc_wait_for_next_vblank(wa_crtc);
2115                intel_crtc_wait_for_next_vblank(wa_crtc);
2116        }
2117}
2118
2119void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2120{
2121        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2122        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2123        enum pipe pipe = crtc->pipe;
2124
2125        /* To avoid upsetting the power well on haswell, only disable the pfit if
2126         * it's in use. The hw state code will make sure we get this right. */
2127        if (!old_crtc_state->pch_pfit.enabled)
2128                return;
2129
2130        intel_de_write(dev_priv, PF_CTL(pipe), 0);
2131        intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
2132        intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
2133}
2134
2135static void ilk_crtc_disable(struct intel_atomic_state *state,
2136                             struct intel_crtc *crtc)
2137{
2138        const struct intel_crtc_state *old_crtc_state =
2139                intel_atomic_get_old_crtc_state(state, crtc);
2140        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2141        enum pipe pipe = crtc->pipe;
2142
2143        /*
2144         * Sometimes spurious CPU pipe underruns happen when the
2145         * pipe is already disabled, but FDI RX/TX is still enabled.
2146         * Happens at least with VGA+HDMI cloning. Suppress them.
2147         */
2148        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2149        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2150
2151        intel_encoders_disable(state, crtc);
2152
2153        intel_crtc_vblank_off(old_crtc_state);
2154
2155        intel_disable_transcoder(old_crtc_state);
2156
2157        ilk_pfit_disable(old_crtc_state);
2158
2159        if (old_crtc_state->has_pch_encoder)
2160                ilk_pch_disable(state, crtc);
2161
2162        intel_encoders_post_disable(state, crtc);
2163
2164        if (old_crtc_state->has_pch_encoder)
2165                ilk_pch_post_disable(state, crtc);
2166
2167        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2168        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
2169}
2170
2171static void hsw_crtc_disable(struct intel_atomic_state *state,
2172                             struct intel_crtc *crtc)
2173{
2174        const struct intel_crtc_state *old_crtc_state =
2175                intel_atomic_get_old_crtc_state(state, crtc);
2176
2177        /*
2178         * FIXME collapse everything to one hook.
2179         * Need care with mst->ddi interactions.
2180         */
2181        if (!old_crtc_state->bigjoiner_slave) {
2182                intel_encoders_disable(state, crtc);
2183                intel_encoders_post_disable(state, crtc);
2184        }
2185}
2186
2187static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
2188{
2189        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2190        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2191
2192        if (!crtc_state->gmch_pfit.control)
2193                return;
2194
2195        /*
2196         * The panel fitter should only be adjusted whilst the pipe is disabled,
2197         * according to register description and PRM.
2198         */
2199        drm_WARN_ON(&dev_priv->drm,
2200                    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
2201        assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2202
2203        intel_de_write(dev_priv, PFIT_PGM_RATIOS,
2204                       crtc_state->gmch_pfit.pgm_ratios);
2205        intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
2206
2207        /* Border color in case we don't scale up to the full screen. Black by
2208         * default; change to something else for debugging. */
2209        intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
2210}
2211
2212bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2213{
2214        if (phy == PHY_NONE)
2215                return false;
2216        else if (IS_DG2(dev_priv))
2217                /*
2218                 * DG2 outputs labelled as "combo PHY" in the bspec use
2219                 * SNPS PHYs with completely different programming,
2220                 * hence we always return false here.
2221                 */
2222                return false;
2223        else if (IS_ALDERLAKE_S(dev_priv))
2224                return phy <= PHY_E;
2225        else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2226                return phy <= PHY_D;
2227        else if (IS_JSL_EHL(dev_priv))
2228                return phy <= PHY_C;
2229        else if (DISPLAY_VER(dev_priv) >= 11)
2230                return phy <= PHY_B;
2231        else
2232                return false;
2233}
2234
2235bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2236{
2237        if (IS_DG2(dev_priv))
2238                /* DG2's "TC1" output uses a SNPS PHY */
2239                return false;
2240        else if (IS_ALDERLAKE_P(dev_priv))
2241                return phy >= PHY_F && phy <= PHY_I;
2242        else if (IS_TIGERLAKE(dev_priv))
2243                return phy >= PHY_D && phy <= PHY_I;
2244        else if (IS_ICELAKE(dev_priv))
2245                return phy >= PHY_C && phy <= PHY_F;
2246        else
2247                return false;
2248}
2249
2250bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2251{
2252        if (phy == PHY_NONE)
2253                return false;
2254        else if (IS_DG2(dev_priv))
2255                /*
2256                 * All four "combo" ports and the TC1 port (PHY E) use
2257                 * Synopsis PHYs.
2258                 */
2259                return phy <= PHY_E;
2260
2261        return false;
2262}
2263
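/*
 * Map a DDI port to the PHY that drives it. The mapping is platform
 * specific: for example, per the code below ADL-S maps PORT_TC1 onto PHY_B
 * and DG1/RKL map PORT_TC1 onto PHY_C, while everything else uses the
 * straight PORT_A -> PHY_A offset.
 */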
2264enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2265{
2266        if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2267                return PHY_D + port - PORT_D_XELPD;
2268        else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2269                return PHY_F + port - PORT_TC1;
2270        else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2271                return PHY_B + port - PORT_TC1;
2272        else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2273                return PHY_C + port - PORT_TC1;
2274        else if (IS_JSL_EHL(i915) && port == PORT_D)
2275                return PHY_A;
2276
2277        return PHY_A + port - PORT_A;
2278}
2279
2280enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2281{
2282        if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2283                return TC_PORT_NONE;
2284
2285        if (DISPLAY_VER(dev_priv) >= 12)
2286                return TC_PORT_1 + port - PORT_TC1;
2287        else
2288                return TC_PORT_1 + port - PORT_C;
2289}
2290
2291enum intel_display_power_domain intel_port_to_power_domain(enum port port)
2292{
2293        switch (port) {
2294        case PORT_A:
2295                return POWER_DOMAIN_PORT_DDI_A_LANES;
2296        case PORT_B:
2297                return POWER_DOMAIN_PORT_DDI_B_LANES;
2298        case PORT_C:
2299                return POWER_DOMAIN_PORT_DDI_C_LANES;
2300        case PORT_D:
2301                return POWER_DOMAIN_PORT_DDI_D_LANES;
2302        case PORT_E:
2303                return POWER_DOMAIN_PORT_DDI_E_LANES;
2304        case PORT_F:
2305                return POWER_DOMAIN_PORT_DDI_F_LANES;
2306        case PORT_G:
2307                return POWER_DOMAIN_PORT_DDI_G_LANES;
2308        case PORT_H:
2309                return POWER_DOMAIN_PORT_DDI_H_LANES;
2310        case PORT_I:
2311                return POWER_DOMAIN_PORT_DDI_I_LANES;
2312        default:
2313                MISSING_CASE(port);
2314                return POWER_DOMAIN_PORT_OTHER;
2315        }
2316}
2317
2318enum intel_display_power_domain
2319intel_aux_power_domain(struct intel_digital_port *dig_port)
2320{
2321        if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
2322                switch (dig_port->aux_ch) {
2323                case AUX_CH_C:
2324                        return POWER_DOMAIN_AUX_C_TBT;
2325                case AUX_CH_D:
2326                        return POWER_DOMAIN_AUX_D_TBT;
2327                case AUX_CH_E:
2328                        return POWER_DOMAIN_AUX_E_TBT;
2329                case AUX_CH_F:
2330                        return POWER_DOMAIN_AUX_F_TBT;
2331                case AUX_CH_G:
2332                        return POWER_DOMAIN_AUX_G_TBT;
2333                case AUX_CH_H:
2334                        return POWER_DOMAIN_AUX_H_TBT;
2335                case AUX_CH_I:
2336                        return POWER_DOMAIN_AUX_I_TBT;
2337                default:
2338                        MISSING_CASE(dig_port->aux_ch);
2339                        return POWER_DOMAIN_AUX_C_TBT;
2340                }
2341        }
2342
2343        return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
2344}
2345
2346/*
2347 * Converts aux_ch to power_domain without caring about TBT ports; for those,
2348 * use intel_aux_power_domain().
2349 */
2350enum intel_display_power_domain
2351intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
2352{
2353        switch (aux_ch) {
2354        case AUX_CH_A:
2355                return POWER_DOMAIN_AUX_A;
2356        case AUX_CH_B:
2357                return POWER_DOMAIN_AUX_B;
2358        case AUX_CH_C:
2359                return POWER_DOMAIN_AUX_C;
2360        case AUX_CH_D:
2361                return POWER_DOMAIN_AUX_D;
2362        case AUX_CH_E:
2363                return POWER_DOMAIN_AUX_E;
2364        case AUX_CH_F:
2365                return POWER_DOMAIN_AUX_F;
2366        case AUX_CH_G:
2367                return POWER_DOMAIN_AUX_G;
2368        case AUX_CH_H:
2369                return POWER_DOMAIN_AUX_H;
2370        case AUX_CH_I:
2371                return POWER_DOMAIN_AUX_I;
2372        default:
2373                MISSING_CASE(aux_ch);
2374                return POWER_DOMAIN_AUX_A;
2375        }
2376}
2377
2378static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2379{
2380        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2381        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2382        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2383        struct drm_encoder *encoder;
2384        enum pipe pipe = crtc->pipe;
2385        u64 mask;
2386
2387        if (!crtc_state->hw.active)
2388                return 0;
2389
2390        mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
2391        mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
2392        if (crtc_state->pch_pfit.enabled ||
2393            crtc_state->pch_pfit.force_thru)
2394                mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
2395
2396        drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2397                                  crtc_state->uapi.encoder_mask) {
2398                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2399
2400                mask |= BIT_ULL(intel_encoder->power_domain);
2401        }
2402
2403        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2404                mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
2405
2406        if (crtc_state->shared_dpll)
2407                mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
2408
2409        if (crtc_state->dsc.compression_enable)
2410                mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
2411
2412        return mask;
2413}
2414
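/*
 * Grab references on the power domains the CRTC needs in its new state and
 * return the mask of domains it no longer needs; the caller drops those
 * later via modeset_put_crtc_power_domains().
 */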
2415static u64
2416modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2417{
2418        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2419        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2420        enum intel_display_power_domain domain;
2421        u64 domains, new_domains, old_domains;
2422
2423        domains = get_crtc_power_domains(crtc_state);
2424
2425        new_domains = domains & ~crtc->enabled_power_domains.mask;
2426        old_domains = crtc->enabled_power_domains.mask & ~domains;
2427
2428        for_each_power_domain(domain, new_domains)
2429                intel_display_power_get_in_set(dev_priv,
2430                                               &crtc->enabled_power_domains,
2431                                               domain);
2432
2433        return old_domains;
2434}
2435
2436static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2437                                           u64 domains)
2438{
2439        intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2440                                            &crtc->enabled_power_domains,
2441                                            domains);
2442}
2443
2444static void valleyview_crtc_enable(struct intel_atomic_state *state,
2445                                   struct intel_crtc *crtc)
2446{
2447        const struct intel_crtc_state *new_crtc_state =
2448                intel_atomic_get_new_crtc_state(state, crtc);
2449        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2450        enum pipe pipe = crtc->pipe;
2451
2452        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2453                return;
2454
2455        if (intel_crtc_has_dp_encoder(new_crtc_state))
2456                intel_dp_set_m_n(new_crtc_state, M1_N1);
2457
2458        intel_set_transcoder_timings(new_crtc_state);
2459        intel_set_pipe_src_size(new_crtc_state);
2460
2461        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2462                intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
2463                intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
2464        }
2465
2466        i9xx_set_pipeconf(new_crtc_state);
2467
2468        crtc->active = true;
2469
2470        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2471
2472        intel_encoders_pre_pll_enable(state, crtc);
2473
2474        if (IS_CHERRYVIEW(dev_priv))
2475                chv_enable_pll(new_crtc_state);
2476        else
2477                vlv_enable_pll(new_crtc_state);
2478
2479        intel_encoders_pre_enable(state, crtc);
2480
2481        i9xx_pfit_enable(new_crtc_state);
2482
2483        intel_color_load_luts(new_crtc_state);
2484        intel_color_commit(new_crtc_state);
2485        /* update DSPCNTR to configure gamma for pipe bottom color */
2486        intel_disable_primary_plane(new_crtc_state);
2487
2488        intel_initial_watermarks(state, crtc);
2489        intel_enable_transcoder(new_crtc_state);
2490
2491        intel_crtc_vblank_on(new_crtc_state);
2492
2493        intel_encoders_enable(state, crtc);
2494}
2495
2496static void i9xx_crtc_enable(struct intel_atomic_state *state,
2497                             struct intel_crtc *crtc)
2498{
2499        const struct intel_crtc_state *new_crtc_state =
2500                intel_atomic_get_new_crtc_state(state, crtc);
2501        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2502        enum pipe pipe = crtc->pipe;
2503
2504        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2505                return;
2506
2507        if (intel_crtc_has_dp_encoder(new_crtc_state))
2508                intel_dp_set_m_n(new_crtc_state, M1_N1);
2509
2510        intel_set_transcoder_timings(new_crtc_state);
2511        intel_set_pipe_src_size(new_crtc_state);
2512
2513        i9xx_set_pipeconf(new_crtc_state);
2514
2515        crtc->active = true;
2516
2517        if (DISPLAY_VER(dev_priv) != 2)
2518                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2519
2520        intel_encoders_pre_enable(state, crtc);
2521
2522        i9xx_enable_pll(new_crtc_state);
2523
2524        i9xx_pfit_enable(new_crtc_state);
2525
2526        intel_color_load_luts(new_crtc_state);
2527        intel_color_commit(new_crtc_state);
2528        /* update DSPCNTR to configure gamma for pipe bottom color */
2529        intel_disable_primary_plane(new_crtc_state);
2530
2531        if (!intel_initial_watermarks(state, crtc))
2532                intel_update_watermarks(dev_priv);
2533        intel_enable_transcoder(new_crtc_state);
2534
2535        intel_crtc_vblank_on(new_crtc_state);
2536
2537        intel_encoders_enable(state, crtc);
2538
2539        /* prevents spurious underruns */
2540        if (DISPLAY_VER(dev_priv) == 2)
2541                intel_crtc_wait_for_next_vblank(crtc);
2542}
2543
2544static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2545{
2546        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2547        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2548
2549        if (!old_crtc_state->gmch_pfit.control)
2550                return;
2551
2552        assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2553
2554        drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2555                    intel_de_read(dev_priv, PFIT_CONTROL));
2556        intel_de_write(dev_priv, PFIT_CONTROL, 0);
2557}
2558
2559static void i9xx_crtc_disable(struct intel_atomic_state *state,
2560                              struct intel_crtc *crtc)
2561{
2562        struct intel_crtc_state *old_crtc_state =
2563                intel_atomic_get_old_crtc_state(state, crtc);
2564        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2565        enum pipe pipe = crtc->pipe;
2566
2567        /*
2568         * On gen2 planes are double buffered but the pipe isn't, so we must
2569         * wait for planes to fully turn off before disabling the pipe.
2570         */
2571        if (DISPLAY_VER(dev_priv) == 2)
2572                intel_crtc_wait_for_next_vblank(crtc);
2573
2574        intel_encoders_disable(state, crtc);
2575
2576        intel_crtc_vblank_off(old_crtc_state);
2577
2578        intel_disable_transcoder(old_crtc_state);
2579
2580        i9xx_pfit_disable(old_crtc_state);
2581
2582        intel_encoders_post_disable(state, crtc);
2583
2584        if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2585                if (IS_CHERRYVIEW(dev_priv))
2586                        chv_disable_pll(dev_priv, pipe);
2587                else if (IS_VALLEYVIEW(dev_priv))
2588                        vlv_disable_pll(dev_priv, pipe);
2589                else
2590                        i9xx_disable_pll(old_crtc_state);
2591        }
2592
2593        intel_encoders_post_pll_disable(state, crtc);
2594
2595        if (DISPLAY_VER(dev_priv) != 2)
2596                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2597
2598        if (!dev_priv->wm_disp->initial_watermarks)
2599                intel_update_watermarks(dev_priv);
2600
2601        /* clock the pipe down to 640x480@60 to potentially save power */
2602        if (IS_I830(dev_priv))
2603                i830_enable_pipe(dev_priv, pipe);
2604}
2605
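/*
 * Disable @crtc without going through a full atomic commit: turn off its
 * visible planes, run the platform crtc_disable hook with a throwaway atomic
 * state, and clear the crtc's entries in the cdclk, dbuf and bandwidth
 * bookkeeping.
 */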
2606static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
2607                                        struct drm_modeset_acquire_ctx *ctx)
2608{
2609        struct intel_encoder *encoder;
2610        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2611        struct intel_bw_state *bw_state =
2612                to_intel_bw_state(dev_priv->bw_obj.state);
2613        struct intel_cdclk_state *cdclk_state =
2614                to_intel_cdclk_state(dev_priv->cdclk.obj.state);
2615        struct intel_dbuf_state *dbuf_state =
2616                to_intel_dbuf_state(dev_priv->dbuf.obj.state);
2617        struct intel_crtc_state *crtc_state =
2618                to_intel_crtc_state(crtc->base.state);
2619        struct intel_plane *plane;
2620        struct drm_atomic_state *state;
2621        struct intel_crtc_state *temp_crtc_state;
2622        enum pipe pipe = crtc->pipe;
2623        int ret;
2624
2625        if (!crtc_state->hw.active)
2626                return;
2627
2628        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
2629                const struct intel_plane_state *plane_state =
2630                        to_intel_plane_state(plane->base.state);
2631
2632                if (plane_state->uapi.visible)
2633                        intel_plane_disable_noatomic(crtc, plane);
2634        }
2635
2636        state = drm_atomic_state_alloc(&dev_priv->drm);
2637        if (!state) {
2638                drm_dbg_kms(&dev_priv->drm,
2639                            "failed to disable [CRTC:%d:%s], out of memory",
2640                            crtc->base.base.id, crtc->base.name);
2641                return;
2642        }
2643
2644        state->acquire_ctx = ctx;
2645
2646        /* Everything's already locked, -EDEADLK can't happen. */
2647        temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
2648        ret = drm_atomic_add_affected_connectors(state, &crtc->base);
2649
2650        drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
2651
2652        dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);
2653
2654        drm_atomic_state_put(state);
2655
2656        drm_dbg_kms(&dev_priv->drm,
2657                    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
2658                    crtc->base.base.id, crtc->base.name);
2659
2660        crtc->active = false;
2661        crtc->base.enabled = false;
2662
2663        drm_WARN_ON(&dev_priv->drm,
2664                    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
2665        crtc_state->uapi.active = false;
2666        crtc_state->uapi.connector_mask = 0;
2667        crtc_state->uapi.encoder_mask = 0;
2668        intel_crtc_free_hw_state(crtc_state);
2669        memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
2670
2671        for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
2672                encoder->base.crtc = NULL;
2673
2674        intel_fbc_disable(crtc);
2675        intel_update_watermarks(dev_priv);
2676        intel_disable_shared_dpll(crtc_state);
2677
2678        intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
2679
2680        cdclk_state->min_cdclk[pipe] = 0;
2681        cdclk_state->min_voltage_level[pipe] = 0;
2682        cdclk_state->active_pipes &= ~BIT(pipe);
2683
2684        dbuf_state->active_pipes &= ~BIT(pipe);
2685
2686        bw_state->data_rate[pipe] = 0;
2687        bw_state->num_active_planes[pipe] = 0;
2688}
2689
2690/*
2691 * Turn all CRTCs off, but do not adjust state.
2692 * This has to be paired with a call to intel_modeset_setup_hw_state.
2693 */
2694int intel_display_suspend(struct drm_device *dev)
2695{
2696        struct drm_i915_private *dev_priv = to_i915(dev);
2697        struct drm_atomic_state *state;
2698        int ret;
2699
2700        if (!HAS_DISPLAY(dev_priv))
2701                return 0;
2702
2703        state = drm_atomic_helper_suspend(dev);
2704        ret = PTR_ERR_OR_ZERO(state);
2705        if (ret)
2706                drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2707                        ret);
2708        else
2709                dev_priv->modeset_restore_state = state;
2710        return ret;
2711}
2712
2713void intel_encoder_destroy(struct drm_encoder *encoder)
2714{
2715        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2716
2717        drm_encoder_cleanup(encoder);
2718        kfree(intel_encoder);
2719}
2720
2721/* Cross check the actual hw state with our own modeset state tracking (and its
2722 * internal consistency). */
2723static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
2724                                         struct drm_connector_state *conn_state)
2725{
2726        struct intel_connector *connector = to_intel_connector(conn_state->connector);
2727        struct drm_i915_private *i915 = to_i915(connector->base.dev);
2728
2729        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
2730                    connector->base.base.id, connector->base.name);
2731
2732        if (connector->get_hw_state(connector)) {
2733                struct intel_encoder *encoder = intel_attached_encoder(connector);
2734
2735                I915_STATE_WARN(!crtc_state,
2736                         "connector enabled without attached crtc\n");
2737
2738                if (!crtc_state)
2739                        return;
2740
2741                I915_STATE_WARN(!crtc_state->hw.active,
2742                                "connector is active, but attached crtc isn't\n");
2743
2744                if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
2745                        return;
2746
2747                I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
2748                        "atomic encoder doesn't match attached encoder\n");
2749
2750                I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
2751                        "attached encoder crtc differs from connector crtc\n");
2752        } else {
2753                I915_STATE_WARN(crtc_state && crtc_state->hw.active,
2754                                "attached crtc is active, but connector isn't\n");
2755                I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
2756                        "best encoder set without crtc!\n");
2757        }
2758}
2759
2760bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
2761{
2762        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2763        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2764
2765        /* IPS only exists on ULT machines and is tied to pipe A. */
2766        if (!hsw_crtc_supports_ips(crtc))
2767                return false;
2768
2769        if (!dev_priv->params.enable_ips)
2770                return false;
2771
2772        if (crtc_state->pipe_bpp > 24)
2773                return false;
2774
2775        /*
2776         * We compare against max which means we must take
2777         * the increased cdclk requirement into account when
2778         * calculating the new cdclk.
2779         *
2780         * Should measure whether using a lower cdclk w/o IPS would be preferable.
2781         */
2782        if (IS_BROADWELL(dev_priv) &&
2783            crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
2784                return false;
2785
2786        return true;
2787}
2788
2789static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
2790{
2791        struct drm_i915_private *dev_priv =
2792                to_i915(crtc_state->uapi.crtc->dev);
2793        struct intel_atomic_state *state =
2794                to_intel_atomic_state(crtc_state->uapi.state);
2795
2796        crtc_state->ips_enabled = false;
2797
2798        if (!hsw_crtc_state_ips_capable(crtc_state))
2799                return 0;
2800
2801        /*
2802         * When IPS gets enabled, the pipe CRC changes. Since IPS gets
2803         * enabled and disabled dynamically based on package C states,
2804         * user space can't make reliable use of the CRCs, so let's just
2805         * completely disable it.
2806         */
2807        if (crtc_state->crc_enabled)
2808                return 0;
2809
2810        /* IPS should be fine as long as at least one plane is enabled. */
2811        if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
2812                return 0;
2813
2814        if (IS_BROADWELL(dev_priv)) {
2815                const struct intel_cdclk_state *cdclk_state;
2816
2817                cdclk_state = intel_atomic_get_cdclk_state(state);
2818                if (IS_ERR(cdclk_state))
2819                        return PTR_ERR(cdclk_state);
2820
2821                /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
2822                if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
2823                        return 0;
2824        }
2825
2826        crtc_state->ips_enabled = true;
2827
2828        return 0;
2829}
2830
2831static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2832{
2833        const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2834
2835        /* GDG double wide on either pipe, otherwise pipe A only */
2836        return DISPLAY_VER(dev_priv) < 4 &&
2837                (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2838}
2839
2840static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2841{
2842        u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2843        struct drm_rect src;
2844
2845        /*
2846         * We only use IF-ID interlacing. If we ever use
2847         * PF-ID we'll need to adjust the pixel_rate here.
2848         */
2849
2850        if (!crtc_state->pch_pfit.enabled)
2851                return pixel_rate;
2852
2853        drm_rect_init(&src, 0, 0,
2854                      crtc_state->pipe_src_w << 16,
2855                      crtc_state->pipe_src_h << 16);
2856
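            /*
             * intel_adjusted_rate() is expected to bump the pixel rate by the
             * source/destination size ratio when the panel fitter downscales;
             * e.g. (illustrative numbers only) a 3840 pixel wide source scaled
             * to a 1920 pixel wide destination roughly doubles the effective
             * rate in that dimension.
             */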
2857        return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2858                                   pixel_rate);
2859}
2860
2861static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2862                                         const struct drm_display_mode *timings)
2863{
2864        mode->hdisplay = timings->crtc_hdisplay;
2865        mode->htotal = timings->crtc_htotal;
2866        mode->hsync_start = timings->crtc_hsync_start;
2867        mode->hsync_end = timings->crtc_hsync_end;
2868
2869        mode->vdisplay = timings->crtc_vdisplay;
2870        mode->vtotal = timings->crtc_vtotal;
2871        mode->vsync_start = timings->crtc_vsync_start;
2872        mode->vsync_end = timings->crtc_vsync_end;
2873
2874        mode->flags = timings->flags;
2875        mode->type = DRM_MODE_TYPE_DRIVER;
2876
2877        mode->clock = timings->crtc_clock;
2878
2879        drm_mode_set_name(mode);
2880}
2881
2882static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2883{
2884        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2885
2886        if (HAS_GMCH(dev_priv))
2887                /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2888                crtc_state->pixel_rate =
2889                        crtc_state->hw.pipe_mode.crtc_clock;
2890        else
2891                crtc_state->pixel_rate =
2892                        ilk_pipe_pixel_rate(crtc_state);
2893}
2894
2895static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2896{
2897        struct drm_display_mode *mode = &crtc_state->hw.mode;
2898        struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2899        struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2900
2901        drm_mode_copy(pipe_mode, adjusted_mode);
2902
2903        if (crtc_state->bigjoiner) {
2904                /*
2905                 * The transcoder is programmed with the full mode,
2906                 * but the pipe timings are half of the transcoder mode.
2907                 */
2908                pipe_mode->crtc_hdisplay /= 2;
2909                pipe_mode->crtc_hblank_start /= 2;
2910                pipe_mode->crtc_hblank_end /= 2;
2911                pipe_mode->crtc_hsync_start /= 2;
2912                pipe_mode->crtc_hsync_end /= 2;
2913                pipe_mode->crtc_htotal /= 2;
2914                pipe_mode->crtc_clock /= 2;
2915        }
2916
2917        if (crtc_state->splitter.enable) {
2918                int n = crtc_state->splitter.link_count;
2919                int overlap = crtc_state->splitter.pixel_overlap;
2920
2921                /*
2922                 * eDP MSO uses segment timings from EDID for transcoder
2923                 * timings, but full mode for everything else.
2924                 *
2925                 * h_full = (h_segment - pixel_overlap) * link_count
2926                 */
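                    /*
                     * Illustrative numbers only: two MSO links with an 8 pixel
                     * overlap and 1928 pixel wide segments give
                     * (1928 - 8) * 2 = 3840 pixels of full-mode width.
                     */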
2927                pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
2928                pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
2929                pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
2930                pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
2931                pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
2932                pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
2933                pipe_mode->crtc_clock *= n;
2934
2935                intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2936                intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2937        } else {
2938                intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2939                intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
2940        }
2941
2942        intel_crtc_compute_pixel_rate(crtc_state);
2943
2944        drm_mode_copy(mode, adjusted_mode);
2945        mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
2946        mode->vdisplay = crtc_state->pipe_src_h;
2947}
2948
2949static void intel_encoder_get_config(struct intel_encoder *encoder,
2950                                     struct intel_crtc_state *crtc_state)
2951{
2952        encoder->get_config(encoder, crtc_state);
2953
2954        intel_crtc_readout_derived_state(crtc_state);
2955}
2956
2957static int intel_crtc_compute_config(struct intel_crtc *crtc,
2958                                     struct intel_crtc_state *pipe_config)
2959{
2960        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2961        struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
2962        int clock_limit = dev_priv->max_dotclk_freq;
2963
2964        drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
2965
2966        /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
2967        if (pipe_config->bigjoiner) {
2968                pipe_mode->crtc_clock /= 2;
2969                pipe_mode->crtc_hdisplay /= 2;
2970                pipe_mode->crtc_hblank_start /= 2;
2971                pipe_mode->crtc_hblank_end /= 2;
2972                pipe_mode->crtc_hsync_start /= 2;
2973                pipe_mode->crtc_hsync_end /= 2;
2974                pipe_mode->crtc_htotal /= 2;
2975                pipe_config->pipe_src_w /= 2;
2976        }
2977
2978        if (pipe_config->splitter.enable) {
2979                int n = pipe_config->splitter.link_count;
2980                int overlap = pipe_config->splitter.pixel_overlap;
2981
2982                pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
2983                pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
2984                pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
2985                pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
2986                pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
2987                pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
2988                pipe_mode->crtc_clock *= n;
2989        }
2990
2991        intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2992
2993        if (DISPLAY_VER(dev_priv) < 4) {
2994                clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
2995
2996                /*
2997                 * Enable double wide mode when the dot clock
2998                 * is > 90% of the (display) core speed.
2999                 */
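                    /*
                     * E.g. (hypothetical numbers) with a 320000 kHz max cdclk
                     * the single-wide limit here is 288000 kHz; anything faster
                     * needs double wide mode, which restores the full dotclock
                     * limit.
                     */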
3000                if (intel_crtc_supports_double_wide(crtc) &&
3001                    pipe_mode->crtc_clock > clock_limit) {
3002                        clock_limit = dev_priv->max_dotclk_freq;
3003                        pipe_config->double_wide = true;
3004                }
3005        }
3006
3007        if (pipe_mode->crtc_clock > clock_limit) {
3008                drm_dbg_kms(&dev_priv->drm,
3009                            "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
3010                            pipe_mode->crtc_clock, clock_limit,
3011                            yesno(pipe_config->double_wide));
3012                return -EINVAL;
3013        }
3014
3015        /*
3016         * Pipe horizontal size must be even in:
3017         * - DVO ganged mode
3018         * - LVDS dual channel mode
3019         * - Double wide pipe
3020         */
3021        if (pipe_config->pipe_src_w & 1) {
3022                if (pipe_config->double_wide) {
3023                        drm_dbg_kms(&dev_priv->drm,
3024                                    "Odd pipe source width not supported with double wide pipe\n");
3025                        return -EINVAL;
3026                }
3027
3028                if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
3029                    intel_is_dual_link_lvds(dev_priv)) {
3030                        drm_dbg_kms(&dev_priv->drm,
3031                                    "Odd pipe source width not supported with dual link LVDS\n");
3032                        return -EINVAL;
3033                }
3034        }
3035
3036        intel_crtc_compute_pixel_rate(pipe_config);
3037
3038        if (pipe_config->has_pch_encoder)
3039                return ilk_fdi_compute_config(crtc, pipe_config);
3040
3041        return 0;
3042}
3043
3044static void
3045intel_reduce_m_n_ratio(u32 *num, u32 *den)
3046{
3047        while (*num > DATA_LINK_M_N_MASK ||
3048               *den > DATA_LINK_M_N_MASK) {
3049                *num >>= 1;
3050                *den >>= 1;
3051        }
3052}
3053
3054static void compute_m_n(unsigned int m, unsigned int n,
3055                        u32 *ret_m, u32 *ret_n,
3056                        bool constant_n)
3057{
3058        /*
3059         * Several DP dongles in particular seem to be fussy about
3060         * too large link M/N values. Use 0x8000 as the N value, which
3061         * such devices should accept: 0x8000 is the specified fixed
3062         * N value for asynchronous clock mode, which these devices
3063         * also expect in synchronous clock mode.
3064         */
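            /*
             * Purely illustrative example (assumed DP configuration, not from
             * any spec): m = 24 bpp * 148500 kHz = 3564000 and
             * n = 270000 kHz * 4 lanes * 8 = 8640000 with constant_n gives
             * *ret_n = 0x8000 and *ret_m = 3564000 * 0x8000 / 8640000 = 13516.
             */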
3065        if (constant_n)
3066                *ret_n = DP_LINK_CONSTANT_N_VALUE;
3067        else
3068                *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
3069
3070        *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
3071        intel_reduce_m_n_ratio(ret_m, ret_n);
3072}
3073
3074void
3075intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
3076                       int pixel_clock, int link_clock,
3077                       struct intel_link_m_n *m_n,
3078                       bool constant_n, bool fec_enable)
3079{
3080        u32 data_clock = bits_per_pixel * pixel_clock;
3081
3082        if (fec_enable)
3083                data_clock = intel_dp_mode_to_fec_clock(data_clock);
3084
3085        m_n->tu = 64;
3086        compute_m_n(data_clock,
3087                    link_clock * nlanes * 8,
3088                    &m_n->gmch_m, &m_n->gmch_n,
3089                    constant_n);
3090
3091        compute_m_n(pixel_clock, link_clock,
3092                    &m_n->link_m, &m_n->link_n,
3093                    constant_n);
3094}
3095
3096static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
3097{
3098        /*
3099         * There may be no VBT; and if the BIOS enabled SSC we can
3100         * just keep using it to avoid unnecessary flicker. On the other hand,
3101         * if the BIOS isn't using it, don't assume it will work even if the
3102         * VBT indicates as much.
3103         */
3104        if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
3105                bool bios_lvds_use_ssc = intel_de_read(dev_priv,
3106                                                       PCH_DREF_CONTROL) &
3107                        DREF_SSC1_ENABLE;
3108
3109                if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
3110                        drm_dbg_kms(&dev_priv->drm,
3111                                    "SSC %s by BIOS, overriding VBT which says %s\n",
3112                                    enableddisabled(bios_lvds_use_ssc),
3113                                    enableddisabled(dev_priv->vbt.lvds_use_ssc));
3114                        dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
3115                }
3116        }
3117}
3118
3119static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
3120                                         const struct intel_link_m_n *m_n)
3121{
3122        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3123        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3124        enum pipe pipe = crtc->pipe;
3125
3126        intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
3127                       TU_SIZE(m_n->tu) | m_n->gmch_m);
3128        intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
3129        intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
3130        intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
3131}
3132
3133static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
3134                                 enum transcoder transcoder)
3135{
3136        if (IS_HASWELL(dev_priv))
3137                return transcoder == TRANSCODER_EDP;
3138
3139        /*
3140         * Strictly speaking, some registers are available before
3141         * gen7, but we only support DRRS on gen7+.
3142         */
3143        return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
3144}
3145
3146static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
3147                                         const struct intel_link_m_n *m_n,
3148                                         const struct intel_link_m_n *m2_n2)
3149{
3150        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3151        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3152        enum pipe pipe = crtc->pipe;
3153        enum transcoder transcoder = crtc_state->cpu_transcoder;
3154
3155        if (DISPLAY_VER(dev_priv) >= 5) {
3156                intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
3157                               TU_SIZE(m_n->tu) | m_n->gmch_m);
3158                intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
3159                               m_n->gmch_n);
3160                intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
3161                               m_n->link_m);
3162                intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
3163                               m_n->link_n);
3164                /*
3165                 * M2_N2 registers are set only if DRRS is supported
3166                 * (to make sure the registers are not unnecessarily accessed).
3167                 */
3168                if (m2_n2 && crtc_state->has_drrs &&
3169                    transcoder_has_m2_n2(dev_priv, transcoder)) {
3170                        intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
3171                                       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
3172                        intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
3173                                       m2_n2->gmch_n);
3174                        intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
3175                                       m2_n2->link_m);
3176                        intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
3177                                       m2_n2->link_n);
3178                }
3179        } else {
3180                intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
3181                               TU_SIZE(m_n->tu) | m_n->gmch_m);
3182                intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
3183                intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
3184                intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
3185        }
3186}
3187
3188void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
3189{
3190        const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
3191        struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3192
3193        if (m_n == M1_N1) {
3194                dp_m_n = &crtc_state->dp_m_n;
3195                dp_m2_n2 = &crtc_state->dp_m2_n2;
3196        } else if (m_n == M2_N2) {
3197
3198                /*
3199                 * M2_N2 registers are not supported, so the m2_n2 divider
3200                 * value needs to be programmed into M1_N1.
3201                 */
3202                dp_m_n = &crtc_state->dp_m2_n2;
3203        } else {
3204                drm_err(&i915->drm, "Unsupported divider value\n");
3205                return;
3206        }
3207
3208        if (crtc_state->has_pch_encoder)
3209                intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
3210        else
3211                intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
3212}
3213
3214static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
3215{
3216        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3217        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3218        enum pipe pipe = crtc->pipe;
3219        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3220        const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
3221        u32 crtc_vtotal, crtc_vblank_end;
3222        int vsyncshift = 0;
3223
3224        /* We need to be careful not to change the adjusted mode, for otherwise
3225         * the hw state checker will get angry at the mismatch. */
3226        crtc_vtotal = adjusted_mode->crtc_vtotal;
3227        crtc_vblank_end = adjusted_mode->crtc_vblank_end;
3228
3229        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3230                /* the chip adds 2 halflines automatically */
3231                crtc_vtotal -= 1;
3232                crtc_vblank_end -= 1;
3233
3234                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3235                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
3236                else
3237                        vsyncshift = adjusted_mode->crtc_hsync_start -
3238                                adjusted_mode->crtc_htotal / 2;
3239                if (vsyncshift < 0)
3240                        vsyncshift += adjusted_mode->crtc_htotal;
3241        }
3242
3243        if (DISPLAY_VER(dev_priv) > 3)
3244                intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
3245                               vsyncshift);
3246
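            /*
             * Each of these registers packs (start/active value - 1) in the low
             * 16 bits and (end/total value - 1) in the high 16 bits. E.g.
             * (illustrative numbers) HTOTAL for a 1920 active / 2200 total mode
             * becomes ((2200 - 1) << 16) | (1920 - 1) = 0x0897077f.
             */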
3247        intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
3248                       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
3249        intel_de_write(dev_priv, HBLANK(cpu_transcoder),
3250                       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
3251        intel_de_write(dev_priv, HSYNC(cpu_transcoder),
3252                       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
3253
3254        intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
3255                       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
3256        intel_de_write(dev_priv, VBLANK(cpu_transcoder),
3257                       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
3258        intel_de_write(dev_priv, VSYNC(cpu_transcoder),
3259                       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
3260
3261        /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
3262         * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
3263         * documented in the DDI_FUNC_CTL register description, EDP Input Select
3264         * bits. */
3265        if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
3266            (pipe == PIPE_B || pipe == PIPE_C))
3267                intel_de_write(dev_priv, VTOTAL(pipe),
3268                               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
3269
3270}
3271
3272static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
3273{
3274        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3275        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3276        enum pipe pipe = crtc->pipe;
3277
3278        /* pipesrc controls the size that is scaled from, which should
3279         * always be the user's requested size.
3280         */
3281        intel_de_write(dev_priv, PIPESRC(pipe),
3282                       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
3283}
3284
3285static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
3286{
3287        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3288        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3289
3290        if (DISPLAY_VER(dev_priv) == 2)
3291                return false;
3292
3293        if (DISPLAY_VER(dev_priv) >= 9 ||
3294            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3295                return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
3296        else
3297                return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
3298}
3299
3300static void intel_get_transcoder_timings(struct intel_crtc *crtc,
3301                                         struct intel_crtc_state *pipe_config)
3302{
3303        struct drm_device *dev = crtc->base.dev;
3304        struct drm_i915_private *dev_priv = to_i915(dev);
3305        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
3306        u32 tmp;
3307
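            /*
             * The hardware fields hold (value - 1), so add the one back while
             * decoding; this mirrors intel_set_transcoder_timings().
             */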
3308        tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
3309        pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
3310        pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
3311
3312        if (!transcoder_is_dsi(cpu_transcoder)) {
3313                tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
3314                pipe_config->hw.adjusted_mode.crtc_hblank_start =
3315                                                        (tmp & 0xffff) + 1;
3316                pipe_config->hw.adjusted_mode.crtc_hblank_end =
3317                                                ((tmp >> 16) & 0xffff) + 1;
3318        }
3319        tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
3320        pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
3321        pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
3322
3323        tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
3324        pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
3325        pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
3326
3327        if (!transcoder_is_dsi(cpu_transcoder)) {
3328                tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
3329                pipe_config->hw.adjusted_mode.crtc_vblank_start =
3330                                                        (tmp & 0xffff) + 1;
3331                pipe_config->hw.adjusted_mode.crtc_vblank_end =
3332                                                ((tmp >> 16) & 0xffff) + 1;
3333        }
3334        tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
3335        pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
3336        pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
3337
3338        if (intel_pipe_is_interlaced(pipe_config)) {
3339                pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
3340                pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
3341                pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
3342        }
3343}
3344
3345static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3346                                    struct intel_crtc_state *pipe_config)
3347{
3348        struct drm_device *dev = crtc->base.dev;
3349        struct drm_i915_private *dev_priv = to_i915(dev);
3350        u32 tmp;
3351
3352        tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3353        pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
3354        pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
3355}
3356
3357static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
3358{
3359        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3360        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3361        u32 pipeconf;
3362
3363        pipeconf = 0;
3364
3365        /* we keep both pipes enabled on 830 */
3366        if (IS_I830(dev_priv))
3367                pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
3368
3369        if (crtc_state->double_wide)
3370                pipeconf |= PIPECONF_DOUBLE_WIDE;
3371
3372        /* only g4x and later have fancy bpc/dither controls */
3373        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3374            IS_CHERRYVIEW(dev_priv)) {
3375                /* Bspec claims that we can't use dithering for 30bpp pipes. */
3376                if (crtc_state->dither && crtc_state->pipe_bpp != 30)
3377                        pipeconf |= PIPECONF_DITHER_EN |
3378                                    PIPECONF_DITHER_TYPE_SP;
3379
3380                switch (crtc_state->pipe_bpp) {
3381                case 18:
3382                        pipeconf |= PIPECONF_6BPC;
3383                        break;
3384                case 24:
3385                        pipeconf |= PIPECONF_8BPC;
3386                        break;
3387                case 30:
3388                        pipeconf |= PIPECONF_10BPC;
3389                        break;
3390                default:
3391                        /* Case prevented by intel_choose_pipe_bpp_dither. */
3392                        BUG();
3393                }
3394        }
3395
3396        if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
3397                if (DISPLAY_VER(dev_priv) < 4 ||
3398                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3399                        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3400                else
3401                        pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
3402        } else {
3403                pipeconf |= PIPECONF_PROGRESSIVE;
3404        }
3405
3406        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3407             crtc_state->limited_color_range)
3408                pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
3409
3410        pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3411
3412        pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3413
3414        intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
3415        intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
3416}
3417
3418static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3419{
3420        if (IS_I830(dev_priv))
3421                return false;
3422
3423        return DISPLAY_VER(dev_priv) >= 4 ||
3424                IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
3425}
3426
3427static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
3428{
3429        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3430        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3431        u32 tmp;
3432
3433        if (!i9xx_has_pfit(dev_priv))
3434                return;
3435
3436        tmp = intel_de_read(dev_priv, PFIT_CONTROL);
3437        if (!(tmp & PFIT_ENABLE))
3438                return;
3439
3440        /* Check whether the pfit is attached to our pipe. */
3441        if (DISPLAY_VER(dev_priv) < 4) {
3442                if (crtc->pipe != PIPE_B)
3443                        return;
3444        } else {
3445                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3446                        return;
3447        }
3448
3449        crtc_state->gmch_pfit.control = tmp;
3450        crtc_state->gmch_pfit.pgm_ratios =
3451                intel_de_read(dev_priv, PFIT_PGM_RATIOS);
3452}
3453
3454static void vlv_crtc_clock_get(struct intel_crtc *crtc,
3455                               struct intel_crtc_state *pipe_config)
3456{
3457        struct drm_device *dev = crtc->base.dev;
3458        struct drm_i915_private *dev_priv = to_i915(dev);
3459        enum pipe pipe = crtc->pipe;
3460        struct dpll clock;
3461        u32 mdiv;
3462        int refclk = 100000;
3463
3464        /* In case of DSI, DPLL will not be used */
3465        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3466                return;
3467
3468        vlv_dpio_get(dev_priv);
3469        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
3470        vlv_dpio_put(dev_priv);
3471
3472        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
3473        clock.m2 = mdiv & DPIO_M2DIV_MASK;
3474        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
3475        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
3476        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
3477
3478        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
3479}
3480
3481static void chv_crtc_clock_get(struct intel_crtc *crtc,
3482                               struct intel_crtc_state *pipe_config)
3483{
3484        struct drm_device *dev = crtc->base.dev;
3485        struct drm_i915_private *dev_priv = to_i915(dev);
3486        enum pipe pipe = crtc->pipe;
3487        enum dpio_channel port = vlv_pipe_to_channel(pipe);
3488        struct dpll clock;
3489        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
3490        int refclk = 100000;
3491
3492        /* In case of DSI, DPLL will not be used */
3493        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3494                return;
3495
3496        vlv_dpio_get(dev_priv);
3497        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
3498        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
3499        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
3500        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
3501        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
3502        vlv_dpio_put(dev_priv);
3503
3504        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
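            /*
             * M2 is decoded here as a fixed-point value: 8 integer bits placed
             * above a 22 bit fraction, with the fraction only used when
             * DPIO_CHV_FRAC_DIV_EN is set.
             */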
3505        clock.m2 = (pll_dw0 & 0xff) << 22;
3506        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
3507                clock.m2 |= pll_dw2 & 0x3fffff;
3508        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
3509        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
3510        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
3511
3512        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
3513}
3514
3515static enum intel_output_format
3516bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3517{
3518        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3519        u32 tmp;
3520
3521        tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3522
3523        if (tmp & PIPEMISC_YUV420_ENABLE) {
3524                /* We support 4:2:0 in full blend mode only */
3525                drm_WARN_ON(&dev_priv->drm,
3526                            (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3527
3528                return INTEL_OUTPUT_FORMAT_YCBCR420;
3529        } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3530                return INTEL_OUTPUT_FORMAT_YCBCR444;
3531        } else {
3532                return INTEL_OUTPUT_FORMAT_RGB;
3533        }
3534}
3535
3536static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3537{
3538        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3539        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3540        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3541        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3542        u32 tmp;
3543
3544        tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3545
3546        if (tmp & DISPPLANE_GAMMA_ENABLE)
3547                crtc_state->gamma_enable = true;
3548
3549        if (!HAS_GMCH(dev_priv) &&
3550            tmp & DISPPLANE_PIPE_CSC_ENABLE)
3551                crtc_state->csc_enable = true;
3552}
3553
3554static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
3555                                 struct intel_crtc_state *pipe_config)
3556{
3557        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3558        enum intel_display_power_domain power_domain;
3559        intel_wakeref_t wakeref;
3560        u32 tmp;
3561        bool ret;
3562
3563        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3564        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3565        if (!wakeref)
3566                return false;
3567
3568        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3569        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3570        pipe_config->shared_dpll = NULL;
3571
3572        ret = false;
3573
3574        tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
3575        if (!(tmp & PIPECONF_ENABLE))
3576                goto out;
3577
3578        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3579            IS_CHERRYVIEW(dev_priv)) {
3580                switch (tmp & PIPECONF_BPC_MASK) {
3581                case PIPECONF_6BPC:
3582                        pipe_config->pipe_bpp = 18;
3583                        break;
3584                case PIPECONF_8BPC:
3585                        pipe_config->pipe_bpp = 24;
3586                        break;
3587                case PIPECONF_10BPC:
3588                        pipe_config->pipe_bpp = 30;
3589                        break;
3590                default:
3591                        break;
3592                }
3593        }
3594
3595        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3596            (tmp & PIPECONF_COLOR_RANGE_SELECT))
3597                pipe_config->limited_color_range = true;
3598
3599        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
3600                PIPECONF_GAMMA_MODE_SHIFT;
3601
3602        if (IS_CHERRYVIEW(dev_priv))
3603                pipe_config->cgm_mode = intel_de_read(dev_priv,
3604                                                      CGM_PIPE_MODE(crtc->pipe));
3605
3606        i9xx_get_pipe_color_config(pipe_config);
3607        intel_color_get_config(pipe_config);
3608
3609        if (DISPLAY_VER(dev_priv) < 4)
3610                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
3611
3612        intel_get_transcoder_timings(crtc, pipe_config);
3613        intel_get_pipe_src_size(crtc, pipe_config);
3614
3615        i9xx_get_pfit_config(pipe_config);
3616
3617        if (DISPLAY_VER(dev_priv) >= 4) {
3618                /* No way to read it out on pipes B and C */
3619                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
3620                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
3621                else
3622                        tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
3623                pipe_config->pixel_multiplier =
3624                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
3625                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
3626                pipe_config->dpll_hw_state.dpll_md = tmp;
3627        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
3628                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
3629                tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
3630                pipe_config->pixel_multiplier =
3631                        ((tmp & SDVO_MULTIPLIER_MASK)
3632                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
3633        } else {
3634                /* Note that on i915G/GM the pixel multiplier is in the sdvo
3635                 * port and will be fixed up in the encoder->get_config
3636                 * function. */
3637                pipe_config->pixel_multiplier = 1;
3638        }
3639        pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
3640                                                        DPLL(crtc->pipe));
3641        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
3642                pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
3643                                                               FP0(crtc->pipe));
3644                pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
3645                                                               FP1(crtc->pipe));
3646        } else {
3647                /* Mask out read-only status bits. */
3648                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
3649                                                     DPLL_PORTC_READY_MASK |
3650                                                     DPLL_PORTB_READY_MASK);
3651        }
3652
3653        if (IS_CHERRYVIEW(dev_priv))
3654                chv_crtc_clock_get(crtc, pipe_config);
3655        else if (IS_VALLEYVIEW(dev_priv))
3656                vlv_crtc_clock_get(crtc, pipe_config);
3657        else
3658                i9xx_crtc_clock_get(crtc, pipe_config);
3659
3660        /*
3661         * Normally the dotclock is filled in by the encoder .get_config()
3662         * but in case the pipe is enabled w/o any ports we need a sane
3663         * default.
3664         */
3665        pipe_config->hw.adjusted_mode.crtc_clock =
3666                pipe_config->port_clock / pipe_config->pixel_multiplier;
3667
3668        ret = true;
3669
3670out:
3671        intel_display_power_put(dev_priv, power_domain, wakeref);
3672
3673        return ret;
3674}
3675
3676static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
3677{
3678        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3679        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3680        enum pipe pipe = crtc->pipe;
3681        u32 val;
3682
3683        val = 0;
3684
3685        switch (crtc_state->pipe_bpp) {
3686        case 18:
3687                val |= PIPECONF_6BPC;
3688                break;
3689        case 24:
3690                val |= PIPECONF_8BPC;
3691                break;
3692        case 30:
3693                val |= PIPECONF_10BPC;
3694                break;
3695        case 36:
3696                val |= PIPECONF_12BPC;
3697                break;
3698        default:
3699                /* Case prevented by intel_choose_pipe_bpp_dither. */
3700                BUG();
3701        }
3702
3703        if (crtc_state->dither)
3704                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
3705
3706        if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3707                val |= PIPECONF_INTERLACED_ILK;
3708        else
3709                val |= PIPECONF_PROGRESSIVE;
3710
3711        /*
3712         * Limited color range with a non-RGB output format would end up
3713         * with an odd purple hue over the entire display. Make sure we don't do it.
3714         */
3715        drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
3716                    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
3717
3718        if (crtc_state->limited_color_range &&
3719            !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3720                val |= PIPECONF_COLOR_RANGE_SELECT;
3721
3722        if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3723                val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
3724
3725        val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3726
3727        val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3728
3729        intel_de_write(dev_priv, PIPECONF(pipe), val);
3730        intel_de_posting_read(dev_priv, PIPECONF(pipe));
3731}
3732
3733static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3734{
3735        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3736        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3737        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3738        u32 val = 0;
3739
3740        if (IS_HASWELL(dev_priv) && crtc_state->dither)
3741                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
3742
3743        if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3744                val |= PIPECONF_INTERLACED_ILK;
3745        else
3746                val |= PIPECONF_PROGRESSIVE;
3747
3748        if (IS_HASWELL(dev_priv) &&
3749            crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3750                val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
3751
3752        intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
3753        intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
3754}
3755
3756static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
3757{
3758        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3759        const struct intel_crtc_scaler_state *scaler_state =
3760                &crtc_state->scaler_state;
3761
3762        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3763        u32 val = 0;
3764        int i;
3765
3766        switch (crtc_state->pipe_bpp) {
3767        case 18:
3768                val |= PIPEMISC_6_BPC;
3769                break;
3770        case 24:
3771                val |= PIPEMISC_8_BPC;
3772                break;
3773        case 30:
3774                val |= PIPEMISC_10_BPC;
3775                break;
3776        case 36:
3777                /* Port output 12BPC defined for ADLP+ */
3778                if (DISPLAY_VER(dev_priv) > 12)
3779                        val |= PIPEMISC_12_BPC_ADLP;
3780                break;
3781        default:
3782                MISSING_CASE(crtc_state->pipe_bpp);
3783                break;
3784        }
3785
3786        if (crtc_state->dither)
3787                val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
3788
3789        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3790            crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3791                val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
3792
3793        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3794                val |= PIPEMISC_YUV420_ENABLE |
3795                        PIPEMISC_YUV420_MODE_FULL_BLEND;
3796
3797        if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3798                val |= PIPEMISC_HDR_MODE_PRECISION;
3799
3800        if (DISPLAY_VER(dev_priv) >= 12)
3801                val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
3802
3803        if (IS_ALDERLAKE_P(dev_priv)) {
3804                bool scaler_in_use = false;
3805
3806                for (i = 0; i < crtc->num_scalers; i++) {
3807                        if (!scaler_state->scalers[i].in_use)
3808                                continue;
3809
3810                        scaler_in_use = true;
3811                        break;
3812                }
3813
3814                intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
3815                             PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
3816                             scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
3817                             PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
3818        }
3819
3820        intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
3821}
3822
3823int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
3824{
3825        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3826        u32 tmp;
3827
3828        tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3829
3830        switch (tmp & PIPEMISC_BPC_MASK) {
3831        case PIPEMISC_6_BPC:
3832                return 18;
3833        case PIPEMISC_8_BPC:
3834                return 24;
3835        case PIPEMISC_10_BPC:
3836                return 30;
3837        /*
3838         * PORT OUTPUT 12 BPC defined for ADLP+.
3839         *
3840         * TODO:
3841         * For previous platforms with DSI interface, bits 5:7
3842         * are used for storing pipe_bpp irrespective of dithering.
3843         * Since the value of 12 BPC is not defined for these bits
3844         * on older platforms, need to find a workaround for 12 BPC
3845         * MIPI DSI HW readout.
3846         */
3847        case PIPEMISC_12_BPC_ADLP:
3848                if (DISPLAY_VER(dev_priv) > 12)
3849                        return 36;
3850                fallthrough;
3851        default:
3852                MISSING_CASE(tmp);
3853                return 0;
3854        }
3855}
3856
3857int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3858{
3859        /*
3860         * Account for spread spectrum to avoid
3861         * oversubscribing the link. Max center spread
3862         * is 2.5%; use 5% for safety's sake.
3863         */
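            /*
             * Illustrative example (assumed link parameters): 148500 kHz at
             * 24 bpp over a 270000 kHz link gives
             * bps = 148500 * 24 * 21 / 20 = 3742200, and
             * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
             */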
3864        u32 bps = target_clock * bpp * 21 / 20;
3865        return DIV_ROUND_UP(bps, link_bw * 8);
3866}
3867
3868static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
3869                                         struct intel_link_m_n *m_n)
3870{
3871        struct drm_device *dev = crtc->base.dev;
3872        struct drm_i915_private *dev_priv = to_i915(dev);
3873        enum pipe pipe = crtc->pipe;
3874
3875        m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
3876        m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
3877        m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
3878                & ~TU_SIZE_MASK;
3879        m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
3880        m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
3881                    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3882}
3883
3884static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
3885                                         enum transcoder transcoder,
3886                                         struct intel_link_m_n *m_n,
3887                                         struct intel_link_m_n *m2_n2)
3888{
3889        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3890        enum pipe pipe = crtc->pipe;
3891
3892        if (DISPLAY_VER(dev_priv) >= 5) {
3893                m_n->link_m = intel_de_read(dev_priv,
3894                                            PIPE_LINK_M1(transcoder));
3895                m_n->link_n = intel_de_read(dev_priv,
3896                                            PIPE_LINK_N1(transcoder));
3897                m_n->gmch_m = intel_de_read(dev_priv,
3898                                            PIPE_DATA_M1(transcoder))
3899                        & ~TU_SIZE_MASK;
3900                m_n->gmch_n = intel_de_read(dev_priv,
3901                                            PIPE_DATA_N1(transcoder));
3902                m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
3903                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3904
3905                if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
3906                        m2_n2->link_m = intel_de_read(dev_priv,
3907                                                      PIPE_LINK_M2(transcoder));
3908                        m2_n2->link_n = intel_de_read(dev_priv,
3909                                                             PIPE_LINK_N2(transcoder));
3910                        m2_n2->gmch_m = intel_de_read(dev_priv,
3911                                                             PIPE_DATA_M2(transcoder))
3912                                        & ~TU_SIZE_MASK;
3913                        m2_n2->gmch_n = intel_de_read(dev_priv,
3914                                                             PIPE_DATA_N2(transcoder));
3915                        m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
3916                                        & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3917                }
3918        } else {
3919                m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
3920                m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
3921                m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
3922                        & ~TU_SIZE_MASK;
3923                m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
3924                m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
3925                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3926        }
3927}
3928
3929void intel_dp_get_m_n(struct intel_crtc *crtc,
3930                      struct intel_crtc_state *pipe_config)
3931{
3932        if (pipe_config->has_pch_encoder)
3933                intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
3934        else
3935                intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3936                                             &pipe_config->dp_m_n,
3937                                             &pipe_config->dp_m2_n2);
3938}
3939
3940void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
3941                            struct intel_crtc_state *pipe_config)
3942{
3943        intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3944                                     &pipe_config->fdi_m_n, NULL);
3945}
3946
3947static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3948                                  u32 pos, u32 size)
3949{
3950        drm_rect_init(&crtc_state->pch_pfit.dst,
3951                      pos >> 16, pos & 0xffff,
3952                      size >> 16, size & 0xffff);
3953}
3954
3955static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
3956{
3957        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3958        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3959        struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
3960        int id = -1;
3961        int i;
3962
3963        /* find scaler attached to this pipe */
3964        for (i = 0; i < crtc->num_scalers; i++) {
3965                u32 ctl, pos, size;
3966
3967                ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
3968                if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
3969                        continue;
3970
3971                id = i;
3972                crtc_state->pch_pfit.enabled = true;
3973
3974                pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
3975                size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
3976
3977                ilk_get_pfit_pos_size(crtc_state, pos, size);
3978
3979                scaler_state->scalers[i].in_use = true;
3980                break;
3981        }
3982
3983        scaler_state->scaler_id = id;
3984        if (id >= 0)
3985                scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
3986        else
3987                scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
3988}
3989
3990static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3991{
3992        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3993        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3994        u32 ctl, pos, size;
3995
3996        ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3997        if ((ctl & PF_ENABLE) == 0)
3998                return;
3999
4000        crtc_state->pch_pfit.enabled = true;
4001
4002        pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
4003        size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
4004
4005        ilk_get_pfit_pos_size(crtc_state, pos, size);
4006
4007        /*
4008         * We currently do not free assignments of panel fitters on
4009         * ivb/hsw (since we don't use the higher upscaling modes which
4010         * differentiate them), so just WARN about this case for now.
4011         */
4012        drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
4013                    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
4014}
4015
4016static bool ilk_get_pipe_config(struct intel_crtc *crtc,
4017                                struct intel_crtc_state *pipe_config)
4018{
4019        struct drm_device *dev = crtc->base.dev;
4020        struct drm_i915_private *dev_priv = to_i915(dev);
4021        enum intel_display_power_domain power_domain;
4022        intel_wakeref_t wakeref;
4023        u32 tmp;
4024        bool ret;
4025
4026        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
4027        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4028        if (!wakeref)
4029                return false;
4030
4031        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4032        pipe_config->shared_dpll = NULL;
4033
4034        ret = false;
4035        tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
4036        if (!(tmp & PIPECONF_ENABLE))
4037                goto out;
4038
4039        switch (tmp & PIPECONF_BPC_MASK) {
4040        case PIPECONF_6BPC:
4041                pipe_config->pipe_bpp = 18;
4042                break;
4043        case PIPECONF_8BPC:
4044                pipe_config->pipe_bpp = 24;
4045                break;
4046        case PIPECONF_10BPC:
4047                pipe_config->pipe_bpp = 30;
4048                break;
4049        case PIPECONF_12BPC:
4050                pipe_config->pipe_bpp = 36;
4051                break;
4052        default:
4053                break;
4054        }
4055
4056        if (tmp & PIPECONF_COLOR_RANGE_SELECT)
4057                pipe_config->limited_color_range = true;
4058
4059        switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
4060        case PIPECONF_OUTPUT_COLORSPACE_YUV601:
4061        case PIPECONF_OUTPUT_COLORSPACE_YUV709:
4062                pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
4063                break;
4064        default:
4065                pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4066                break;
4067        }
4068
4069        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
4070                PIPECONF_GAMMA_MODE_SHIFT;
4071
4072        pipe_config->csc_mode = intel_de_read(dev_priv,
4073                                              PIPE_CSC_MODE(crtc->pipe));
4074
4075        i9xx_get_pipe_color_config(pipe_config);
4076        intel_color_get_config(pipe_config);
4077
4078        pipe_config->pixel_multiplier = 1;
4079
4080        ilk_pch_get_config(pipe_config);
4081
4082        intel_get_transcoder_timings(crtc, pipe_config);
4083        intel_get_pipe_src_size(crtc, pipe_config);
4084
4085        ilk_get_pfit_config(pipe_config);
4086
4087        ret = true;
4088
4089out:
4090        intel_display_power_put(dev_priv, power_domain, wakeref);
4091
4092        return ret;
4093}
4094
4095static u8 bigjoiner_pipes(struct drm_i915_private *i915)
4096{
4097        if (DISPLAY_VER(i915) >= 12)
4098                return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
4099        else if (DISPLAY_VER(i915) >= 11)
4100                return BIT(PIPE_B) | BIT(PIPE_C);
4101        else
4102                return 0;
4103}
4104
4105static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
4106                                           enum transcoder cpu_transcoder)
4107{
4108        enum intel_display_power_domain power_domain;
4109        intel_wakeref_t wakeref;
4110        u32 tmp = 0;
4111
4112        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
4113
4114        with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
4115                tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
4116
4117        return tmp & TRANS_DDI_FUNC_ENABLE;
4118}
4119
4120static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
4121{
4122        u8 master_pipes = 0, slave_pipes = 0;
4123        struct intel_crtc *crtc;
4124
4125        for_each_intel_crtc(&dev_priv->drm, crtc) {
4126                enum intel_display_power_domain power_domain;
4127                enum pipe pipe = crtc->pipe;
4128                intel_wakeref_t wakeref;
4129
4130                if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
4131                        continue;
4132
4133                power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
4134                with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
4135                        u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
4136
4137                        if (!(tmp & BIG_JOINER_ENABLE))
4138                                continue;
4139
4140                        if (tmp & MASTER_BIG_JOINER_ENABLE)
4141                                master_pipes |= BIT(pipe);
4142                        else
4143                                slave_pipes |= BIT(pipe);
4144                }
4145
4146                if (DISPLAY_VER(dev_priv) < 13)
4147                        continue;
4148
4149                power_domain = POWER_DOMAIN_PIPE(pipe);
4150                with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
4151                        u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
4152
4153                        if (tmp & UNCOMPRESSED_JOINER_MASTER)
4154                                master_pipes |= BIT(pipe);
4155                        if (tmp & UNCOMPRESSED_JOINER_SLAVE)
4156                                slave_pipes |= BIT(pipe);
4157                }
4158        }
4159
4160        /* Bigjoiner pipes should always be consecutive master and slave */
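        /* e.g. masters on pipes A+C (0x5) imply slaves on pipes B+D (0xa == 0x5 << 1). */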
4161        drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
4162                 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
4163                 master_pipes, slave_pipes);
4164
4165        return slave_pipes;
4166}
4167
4168static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
4169{
4170        u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
4171
4172        if (DISPLAY_VER(i915) >= 11)
4173                panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
4174
4175        return panel_transcoder_mask;
4176}
4177
4178static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
4179{
4180        struct drm_device *dev = crtc->base.dev;
4181        struct drm_i915_private *dev_priv = to_i915(dev);
4182        u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
4183        enum transcoder cpu_transcoder;
4184        u8 enabled_transcoders = 0;
4185
4186        /*
4187         * XXX: Do intel_display_power_get_if_enabled before reading this (for
4188         * consistency and less surprising code; it's in an always-on power well).
4189         */
4190        for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
4191                                       panel_transcoder_mask) {
4192                enum intel_display_power_domain power_domain;
4193                intel_wakeref_t wakeref;
4194                enum pipe trans_pipe;
4195                u32 tmp = 0;
4196
4197                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
4198                with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
4199                        tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
4200
4201                if (!(tmp & TRANS_DDI_FUNC_ENABLE))
4202                        continue;
4203
4204                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
4205                default:
4206                        drm_WARN(dev, 1,
4207                                 "unknown pipe linked to transcoder %s\n",
4208                                 transcoder_name(cpu_transcoder));
4209                        fallthrough;
4210                case TRANS_DDI_EDP_INPUT_A_ONOFF:
4211                case TRANS_DDI_EDP_INPUT_A_ON:
4212                        trans_pipe = PIPE_A;
4213                        break;
4214                case TRANS_DDI_EDP_INPUT_B_ONOFF:
4215                        trans_pipe = PIPE_B;
4216                        break;
4217                case TRANS_DDI_EDP_INPUT_C_ONOFF:
4218                        trans_pipe = PIPE_C;
4219                        break;
4220                case TRANS_DDI_EDP_INPUT_D_ONOFF:
4221                        trans_pipe = PIPE_D;
4222                        break;
4223                }
4224
4225                if (trans_pipe == crtc->pipe)
4226                        enabled_transcoders |= BIT(cpu_transcoder);
4227        }
4228
4229        /* single pipe or bigjoiner master */
4230        cpu_transcoder = (enum transcoder) crtc->pipe;
4231        if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4232                enabled_transcoders |= BIT(cpu_transcoder);
4233
4234        /* bigjoiner slave -> consider the master pipe's transcoder as well */
4235        if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
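                /*
                 * Bigjoiner master/slave pipes are consecutive (see
                 * enabled_bigjoiner_pipes()), so pipe - 1 is the master pipe
                 * and thus the master's transcoder.
                 */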
4236                cpu_transcoder = (enum transcoder) crtc->pipe - 1;
4237                if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4238                        enabled_transcoders |= BIT(cpu_transcoder);
4239        }
4240
4241        return enabled_transcoders;
4242}
4243
4244static bool has_edp_transcoders(u8 enabled_transcoders)
4245{
4246        return enabled_transcoders & BIT(TRANSCODER_EDP);
4247}
4248
4249static bool has_dsi_transcoders(u8 enabled_transcoders)
4250{
4251        return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
4252                                      BIT(TRANSCODER_DSI_1));
4253}
4254
4255static bool has_pipe_transcoders(u8 enabled_transcoders)
4256{
4257        return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
4258                                       BIT(TRANSCODER_DSI_0) |
4259                                       BIT(TRANSCODER_DSI_1));
4260}
4261
4262static void assert_enabled_transcoders(struct drm_i915_private *i915,
4263                                       u8 enabled_transcoders)
4264{
4265        /* Only one type of transcoder please */
4266        drm_WARN_ON(&i915->drm,
4267                    has_edp_transcoders(enabled_transcoders) +
4268                    has_dsi_transcoders(enabled_transcoders) +
4269                    has_pipe_transcoders(enabled_transcoders) > 1);
4270
4271        /* Only DSI transcoders can be ganged */
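        /* i.e. any non-DSI configuration must have exactly one transcoder bit set */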
4272        drm_WARN_ON(&i915->drm,
4273                    !has_dsi_transcoders(enabled_transcoders) &&
4274                    !is_power_of_2(enabled_transcoders));
4275}
4276
4277static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
4278                                     struct intel_crtc_state *pipe_config,
4279                                     struct intel_display_power_domain_set *power_domain_set)
4280{
4281        struct drm_device *dev = crtc->base.dev;
4282        struct drm_i915_private *dev_priv = to_i915(dev);
4283        unsigned long enabled_transcoders;
4284        u32 tmp;
4285
4286        enabled_transcoders = hsw_enabled_transcoders(crtc);
4287        if (!enabled_transcoders)
4288                return false;
4289
4290        assert_enabled_transcoders(dev_priv, enabled_transcoders);
4291
4292        /*
4293         * With the exception of DSI we should only ever have
4294         * a single enabled transcoder. With DSI let's just
4295         * pick the first one.
4296         */
4297        pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
4298
4299        if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4300                                                       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
4301                return false;
4302
4303        if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
4304                tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
4305
4306                if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
4307                        pipe_config->pch_pfit.force_thru = true;
4308        }
4309
4310        tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
4311
4312        return tmp & PIPECONF_ENABLE;
4313}
4314
4315static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
4316                                         struct intel_crtc_state *pipe_config,
4317                                         struct intel_display_power_domain_set *power_domain_set)
4318{
4319        struct drm_device *dev = crtc->base.dev;
4320        struct drm_i915_private *dev_priv = to_i915(dev);
4321        enum transcoder cpu_transcoder;
4322        enum port port;
4323        u32 tmp;
4324
4325        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
4326                if (port == PORT_A)
4327                        cpu_transcoder = TRANSCODER_DSI_A;
4328                else
4329                        cpu_transcoder = TRANSCODER_DSI_C;
4330
4331                if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4332                                                               POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
4333                        continue;
4334
4335                /*
4336                 * The PLL needs to be enabled with a valid divider
4337                 * configuration, otherwise accessing DSI registers will hang
4338                 * the machine. See BSpec North Display Engine
4339                 * registers/MIPI[BXT]. We can break out here early, since we
4340                 * need the same DSI PLL to be enabled for both DSI ports.
4341                 */
4342                if (!bxt_dsi_pll_is_enabled(dev_priv))
4343                        break;
4344
4345                /* XXX: this works for video mode only */
4346                tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
4347                if (!(tmp & DPI_ENABLE))
4348                        continue;
4349
4350                tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
4351                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
4352                        continue;
4353
4354                pipe_config->cpu_transcoder = cpu_transcoder;
4355                break;
4356        }
4357
4358        return transcoder_is_dsi(pipe_config->cpu_transcoder);
4359}
4360
4361static bool hsw_get_pipe_config(struct intel_crtc *crtc,
4362                                struct intel_crtc_state *pipe_config)
4363{
4364        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4365        struct intel_display_power_domain_set power_domain_set = { };
4366        bool active;
4367        u32 tmp;
4368
4369        if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
4370                                                       POWER_DOMAIN_PIPE(crtc->pipe)))
4371                return false;
4372
4373        pipe_config->shared_dpll = NULL;
4374
4375        active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
4376
4377        if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4378            bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
4379                drm_WARN_ON(&dev_priv->drm, active);
4380                active = true;
4381        }
4382
4383        intel_dsc_get_config(pipe_config);
4384        if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
4385                intel_uncompressed_joiner_get_config(pipe_config);
4386
4387        if (!active)
4388                goto out;
4389
4390        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
4391            DISPLAY_VER(dev_priv) >= 11)
4392                intel_get_transcoder_timings(crtc, pipe_config);
4393
4394        if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
4395                intel_vrr_get_config(crtc, pipe_config);
4396
4397        intel_get_pipe_src_size(crtc, pipe_config);
4398
4399        if (IS_HASWELL(dev_priv)) {
4400                u32 tmp = intel_de_read(dev_priv,
4401                                        PIPECONF(pipe_config->cpu_transcoder));
4402
4403                if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
4404                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
4405                else
4406                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4407        } else {
4408                pipe_config->output_format =
4409                        bdw_get_pipemisc_output_format(crtc);
4410        }
4411
4412        pipe_config->gamma_mode = intel_de_read(dev_priv,
4413                                                GAMMA_MODE(crtc->pipe));
4414
4415        pipe_config->csc_mode = intel_de_read(dev_priv,
4416                                              PIPE_CSC_MODE(crtc->pipe));
4417
4418        if (DISPLAY_VER(dev_priv) >= 9) {
4419                tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
4420
4421                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
4422                        pipe_config->gamma_enable = true;
4423
4424                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
4425                        pipe_config->csc_enable = true;
4426        } else {
4427                i9xx_get_pipe_color_config(pipe_config);
4428        }
4429
4430        intel_color_get_config(pipe_config);
4431
4432        tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
4433        pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
4434        if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4435                pipe_config->ips_linetime =
4436                        REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
4437
4438        if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
4439                                                      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
4440                if (DISPLAY_VER(dev_priv) >= 9)
4441                        skl_get_pfit_config(pipe_config);
4442                else
4443                        ilk_get_pfit_config(pipe_config);
4444        }
4445
4446        if (hsw_crtc_supports_ips(crtc)) {
4447                if (IS_HASWELL(dev_priv))
4448                        pipe_config->ips_enabled = intel_de_read(dev_priv,
4449                                                                 IPS_CTL) & IPS_ENABLE;
4450                else {
4451                        /*
4452                         * We cannot read out the IPS state on Broadwell; set it
4453                         * to true so that it gets forced to a defined state on
4454                         * the first commit.
4455                         */
4456                        pipe_config->ips_enabled = true;
4457                }
4458        }
4459
4460        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
4461            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
4462                pipe_config->pixel_multiplier =
4463                        intel_de_read(dev_priv,
4464                                      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
4465        } else {
4466                pipe_config->pixel_multiplier = 1;
4467        }
4468
4469out:
4470        intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
4471
4472        return active;
4473}
4474
4475static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
4476{
4477        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4478        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4479
4480        if (!i915->display->get_pipe_config(crtc, crtc_state))
4481                return false;
4482
4483        crtc_state->hw.active = true;
4484
4485        intel_crtc_readout_derived_state(crtc_state);
4486
4487        return true;
4488}
4489
4490/* VESA 640x480x72Hz mode to set on the pipe */
4491static const struct drm_display_mode load_detect_mode = {
4492        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
4493                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
4494};
4495
4496static int intel_modeset_disable_planes(struct drm_atomic_state *state,
4497                                        struct drm_crtc *crtc)
4498{
4499        struct drm_plane *plane;
4500        struct drm_plane_state *plane_state;
4501        int ret, i;
4502
4503        ret = drm_atomic_add_affected_planes(state, crtc);
4504        if (ret)
4505                return ret;
4506
4507        for_each_new_plane_in_state(state, plane, plane_state, i) {
4508                if (plane_state->crtc != crtc)
4509                        continue;
4510
4511                ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
4512                if (ret)
4513                        return ret;
4514
4515                drm_atomic_set_fb_for_plane(plane_state, NULL);
4516        }
4517
4518        return 0;
4519}
4520
4521int intel_get_load_detect_pipe(struct drm_connector *connector,
4522                               struct intel_load_detect_pipe *old,
4523                               struct drm_modeset_acquire_ctx *ctx)
4524{
4525        struct intel_encoder *encoder =
4526                intel_attached_encoder(to_intel_connector(connector));
4527        struct intel_crtc *possible_crtc;
4528        struct intel_crtc *crtc = NULL;
4529        struct drm_device *dev = encoder->base.dev;
4530        struct drm_i915_private *dev_priv = to_i915(dev);
4531        struct drm_mode_config *config = &dev->mode_config;
4532        struct drm_atomic_state *state = NULL, *restore_state = NULL;
4533        struct drm_connector_state *connector_state;
4534        struct intel_crtc_state *crtc_state;
4535        int ret;
4536
4537        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4538                    connector->base.id, connector->name,
4539                    encoder->base.base.id, encoder->base.name);
4540
4541        old->restore_state = NULL;
4542
4543        drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
4544
4545        /*
4546         * Algorithm gets a little messy:
4547         *
4548         *   - if the connector already has an assigned crtc, use it (but make
4549         *     sure it's on first)
4550         *
4551         *   - try to find the first unused crtc that can drive this connector,
4552         *     and use that if we find one
4553         */
4554
4555        /* See if we already have a CRTC for this connector */
4556        if (connector->state->crtc) {
4557                crtc = to_intel_crtc(connector->state->crtc);
4558
4559                ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4560                if (ret)
4561                        goto fail;
4562
4563                /* Make sure the crtc and connector are running */
4564                goto found;
4565        }
4566
4567        /* Find an unused one (if possible) */
4568        for_each_intel_crtc(dev, possible_crtc) {
4569                if (!(encoder->base.possible_crtcs &
4570                      drm_crtc_mask(&possible_crtc->base)))
4571                        continue;
4572
4573                ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
4574                if (ret)
4575                        goto fail;
4576
4577                if (possible_crtc->base.state->enable) {
4578                        drm_modeset_unlock(&possible_crtc->base.mutex);
4579                        continue;
4580                }
4581
4582                crtc = possible_crtc;
4583                break;
4584        }
4585
4586        /*
4587         * If we didn't find an unused CRTC, don't use any.
4588         */
4589        if (!crtc) {
4590                drm_dbg_kms(&dev_priv->drm,
4591                            "no pipe available for load-detect\n");
4592                ret = -ENODEV;
4593                goto fail;
4594        }
4595
4596found:
4597        state = drm_atomic_state_alloc(dev);
4598        restore_state = drm_atomic_state_alloc(dev);
4599        if (!state || !restore_state) {
4600                ret = -ENOMEM;
4601                goto fail;
4602        }
4603
4604        state->acquire_ctx = ctx;
4605        restore_state->acquire_ctx = ctx;
4606
4607        connector_state = drm_atomic_get_connector_state(state, connector);
4608        if (IS_ERR(connector_state)) {
4609                ret = PTR_ERR(connector_state);
4610                goto fail;
4611        }
4612
4613        ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
4614        if (ret)
4615                goto fail;
4616
4617        crtc_state = intel_atomic_get_crtc_state(state, crtc);
4618        if (IS_ERR(crtc_state)) {
4619                ret = PTR_ERR(crtc_state);
4620                goto fail;
4621        }
4622
4623        crtc_state->uapi.active = true;
4624
4625        ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
4626                                           &load_detect_mode);
4627        if (ret)
4628                goto fail;
4629
4630        ret = intel_modeset_disable_planes(state, &crtc->base);
4631        if (ret)
4632                goto fail;
4633
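        /*
         * Duplicate the current connector/CRTC/plane state into restore_state
         * so that intel_release_load_detect_pipe() can commit it later to undo
         * the load-detect modeset.
         */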
4634        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
4635        if (!ret)
4636                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
4637        if (!ret)
4638                ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
4639        if (ret) {
4640                drm_dbg_kms(&dev_priv->drm,
4641                            "Failed to create a copy of old state to restore: %i\n",
4642                            ret);
4643                goto fail;
4644        }
4645
4646        ret = drm_atomic_commit(state);
4647        if (ret) {
4648                drm_dbg_kms(&dev_priv->drm,
4649                            "failed to set mode on load-detect pipe\n");
4650                goto fail;
4651        }
4652
4653        old->restore_state = restore_state;
4654        drm_atomic_state_put(state);
4655
4656        /* let the connector get through one full cycle before testing */
4657        intel_crtc_wait_for_next_vblank(crtc);
4658
4659        return true;
4660
4661fail:
4662        if (state) {
4663                drm_atomic_state_put(state);
4664                state = NULL;
4665        }
4666        if (restore_state) {
4667                drm_atomic_state_put(restore_state);
4668                restore_state = NULL;
4669        }
4670
4671        if (ret == -EDEADLK)
4672                return ret;
4673
4674        return false;
4675}
4676
4677void intel_release_load_detect_pipe(struct drm_connector *connector,
4678                                    struct intel_load_detect_pipe *old,
4679                                    struct drm_modeset_acquire_ctx *ctx)
4680{
4681        struct intel_encoder *intel_encoder =
4682                intel_attached_encoder(to_intel_connector(connector));
4683        struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
4684        struct drm_encoder *encoder = &intel_encoder->base;
4685        struct drm_atomic_state *state = old->restore_state;
4686        int ret;
4687
4688        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4689                    connector->base.id, connector->name,
4690                    encoder->base.id, encoder->name);
4691
4692        if (!state)
4693                return;
4694
4695        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4696        if (ret)
4697                drm_dbg_kms(&i915->drm,
4698                            "Couldn't release load detect pipe: %i\n", ret);
4699        drm_atomic_state_put(state);
4700}
4701
4702static int i9xx_pll_refclk(struct drm_device *dev,
4703                           const struct intel_crtc_state *pipe_config)
4704{
4705        struct drm_i915_private *dev_priv = to_i915(dev);
4706        u32 dpll = pipe_config->dpll_hw_state.dpll;
4707
4708        if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4709                return dev_priv->vbt.lvds_ssc_freq;
4710        else if (HAS_PCH_SPLIT(dev_priv))
4711                return 120000;
4712        else if (DISPLAY_VER(dev_priv) != 2)
4713                return 96000;
4714        else
4715                return 48000;
4716}
4717
4718/* Returns the clock of the currently programmed mode of the given pipe. */
4719void i9xx_crtc_clock_get(struct intel_crtc *crtc,
4720                         struct intel_crtc_state *pipe_config)
4721{
4722        struct drm_device *dev = crtc->base.dev;
4723        struct drm_i915_private *dev_priv = to_i915(dev);
4724        u32 dpll = pipe_config->dpll_hw_state.dpll;
4725        u32 fp;
4726        struct dpll clock;
4727        int port_clock;
4728        int refclk = i9xx_pll_refclk(dev, pipe_config);
4729
4730        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
4731                fp = pipe_config->dpll_hw_state.fp0;
4732        else
4733                fp = pipe_config->dpll_hw_state.fp1;
4734
4735        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
4736        if (IS_PINEVIEW(dev_priv)) {
4737                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
4738                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
4739        } else {
4740                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
4741                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
4742        }
4743
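        /*
         * Reconstruct the divider values and let pnv/i9xx_calc_dpll_params()
         * turn them back into a clock (roughly refclk * M / (N * P), with any
         * platform specific offsets handled there).
         */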
4744        if (DISPLAY_VER(dev_priv) != 2) {
4745                if (IS_PINEVIEW(dev_priv))
4746                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
4747                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
4748                else
4749                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
4750                               DPLL_FPA01_P1_POST_DIV_SHIFT);
4751
4752                switch (dpll & DPLL_MODE_MASK) {
4753                case DPLLB_MODE_DAC_SERIAL:
4754                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
4755                                5 : 10;
4756                        break;
4757                case DPLLB_MODE_LVDS:
4758                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
4759                                7 : 14;
4760                        break;
4761                default:
4762                        drm_dbg_kms(&dev_priv->drm,
4763                                    "Unknown DPLL mode %08x in programmed "
4764                                    "mode\n", (int)(dpll & DPLL_MODE_MASK));
4765                        return;
4766                }
4767
4768                if (IS_PINEVIEW(dev_priv))
4769                        port_clock = pnv_calc_dpll_params(refclk, &clock);
4770                else
4771                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
4772        } else {
4773                enum pipe lvds_pipe;
4774
4775                if (IS_I85X(dev_priv) &&
4776                    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
4777                    lvds_pipe == crtc->pipe) {
4778                        u32 lvds = intel_de_read(dev_priv, LVDS);
4779
4780                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
4781                                       DPLL_FPA01_P1_POST_DIV_SHIFT);
4782
4783                        if (lvds & LVDS_CLKB_POWER_UP)
4784                                clock.p2 = 7;
4785                        else
4786                                clock.p2 = 14;
4787                } else {
4788                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
4789                                clock.p1 = 2;
4790                        else {
4791                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
4792                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
4793                        }
4794                        if (dpll & PLL_P2_DIVIDE_BY_4)
4795                                clock.p2 = 4;
4796                        else
4797                                clock.p2 = 2;
4798                }
4799
4800                port_clock = i9xx_calc_dpll_params(refclk, &clock);
4801        }
4802
4803        /*
4804         * This value includes pixel_multiplier. We will use
4805         * port_clock to compute adjusted_mode.crtc_clock in the
4806         * encoder's get_config() function.
4807         */
4808        pipe_config->port_clock = port_clock;
4809}
4810
4811int intel_dotclock_calculate(int link_freq,
4812                             const struct intel_link_m_n *m_n)
4813{
4814        /*
4815         * The calculation for the data clock is:
4816         * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4817         * But we want to avoid losing precision if possible, so:
4818         * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4819         *
4820         * and the dotclock follows directly from the link M/N:
4821         * dot_clock = (link_m * link_clock) / link_n
4822         */
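        /*
         * e.g. a 148500 kHz dotclock carried on a 270000 kHz (HBR) link is
         * programmed with link_m:link_n in the ratio 148500:270000, so this
         * returns 148500 (clocks here are in kHz).
         */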
4823
4824        if (!m_n->link_n)
4825                return 0;
4826
4827        return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
4828}
4829
4830/* Returns the currently programmed mode of the given encoder. */
4831struct drm_display_mode *
4832intel_encoder_current_mode(struct intel_encoder *encoder)
4833{
4834        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4835        struct intel_crtc_state *crtc_state;
4836        struct drm_display_mode *mode;
4837        struct intel_crtc *crtc;
4838        enum pipe pipe;
4839
4840        if (!encoder->get_hw_state(encoder, &pipe))
4841                return NULL;
4842
4843        crtc = intel_crtc_for_pipe(dev_priv, pipe);
4844
4845        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
4846        if (!mode)
4847                return NULL;
4848
4849        crtc_state = intel_crtc_state_alloc(crtc);
4850        if (!crtc_state) {
4851                kfree(mode);
4852                return NULL;
4853        }
4854
4855        if (!intel_crtc_get_pipe_config(crtc_state)) {
4856                kfree(crtc_state);
4857                kfree(mode);
4858                return NULL;
4859        }
4860
4861        intel_encoder_get_config(encoder, crtc_state);
4862
4863        intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
4864
4865        kfree(crtc_state);
4866
4867        return mode;
4868}
4869
4870/**
4871 * intel_wm_need_update - Check whether watermarks need updating
4872 * @cur: current plane state
4873 * @new: new plane state
4874 *
4875 * Check current plane state versus the new one to determine whether
4876 * watermarks need to be recalculated.
4877 *
4878 * Returns: true if the watermarks need to be recalculated, false otherwise.
4879 */
4880static bool intel_wm_need_update(const struct intel_plane_state *cur,
4881                                 struct intel_plane_state *new)
4882{
4883        /* Update watermarks on tiling or size changes. */
4884        if (new->uapi.visible != cur->uapi.visible)
4885                return true;
4886
4887        if (!cur->hw.fb || !new->hw.fb)
4888                return false;
4889
4890        if (cur->hw.fb->modifier != new->hw.fb->modifier ||
4891            cur->hw.rotation != new->hw.rotation ||
4892            drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
4893            drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
4894            drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
4895            drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
4896                return true;
4897
4898        return false;
4899}
4900
4901static bool needs_scaling(const struct intel_plane_state *state)
4902{
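        /* uapi.src is in 16.16 fixed point; uapi.dst is in whole pixels. */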
4903        int src_w = drm_rect_width(&state->uapi.src) >> 16;
4904        int src_h = drm_rect_height(&state->uapi.src) >> 16;
4905        int dst_w = drm_rect_width(&state->uapi.dst);
4906        int dst_h = drm_rect_height(&state->uapi.dst);
4907
4908        return (src_w != dst_w || src_h != dst_h);
4909}
4910
4911int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
4912                                    struct intel_crtc_state *new_crtc_state,
4913                                    const struct intel_plane_state *old_plane_state,
4914                                    struct intel_plane_state *new_plane_state)
4915{
4916        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
4917        struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
4918        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4919        bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
4920        bool was_crtc_enabled = old_crtc_state->hw.active;
4921        bool is_crtc_enabled = new_crtc_state->hw.active;
4922        bool turn_off, turn_on, visible, was_visible;
4923        int ret;
4924
4925        if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
4926                ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
4927                if (ret)
4928                        return ret;
4929        }
4930
4931        was_visible = old_plane_state->uapi.visible;
4932        visible = new_plane_state->uapi.visible;
4933
4934        if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
4935                was_visible = false;
4936
4937        /*
4938         * Visibility is calculated as if the crtc was on, but
4939         * after scaler setup everything depends on it being off
4940         * when the crtc isn't active.
4941         *
4942         * FIXME this is wrong for watermarks. Watermarks should also
4943         * be computed as if the pipe would be active. Perhaps move
4944         * per-plane wm computation to the .check_plane() hook, and
4945         * only combine the results from all planes in the current place?
4946         */
4947        if (!is_crtc_enabled) {
4948                intel_plane_set_invisible(new_crtc_state, new_plane_state);
4949                visible = false;
4950        }
4951
4952        if (!was_visible && !visible)
4953                return 0;
4954
4955        turn_off = was_visible && (!visible || mode_changed);
4956        turn_on = visible && (!was_visible || mode_changed);
4957
4958        drm_dbg_atomic(&dev_priv->drm,
4959                       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
4960                       crtc->base.base.id, crtc->base.name,
4961                       plane->base.base.id, plane->base.name,
4962                       was_visible, visible,
4963                       turn_off, turn_on, mode_changed);
4964
4965        if (turn_on) {
4966                if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
4967                        new_crtc_state->update_wm_pre = true;
4968
4969                /* must disable cxsr around plane enable/disable */
4970                if (plane->id != PLANE_CURSOR)
4971                        new_crtc_state->disable_cxsr = true;
4972        } else if (turn_off) {
4973                if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
4974                        new_crtc_state->update_wm_post = true;
4975
4976                /* must disable cxsr around plane enable/disable */
4977                if (plane->id != PLANE_CURSOR)
4978                        new_crtc_state->disable_cxsr = true;
4979        } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
4980                if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
4981                        /* FIXME bollocks */
4982                        new_crtc_state->update_wm_pre = true;
4983                        new_crtc_state->update_wm_post = true;
4984                }
4985        }
4986
4987        if (visible || was_visible)
4988                new_crtc_state->fb_bits |= plane->frontbuffer_bit;
4989
4990        /*
4991         * ILK/SNB DVSACNTR/Sprite Enable
4992         * IVB SPR_CTL/Sprite Enable
4993         * "When in Self Refresh Big FIFO mode, a write to enable the
4994         *  plane will be internally buffered and delayed while Big FIFO
4995         *  mode is exiting."
4996         *
4997         * Which means that enabling the sprite can take an extra frame
4998         * when we start in big FIFO mode (LP1+). Thus we need to drop
4999         * down to LP0 and wait for vblank in order to make sure the
5000         * sprite gets enabled on the next vblank after the register write.
5001         * Doing otherwise would risk enabling the sprite one frame after
5002         * we've already signalled flip completion. We can resume LP1+
5003         * once the sprite has been enabled.
5004         *
5005         *
5006         * WaCxSRDisabledForSpriteScaling:ivb
5007         * IVB SPR_SCALE/Scaling Enable
5008         * "Low Power watermarks must be disabled for at least one
5009         *  frame before enabling sprite scaling, and kept disabled
5010         *  until sprite scaling is disabled."
5011         *
5012         * ILK/SNB DVSASCALE/Scaling Enable
5013         * "When in Self Refresh Big FIFO mode, scaling enable will be
5014         *  masked off while Big FIFO mode is exiting."
5015         *
5016         * Despite the w/a only being listed for IVB we assume that
5017         * the ILK/SNB note has similar ramifications, hence we apply
5018         * the w/a on all three platforms.
5019         *
5020         * Based on experimental results, this also seems to be needed for the
5021         * primary plane, not only the sprite plane.
5022         */
5023        if (plane->id != PLANE_CURSOR &&
5024            (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
5025             IS_IVYBRIDGE(dev_priv)) &&
5026            (turn_on || (!needs_scaling(old_plane_state) &&
5027                         needs_scaling(new_plane_state))))
5028                new_crtc_state->disable_lp_wm = true;
5029
5030        return 0;
5031}
5032
5033static bool encoders_cloneable(const struct intel_encoder *a,
5034                               const struct intel_encoder *b)
5035{
5036        /* masks could be asymmetric, so check both ways */
5037        return a == b || (a->cloneable & (1 << b->type) &&
5038                          b->cloneable & (1 << a->type));
5039}
5040
5041static bool check_single_encoder_cloning(struct intel_atomic_state *state,
5042                                         struct intel_crtc *crtc,
5043                                         struct intel_encoder *encoder)
5044{
5045        struct intel_encoder *source_encoder;
5046        struct drm_connector *connector;
5047        struct drm_connector_state *connector_state;
5048        int i;
5049
5050        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5051                if (connector_state->crtc != &crtc->base)
5052                        continue;
5053
5054                source_encoder =
5055                        to_intel_encoder(connector_state->best_encoder);
5056                if (!encoders_cloneable(encoder, source_encoder))
5057                        return false;
5058        }
5059
5060        return true;
5061}
5062
5063static int icl_add_linked_planes(struct intel_atomic_state *state)
5064{
5065        struct intel_plane *plane, *linked;
5066        struct intel_plane_state *plane_state, *linked_plane_state;
5067        int i;
5068
5069        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5070                linked = plane_state->planar_linked_plane;
5071
5072                if (!linked)
5073                        continue;
5074
5075                linked_plane_state = intel_atomic_get_plane_state(state, linked);
5076                if (IS_ERR(linked_plane_state))
5077                        return PTR_ERR(linked_plane_state);
5078
5079                drm_WARN_ON(state->base.dev,
5080                            linked_plane_state->planar_linked_plane != plane);
5081                drm_WARN_ON(state->base.dev,
5082                            linked_plane_state->planar_slave == plane_state->planar_slave);
5083        }
5084
5085        return 0;
5086}
5087
5088static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
5089{
5090        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5091        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5092        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
5093        struct intel_plane *plane, *linked;
5094        struct intel_plane_state *plane_state;
5095        int i;
5096
5097        if (DISPLAY_VER(dev_priv) < 11)
5098                return 0;
5099
5100        /*
5101         * Destroy all old plane links and make the slave plane invisible
5102         * in the crtc_state->active_planes mask.
5103         */
5104        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5105                if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
5106                        continue;
5107
5108                plane_state->planar_linked_plane = NULL;
5109                if (plane_state->planar_slave && !plane_state->uapi.visible) {
5110                        crtc_state->enabled_planes &= ~BIT(plane->id);
5111                        crtc_state->active_planes &= ~BIT(plane->id);
5112                        crtc_state->update_planes |= BIT(plane->id);
5113                }
5114
5115                plane_state->planar_slave = false;
5116        }
5117
5118        if (!crtc_state->nv12_planes)
5119                return 0;
5120
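        /*
         * Link each plane that needs planar YUV (NV12) to a currently unused
         * Y-capable plane on the same pipe; the Y plane becomes the slave and
         * mirrors the master's state below.
         */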
5121        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5122                struct intel_plane_state *linked_state = NULL;
5123
5124                if (plane->pipe != crtc->pipe ||
5125                    !(crtc_state->nv12_planes & BIT(plane->id)))
5126                        continue;
5127
5128                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
5129                        if (!icl_is_nv12_y_plane(dev_priv, linked->id))
5130                                continue;
5131
5132                        if (crtc_state->active_planes & BIT(linked->id))
5133                                continue;
5134
5135                        linked_state = intel_atomic_get_plane_state(state, linked);
5136                        if (IS_ERR(linked_state))
5137                                return PTR_ERR(linked_state);
5138
5139                        break;
5140                }
5141
5142                if (!linked_state) {
5143                        drm_dbg_kms(&dev_priv->drm,
5144                                    "Need %d free Y planes for planar YUV\n",
5145                                    hweight8(crtc_state->nv12_planes));
5146
5147                        return -EINVAL;
5148                }
5149
5150                plane_state->planar_linked_plane = linked;
5151
5152                linked_state->planar_slave = true;
5153                linked_state->planar_linked_plane = plane;
5154                crtc_state->enabled_planes |= BIT(linked->id);
5155                crtc_state->active_planes |= BIT(linked->id);
5156                crtc_state->update_planes |= BIT(linked->id);
5157                drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
5158                            linked->base.name, plane->base.name);
5159
5160                /* Copy parameters to slave plane */
5161                linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
5162                linked_state->color_ctl = plane_state->color_ctl;
5163                linked_state->view = plane_state->view;
5164                linked_state->decrypt = plane_state->decrypt;
5165
5166                intel_plane_copy_hw_state(linked_state, plane_state);
5167                linked_state->uapi.src = plane_state->uapi.src;
5168                linked_state->uapi.dst = plane_state->uapi.dst;
5169
5170                if (icl_is_hdr_plane(dev_priv, plane->id)) {
5171                        if (linked->id == PLANE_SPRITE5)
5172                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
5173                        else if (linked->id == PLANE_SPRITE4)
5174                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
5175                        else if (linked->id == PLANE_SPRITE3)
5176                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
5177                        else if (linked->id == PLANE_SPRITE2)
5178                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
5179                        else
5180                                MISSING_CASE(linked->id);
5181                }
5182        }
5183
5184        return 0;
5185}
5186
5187static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
5188{
5189        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5190        struct intel_atomic_state *state =
5191                to_intel_atomic_state(new_crtc_state->uapi.state);
5192        const struct intel_crtc_state *old_crtc_state =
5193                intel_atomic_get_old_crtc_state(state, crtc);
5194
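        /* Only whether any C8 planes exist matters, not which ones. */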
5195        return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
5196}
5197
5198static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
5199{
5200        const struct drm_display_mode *pipe_mode =
5201                &crtc_state->hw.pipe_mode;
5202        int linetime_wm;
5203
5204        if (!crtc_state->hw.enable)
5205                return 0;
5206
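        /*
         * Line time in 1/8 us units, e.g. htotal = 2200 at a 148500 kHz pixel
         * clock gives 2200 * 8000 / 148500 ~= 119, i.e. roughly 14.8 us per
         * line; capped to fit the 0x1ff register field.
         */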
5207        linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5208                                        pipe_mode->crtc_clock);
5209
5210        return min(linetime_wm, 0x1ff);
5211}
5212
5213static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
5214                               const struct intel_cdclk_state *cdclk_state)
5215{
5216        const struct drm_display_mode *pipe_mode =
5217                &crtc_state->hw.pipe_mode;
5218        int linetime_wm;
5219
5220        if (!crtc_state->hw.enable)
5221                return 0;
5222
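        /*
         * Same 1/8 us line time as hsw_linetime_wm(), but computed against the
         * logical CDCLK rather than the pixel clock.
         */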
5223        linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5224                                        cdclk_state->logical.cdclk);
5225
5226        return min(linetime_wm, 0x1ff);
5227}
5228
5229static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
5230{
5231        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5232        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5233        const struct drm_display_mode *pipe_mode =
5234                &crtc_state->hw.pipe_mode;
5235        int linetime_wm;
5236
5237        if (!crtc_state->hw.enable)
5238                return 0;
5239
5240        linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
5241                                   crtc_state->pixel_rate);
5242
5243        /* Display WA #1135: BXT:ALL GLK:ALL */
5244        if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
5245            dev_priv->ipc_enabled)
5246                linetime_wm /= 2;
5247
5248        return min(linetime_wm, 0x1ff);
5249}
5250
5251static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
5252                                   struct intel_crtc *crtc)
5253{
5254        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5255        struct intel_crtc_state *crtc_state =
5256                intel_atomic_get_new_crtc_state(state, crtc);
5257        const struct intel_cdclk_state *cdclk_state;
5258
5259        if (DISPLAY_VER(dev_priv) >= 9)
5260                crtc_state->linetime = skl_linetime_wm(crtc_state);
5261        else
5262                crtc_state->linetime = hsw_linetime_wm(crtc_state);
5263
5264        if (!hsw_crtc_supports_ips(crtc))
5265                return 0;
5266
5267        cdclk_state = intel_atomic_get_cdclk_state(state);
5268        if (IS_ERR(cdclk_state))
5269                return PTR_ERR(cdclk_state);
5270
5271        crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
5272                                                       cdclk_state);
5273
5274        return 0;
5275}
5276
5277static int intel_crtc_atomic_check(struct intel_atomic_state *state,
5278                                   struct intel_crtc *crtc)
5279{
5280        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5281        struct intel_crtc_state *crtc_state =
5282                intel_atomic_get_new_crtc_state(state, crtc);
5283        bool mode_changed = intel_crtc_needs_modeset(crtc_state);
5284        int ret;
5285
5286        if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
5287            mode_changed && !crtc_state->hw.active)
5288                crtc_state->update_wm_post = true;
5289
5290        if (mode_changed && crtc_state->hw.enable &&
5291            !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
5292                ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
5293                if (ret)
5294                        return ret;
5295        }
5296
5297        /*
5298         * May need to update pipe gamma enable bits
5299         * when C8 planes are getting enabled/disabled.
5300         */
5301        if (c8_planes_changed(crtc_state))
5302                crtc_state->uapi.color_mgmt_changed = true;
5303
5304        if (mode_changed || crtc_state->update_pipe ||
5305            crtc_state->uapi.color_mgmt_changed) {
5306                ret = intel_color_check(crtc_state);
5307                if (ret)
5308                        return ret;
5309        }
5310
5311        ret = intel_compute_pipe_wm(state, crtc);
5312        if (ret) {
5313                drm_dbg_kms(&dev_priv->drm,
5314                            "Target pipe watermarks are invalid\n");
5315                return ret;
5316        }
5317
5318        /*
5319         * Calculate 'intermediate' watermarks that satisfy both the
5320         * old state and the new state.  We can program these
5321         * immediately.
5322         */
5323        ret = intel_compute_intermediate_wm(state, crtc);
5324        if (ret) {
5325                drm_dbg_kms(&dev_priv->drm,
5326                            "No valid intermediate pipe watermarks are possible\n");
5327                return ret;
5328        }
5329
5330        if (DISPLAY_VER(dev_priv) >= 9) {
5331                if (mode_changed || crtc_state->update_pipe) {
5332                        ret = skl_update_scaler_crtc(crtc_state);
5333                        if (ret)
5334                                return ret;
5335                }
5336
5337                ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
5338                if (ret)
5339                        return ret;
5340        }
5341
5342        if (HAS_IPS(dev_priv)) {
5343                ret = hsw_compute_ips_config(crtc_state);
5344                if (ret)
5345                        return ret;
5346        }
5347
5348        if (DISPLAY_VER(dev_priv) >= 9 ||
5349            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
5350                ret = hsw_compute_linetime_wm(state, crtc);
5351                if (ret)
5352                        return ret;
5353
5354        }
5355
5356        ret = intel_psr2_sel_fetch_update(state, crtc);
5357        if (ret)
5358                return ret;
5359
5360        return 0;
5361}
5362
5363static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
5364{
5365        struct intel_connector *connector;
5366        struct drm_connector_list_iter conn_iter;
5367
5368        drm_connector_list_iter_begin(dev, &conn_iter);
5369        for_each_intel_connector_iter(connector, &conn_iter) {
5370                struct drm_connector_state *conn_state = connector->base.state;
5371                struct intel_encoder *encoder =
5372                        to_intel_encoder(connector->base.encoder);
5373
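                /*
                 * Drop the reference held for the stale crtc link; a new one is
                 * taken below if the connector is still driven by an encoder.
                 */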
5374                if (conn_state->crtc)
5375                        drm_connector_put(&connector->base);
5376
5377                if (encoder) {
5378                        struct intel_crtc *crtc =
5379                                to_intel_crtc(encoder->base.crtc);
5380                        const struct intel_crtc_state *crtc_state =
5381                                to_intel_crtc_state(crtc->base.state);
5382
5383                        conn_state->best_encoder = &encoder->base;
5384                        conn_state->crtc = &crtc->base;
5385                        conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
5386
5387                        drm_connector_get(&connector->base);
5388                } else {
5389                        conn_state->best_encoder = NULL;
5390                        conn_state->crtc = NULL;
5391                }
5392        }
5393        drm_connector_list_iter_end(&conn_iter);
5394}
5395
5396static int
5397compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
5398                      struct intel_crtc_state *pipe_config)
5399{
5400        struct drm_connector *connector = conn_state->connector;
5401        struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5402        const struct drm_display_info *info = &connector->display_info;
5403        int bpp;
5404
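        /*
         * Round the connector's max_bpc down to the nearest supported depth
         * (6, 8, 10 or 12 bpc) and express it as total bpp.
         */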
5405        switch (conn_state->max_bpc) {
5406        case 6 ... 7:
5407                bpp = 6 * 3;
5408                break;
5409        case 8 ... 9:
5410                bpp = 8 * 3;
5411                break;
5412        case 10 ... 11:
5413                bpp = 10 * 3;
5414                break;
5415        case 12 ... 16:
5416                bpp = 12 * 3;
5417                break;
5418        default:
5419                MISSING_CASE(conn_state->max_bpc);
5420                return -EINVAL;
5421        }
5422
5423        if (bpp < pipe_config->pipe_bpp) {
5424                drm_dbg_kms(&i915->drm,
5425                            "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
5426                            "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
5427                            connector->base.id, connector->name,
5428                            bpp, 3 * info->bpc,
5429                            3 * conn_state->max_requested_bpc,
5430                            pipe_config->pipe_bpp);
5431
5432                pipe_config->pipe_bpp = bpp;
5433        }
5434
5435        return 0;
5436}
5437
5438static int
5439compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5440                          struct intel_crtc_state *pipe_config)
5441{
5442        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5443        struct drm_atomic_state *state = pipe_config->uapi.state;
5444        struct drm_connector *connector;
5445        struct drm_connector_state *connector_state;
5446        int bpp, i;
5447
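        /*
         * Start from the platform's maximum pipe bpp, then clamp it to each
         * connector's limit below.
         */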
5448        if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5449            IS_CHERRYVIEW(dev_priv)))
5450                bpp = 10*3;
5451        else if (DISPLAY_VER(dev_priv) >= 5)
5452                bpp = 12*3;
5453        else
5454                bpp = 8*3;
5455
5456        pipe_config->pipe_bpp = bpp;
5457
5458        /* Clamp display bpp to connector max bpp */
5459        for_each_new_connector_in_state(state, connector, connector_state, i) {
5460                int ret;
5461
5462                if (connector_state->crtc != &crtc->base)
5463                        continue;
5464
5465                ret = compute_sink_pipe_bpp(connector_state, pipe_config);
5466                if (ret)
5467                        return ret;
5468        }
5469
5470        return 0;
5471}
5472
5473static void intel_dump_crtc_timings(struct drm_i915_private *i915,
5474                                    const struct drm_display_mode *mode)
5475{
5476        drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
5477                    "type: 0x%x flags: 0x%x\n",
5478                    mode->crtc_clock,
5479                    mode->crtc_hdisplay, mode->crtc_hsync_start,
5480                    mode->crtc_hsync_end, mode->crtc_htotal,
5481                    mode->crtc_vdisplay, mode->crtc_vsync_start,
5482                    mode->crtc_vsync_end, mode->crtc_vtotal,
5483                    mode->type, mode->flags);
5484}
5485
5486static void
5487intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
5488                      const char *id, unsigned int lane_count,
5489                      const struct intel_link_m_n *m_n)
5490{
5491        struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5492
5493        drm_dbg_kms(&i915->drm,
5494                    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
5495                    id, lane_count,
5496                    m_n->gmch_m, m_n->gmch_n,
5497                    m_n->link_m, m_n->link_n, m_n->tu);
5498}
5499
5500static void
5501intel_dump_infoframe(struct drm_i915_private *dev_priv,
5502                     const union hdmi_infoframe *frame)
5503{
5504        if (!drm_debug_enabled(DRM_UT_KMS))
5505                return;
5506
5507        hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
5508}
5509
5510static void
5511intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
5512                      const struct drm_dp_vsc_sdp *vsc)
5513{
5514        if (!drm_debug_enabled(DRM_UT_KMS))
5515                return;
5516
5517        drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
5518}
5519
5520#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
5521
5522static const char * const output_type_str[] = {
5523        OUTPUT_TYPE(UNUSED),
5524        OUTPUT_TYPE(ANALOG),
5525        OUTPUT_TYPE(DVO),
5526        OUTPUT_TYPE(SDVO),
5527        OUTPUT_TYPE(LVDS),
5528        OUTPUT_TYPE(TVOUT),
5529        OUTPUT_TYPE(HDMI),
5530        OUTPUT_TYPE(DP),
5531        OUTPUT_TYPE(EDP),
5532        OUTPUT_TYPE(DSI),
5533        OUTPUT_TYPE(DDI),
5534        OUTPUT_TYPE(DP_MST),
5535};
5536
5537#undef OUTPUT_TYPE
5538
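/*
 * Format the names of all output types set in output_types into buf as a
 * comma separated list. Formatting stops once the buffer would overflow;
 * a one-time warning fires if any bits are left over (unknown type or
 * truncated buffer).
 */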
5539static void snprintf_output_types(char *buf, size_t len,
5540                                  unsigned int output_types)
5541{
5542        char *str = buf;
5543        int i;
5544
5545        str[0] = '\0';
5546
5547        for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
5548                int r;
5549
5550                if ((output_types & BIT(i)) == 0)
5551                        continue;
5552
5553                r = snprintf(str, len, "%s%s",
5554                             str != buf ? "," : "", output_type_str[i]);
5555                if (r >= len)
5556                        break;
5557                str += r;
5558                len -= r;
5559
5560                output_types &= ~BIT(i);
5561        }
5562
5563        WARN_ON_ONCE(output_types != 0);
5564}
5565
5566static const char * const output_format_str[] = {
5567        [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
5568        [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
5569        [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
5570};
5571
5572static const char *output_formats(enum intel_output_format format)
5573{
5574        if (format >= ARRAY_SIZE(output_format_str))
5575                return "invalid";
5576        return output_format_str[format];
5577}
5578
5579static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
5580{
5581        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5582        struct drm_i915_private *i915 = to_i915(plane->base.dev);
5583        const struct drm_framebuffer *fb = plane_state->hw.fb;
5584
5585        if (!fb) {
5586                drm_dbg_kms(&i915->drm,
5587                            "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
5588                            plane->base.base.id, plane->base.name,
5589                            yesno(plane_state->uapi.visible));
5590                return;
5591        }
5592
5593        drm_dbg_kms(&i915->drm,
5594                    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
5595                    plane->base.base.id, plane->base.name,
5596                    fb->base.id, fb->width, fb->height, &fb->format->format,
5597                    fb->modifier, yesno(plane_state->uapi.visible));
5598        drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
5599                    plane_state->hw.rotation, plane_state->scaler_id);
5600        if (plane_state->uapi.visible)
5601                drm_dbg_kms(&i915->drm,
5602                            "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
5603                            DRM_RECT_FP_ARG(&plane_state->uapi.src),
5604                            DRM_RECT_ARG(&plane_state->uapi.dst));
5605}
5606
5607static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
5608                                   struct intel_atomic_state *state,
5609                                   const char *context)
5610{
5611        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5612        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5613        const struct intel_plane_state *plane_state;
5614        struct intel_plane *plane;
5615        char buf[64];
5616        int i;
5617
5618        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
5619                    crtc->base.base.id, crtc->base.name,
5620                    yesno(pipe_config->hw.enable), context);
5621
5622        if (!pipe_config->hw.enable)
5623                goto dump_planes;
5624
5625        snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
5626        drm_dbg_kms(&dev_priv->drm,
5627                    "active: %s, output_types: %s (0x%x), output format: %s\n",
5628                    yesno(pipe_config->hw.active),
5629                    buf, pipe_config->output_types,
5630                    output_formats(pipe_config->output_format));
5631
5632        drm_dbg_kms(&dev_priv->drm,
5633                    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
5634                    transcoder_name(pipe_config->cpu_transcoder),
5635                    pipe_config->pipe_bpp, pipe_config->dither);
5636
5637        drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
5638                    transcoder_name(pipe_config->mst_master_transcoder));
5639
5640        drm_dbg_kms(&dev_priv->drm,
5641                    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
5642                    transcoder_name(pipe_config->master_transcoder),
5643                    pipe_config->sync_mode_slaves_mask);
5644
5645        drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
5646                    pipe_config->bigjoiner_slave ? "slave" :
5647                    pipe_config->bigjoiner ? "master" : "no");
5648
5649        drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
5650                    enableddisabled(pipe_config->splitter.enable),
5651                    pipe_config->splitter.link_count,
5652                    pipe_config->splitter.pixel_overlap);
5653
5654        if (pipe_config->has_pch_encoder)
5655                intel_dump_m_n_config(pipe_config, "fdi",
5656                                      pipe_config->fdi_lanes,
5657                                      &pipe_config->fdi_m_n);
5658
5659        if (intel_crtc_has_dp_encoder(pipe_config)) {
5660                intel_dump_m_n_config(pipe_config, "dp m_n",
5661                                pipe_config->lane_count, &pipe_config->dp_m_n);
5662                if (pipe_config->has_drrs)
5663                        intel_dump_m_n_config(pipe_config, "dp m2_n2",
5664                                              pipe_config->lane_count,
5665                                              &pipe_config->dp_m2_n2);
5666        }
5667
5668        drm_dbg_kms(&dev_priv->drm,
5669                    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
5670                    pipe_config->has_audio, pipe_config->has_infoframe,
5671                    pipe_config->infoframes.enable);
5672
5673        if (pipe_config->infoframes.enable &
5674            intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
5675                drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
5676                            pipe_config->infoframes.gcp);
5677        if (pipe_config->infoframes.enable &
5678            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
5679                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
5680        if (pipe_config->infoframes.enable &
5681            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
5682                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
5683        if (pipe_config->infoframes.enable &
5684            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
5685                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
5686        if (pipe_config->infoframes.enable &
5687            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
5688                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
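        /*
         * The HDR metadata may be transmitted via the gamut metadata DIP
         * rather than a dedicated DRM infoframe on some platforms, so the
         * same infoframes.drm payload is dumped for both packet types.
         */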
5689        if (pipe_config->infoframes.enable &
5690            intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
5691                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
5692        if (pipe_config->infoframes.enable &
5693            intel_hdmi_infoframe_enable(DP_SDP_VSC))
5694                intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
5695
5696        drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
5697                    yesno(pipe_config->vrr.enable),
5698                    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
5699                    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
5700                    pipe_config->vrr.flipline,
5701                    intel_vrr_vmin_vblank_start(pipe_config),
5702                    intel_vrr_vmax_vblank_start(pipe_config));
5703
5704        drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
5705        drm_mode_debug_printmodeline(&pipe_config->hw.mode);
5706        drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
5707        drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
5708        intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
5709        drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
5710        drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
5711        intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
5712        drm_dbg_kms(&dev_priv->drm,
5713                    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
5714                    pipe_config->port_clock,
5715                    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
5716                    pipe_config->pixel_rate);
5717
5718        drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
5719                    pipe_config->linetime, pipe_config->ips_linetime);
5720
5721        if (DISPLAY_VER(dev_priv) >= 9)
5722                drm_dbg_kms(&dev_priv->drm,
5723                            "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
5724                            crtc->num_scalers,
5725                            pipe_config->scaler_state.scaler_users,
5726                            pipe_config->scaler_state.scaler_id);
5727
5728        if (HAS_GMCH(dev_priv))
5729                drm_dbg_kms(&dev_priv->drm,
5730                            "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
5731                            pipe_config->gmch_pfit.control,
5732                            pipe_config->gmch_pfit.pgm_ratios,
5733                            pipe_config->gmch_pfit.lvds_border_bits);
5734        else
5735                drm_dbg_kms(&dev_priv->drm,
5736                            "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
5737                            DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
5738                            enableddisabled(pipe_config->pch_pfit.enabled),
5739                            yesno(pipe_config->pch_pfit.force_thru));
5740
5741        drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
5742                    pipe_config->ips_enabled, pipe_config->double_wide);
5743
5744        intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
5745
5746        if (IS_CHERRYVIEW(dev_priv))
5747                drm_dbg_kms(&dev_priv->drm,
5748                            "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
5749                            pipe_config->cgm_mode, pipe_config->gamma_mode,
5750                            pipe_config->gamma_enable, pipe_config->csc_enable);
5751        else
5752                drm_dbg_kms(&dev_priv->drm,
5753                            "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
5754                            pipe_config->csc_mode, pipe_config->gamma_mode,
5755                            pipe_config->gamma_enable, pipe_config->csc_enable);
5756
5757        drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
5758                    pipe_config->hw.degamma_lut ?
5759                    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
5760                    pipe_config->hw.gamma_lut ?
5761                    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
5762
5763dump_planes:
5764        if (!state)
5765                return;
5766
5767        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5768                if (plane->pipe == crtc->pipe)
5769                        intel_dump_plane_state(plane_state);
5770        }
5771}
5772
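/*
 * Check that no digital port is claimed more than once and that SST/HDMI
 * and MST streams never share a port; returns false if the requested
 * configuration violates either rule.
 */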
5773static bool check_digital_port_conflicts(struct intel_atomic_state *state)
5774{
5775        struct drm_device *dev = state->base.dev;
5776        struct drm_connector *connector;
5777        struct drm_connector_list_iter conn_iter;
5778        unsigned int used_ports = 0;
5779        unsigned int used_mst_ports = 0;
5780        bool ret = true;
5781
5782        /*
5783         * We're going to peek into connector->state,
5784         * hence connection_mutex must be held.
5785         */
5786        drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
5787
5788        /*
5789         * Walk the connector list instead of the encoder
5790         * list to detect the problem on ddi platforms
5791         * where there's just one encoder per digital port.
5792         */
5793        drm_connector_list_iter_begin(dev, &conn_iter);
5794        drm_for_each_connector_iter(connector, &conn_iter) {
5795                struct drm_connector_state *connector_state;
5796                struct intel_encoder *encoder;
5797
5798                connector_state =
5799                        drm_atomic_get_new_connector_state(&state->base,
5800                                                           connector);
5801                if (!connector_state)
5802                        connector_state = connector->state;
5803
5804                if (!connector_state->best_encoder)
5805                        continue;
5806
5807                encoder = to_intel_encoder(connector_state->best_encoder);
5808
5809                drm_WARN_ON(dev, !connector_state->crtc);
5810
5811                switch (encoder->type) {
5812                case INTEL_OUTPUT_DDI:
5813                        if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
5814                                break;
5815                        fallthrough;
5816                case INTEL_OUTPUT_DP:
5817                case INTEL_OUTPUT_HDMI:
5818                case INTEL_OUTPUT_EDP:
5819                        /* the same port mustn't appear more than once */
5820                        if (used_ports & BIT(encoder->port))
5821                                ret = false;
5822
5823                        used_ports |= BIT(encoder->port);
5824                        break;
5825                case INTEL_OUTPUT_DP_MST:
5826                        used_mst_ports |=
5827                                1 << encoder->port;
5828                        break;
5829                default:
5830                        break;
5831                }
5832        }
5833        drm_connector_list_iter_end(&conn_iter);
5834
5835        /* can't mix MST and SST/HDMI on the same port */
5836        if (used_ports & used_mst_ports)
5837                return false;
5838
5839        return ret;
5840}
5841
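/*
 * The uapi state mirrors what userspace asked for, while the hw state is
 * what actually gets programmed. The copy helpers below keep the two in
 * sync; they can diverge, e.g. for bigjoiner slave pipes, whose hw state is
 * driven by the master pipe rather than by their own uapi state.
 */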
5842static void
5843intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
5844                                           struct intel_crtc_state *crtc_state)
5845{
5846        const struct intel_crtc_state *master_crtc_state;
5847        struct intel_crtc *master_crtc;
5848
5849        master_crtc = intel_master_crtc(crtc_state);
5850        master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
5851
5852        /* No need to copy state if the master state is unchanged */
5853        if (master_crtc_state)
5854                intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
5855}
5856
5857static void
5858intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
5859                                 struct intel_crtc_state *crtc_state)
5860{
5861        crtc_state->hw.enable = crtc_state->uapi.enable;
5862        crtc_state->hw.active = crtc_state->uapi.active;
5863        crtc_state->hw.mode = crtc_state->uapi.mode;
5864        crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
5865        crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
5866
5867        intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
5868}
5869
5870static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
5871{
5872        if (crtc_state->bigjoiner_slave)
5873                return;
5874
5875        crtc_state->uapi.enable = crtc_state->hw.enable;
5876        crtc_state->uapi.active = crtc_state->hw.active;
5877        drm_WARN_ON(crtc_state->uapi.crtc->dev,
5878                    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
5879
5880        crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
5881        crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
5882
5883        /* copy color blobs to uapi */
5884        drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
5885                                  crtc_state->hw.degamma_lut);
5886        drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
5887                                  crtc_state->hw.gamma_lut);
5888        drm_property_replace_blob(&crtc_state->uapi.ctm,
5889                                  crtc_state->hw.ctm);
5890}
5891
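/*
 * Turn crtc_state into a bigjoiner slave of from_crtc_state: most of the
 * state is duplicated from the master, while the slave keeps its own uapi,
 * scaler, DPLL and CRC state, and its hw state is re-initialised from the
 * master's enable/active flags, modes, transcoder and audio settings.
 */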
5892static int
5893copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
5894                          const struct intel_crtc_state *from_crtc_state)
5895{
5896        struct intel_crtc_state *saved_state;
5897
5898        saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
5899        if (!saved_state)
5900                return -ENOMEM;
5901
5902        saved_state->uapi = crtc_state->uapi;
5903        saved_state->scaler_state = crtc_state->scaler_state;
5904        saved_state->shared_dpll = crtc_state->shared_dpll;
5905        saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5906        saved_state->crc_enabled = crtc_state->crc_enabled;
5907
5908        intel_crtc_free_hw_state(crtc_state);
5909        memcpy(crtc_state, saved_state, sizeof(*crtc_state));
5910        kfree(saved_state);
5911
5912        /* Re-init hw state */
5913        memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
5914        crtc_state->hw.enable = from_crtc_state->hw.enable;
5915        crtc_state->hw.active = from_crtc_state->hw.active;
5916        crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
5917        crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
5918
5919        /* Some fixups */
5920        crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
5921        crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
5922        crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
5923        crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
5924        crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
5925        crtc_state->bigjoiner_slave = true;
5926        crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
5927        crtc_state->has_audio = from_crtc_state->has_audio;
5928
5929        return 0;
5930}
5931
5932static int
5933intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
5934                                 struct intel_crtc_state *crtc_state)
5935{
5936        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5937        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5938        struct intel_crtc_state *saved_state;
5939
5940        saved_state = intel_crtc_state_alloc(crtc);
5941        if (!saved_state)
5942                return -ENOMEM;
5943
5944        /* free the old crtc_state->hw members */
5945        intel_crtc_free_hw_state(crtc_state);
5946
5947        /* FIXME: before the switch to atomic started, a new pipe_config was
5948         * kzalloc'd. Code that depends on any field being zero should be
5949         * fixed, so that the crtc_state can be safely duplicated. For now,
5950         * only fields that are known not to cause problems are preserved. */
5951
5952        saved_state->uapi = crtc_state->uapi;
5953        saved_state->scaler_state = crtc_state->scaler_state;
5954        saved_state->shared_dpll = crtc_state->shared_dpll;
5955        saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5956        memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
5957               sizeof(saved_state->icl_port_dplls));
5958        saved_state->crc_enabled = crtc_state->crc_enabled;
5959        if (IS_G4X(dev_priv) ||
5960            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5961                saved_state->wm = crtc_state->wm;
5962
5963        memcpy(crtc_state, saved_state, sizeof(*crtc_state));
5964        kfree(saved_state);
5965
5966        intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
5967
5968        return 0;
5969}
5970
5971static int
5972intel_modeset_pipe_config(struct intel_atomic_state *state,
5973                          struct intel_crtc_state *pipe_config)
5974{
5975        struct drm_crtc *crtc = pipe_config->uapi.crtc;
5976        struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5977        struct drm_connector *connector;
5978        struct drm_connector_state *connector_state;
5979        int base_bpp, ret, i;
5980        bool retry = true;
5981
5982        pipe_config->cpu_transcoder =
5983                (enum transcoder) to_intel_crtc(crtc)->pipe;
5984
5985        /*
5986         * Sanitize sync polarity flags based on the requested ones. If neither
5987         * positive nor negative polarity is requested, treat this as meaning
5988         * negative polarity.
5989         */
5990        if (!(pipe_config->hw.adjusted_mode.flags &
5991              (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
5992                pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
5993
5994        if (!(pipe_config->hw.adjusted_mode.flags &
5995              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
5996                pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
5997
5998        ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
5999                                        pipe_config);
6000        if (ret)
6001                return ret;
6002
6003        base_bpp = pipe_config->pipe_bpp;
6004
6005        /*
6006         * Determine the real pipe dimensions. Note that stereo modes can
6007         * increase the actual pipe size due to the frame doubling and
6008         * insertion of additional space for blanks between the frames. This
6009         * is stored in the crtc timings. We use the requested mode to do this
6010         * computation to clearly distinguish it from the adjusted mode, which
6011         * can be changed by the connectors in the below retry loop.
6012         */
6013        drm_mode_get_hv_timing(&pipe_config->hw.mode,
6014                               &pipe_config->pipe_src_w,
6015                               &pipe_config->pipe_src_h);
6016
6017        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6018                struct intel_encoder *encoder =
6019                        to_intel_encoder(connector_state->best_encoder);
6020
6021                if (connector_state->crtc != crtc)
6022                        continue;
6023
6024                if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
6025                        drm_dbg_kms(&i915->drm,
6026                                    "rejecting invalid cloning configuration\n");
6027                        return -EINVAL;
6028                }
6029
6030                /*
6031                 * Determine output_types before calling the .compute_config()
6032                 * hooks so that the hooks can use this information safely.
6033                 */
6034                if (encoder->compute_output_type)
6035                        pipe_config->output_types |=
6036                                BIT(encoder->compute_output_type(encoder, pipe_config,
6037                                                                 connector_state));
6038                else
6039                        pipe_config->output_types |= BIT(encoder->type);
6040        }
6041
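        /*
         * Everything from this label onwards may run twice: if
         * intel_crtc_compute_config() below returns -EAGAIN (the "CRTC bw
         * constrained" case), the encoder hooks are re-run once with the
         * adjusted constraints before giving up.
         */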
6042encoder_retry:
6043        /* Ensure the port clock defaults are reset when retrying. */
6044        pipe_config->port_clock = 0;
6045        pipe_config->pixel_multiplier = 1;
6046
6047        /* Fill in default crtc timings, allow encoders to overwrite them. */
6048        drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
6049                              CRTC_STEREO_DOUBLE);
6050
6051        /* Pass our mode to the connectors and the CRTC to give them a chance to
6052         * adjust it according to limitations or connector properties, and also
6053         * a chance to reject the mode entirely.
6054         */
6055        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6056                struct intel_encoder *encoder =
6057                        to_intel_encoder(connector_state->best_encoder);
6058
6059                if (connector_state->crtc != crtc)
6060                        continue;
6061
6062                ret = encoder->compute_config(encoder, pipe_config,
6063                                              connector_state);
6064                if (ret == -EDEADLK)
6065                        return ret;
6066                if (ret < 0) {
6067                        drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
6068                        return ret;
6069                }
6070        }
6071
6072        /* Set default port clock if not overwritten by the encoder. Needs to be
6073         * done afterwards in case the encoder adjusts the mode. */
6074        if (!pipe_config->port_clock)
6075                pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
6076                        * pipe_config->pixel_multiplier;
6077
6078        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
6079        if (ret == -EDEADLK)
6080                return ret;
6081        if (ret == -EAGAIN) {
6082                if (drm_WARN(&i915->drm, !retry,
6083                             "loop in pipe configuration computation\n"))
6084                        return -EINVAL;
6085
6086                drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
6087                retry = false;
6088                goto encoder_retry;
6089        }
6090        if (ret < 0) {
6091                drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
6092                return ret;
6093        }
6094
6095        /* Dithering seems to not pass through bits correctly when it should, so
6096         * only enable it on 6bpc panels and when it's not a compliance
6097         * test requesting a 6bpc video pattern.
6098         */
6099        pipe_config->dither = (pipe_config->pipe_bpp == 6 * 3) &&
6100                !pipe_config->dither_force_disable;
6101        drm_dbg_kms(&i915->drm,
6102                    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
6103                    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
6104
6105        return 0;
6106}
6107
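/*
 * Second configuration pass: run the optional encoder->compute_config_late()
 * hooks for every connector on this CRTC, intended for encoders that need to
 * inspect state produced by the earlier compute_config() pass.
 */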
6108static int
6109intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
6110{
6111        struct intel_atomic_state *state =
6112                to_intel_atomic_state(crtc_state->uapi.state);
6113        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6114        struct drm_connector_state *conn_state;
6115        struct drm_connector *connector;
6116        int i;
6117
6118        for_each_new_connector_in_state(&state->base, connector,
6119                                        conn_state, i) {
6120                struct intel_encoder *encoder =
6121                        to_intel_encoder(conn_state->best_encoder);
6122                int ret;
6123
6124                if (conn_state->crtc != &crtc->base ||
6125                    !encoder->compute_config_late)
6126                        continue;
6127
6128                ret = encoder->compute_config_late(encoder, crtc_state,
6129                                                   conn_state);
6130                if (ret)
6131                        return ret;
6132        }
6133
6134        return 0;
6135}
6136
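/*
 * Consider two clocks equal if they differ by less than ~5% of their sum
 * (note the integer division). For example 100000 vs. 104000 kHz passes,
 * while 100000 vs. 111000 kHz does not.
 */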
6137bool intel_fuzzy_clock_check(int clock1, int clock2)
6138{
6139        int diff;
6140
6141        if (clock1 == clock2)
6142                return true;
6143
6144        if (!clock1 || !clock2)
6145                return false;
6146
6147        diff = abs(clock1 - clock2);
6148
6149        if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
6150                return true;
6151
6152        return false;
6153}
6154
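/*
 * Compare two m/n link ratios. In exact mode the raw values must match;
 * otherwise the pair with the smaller n is scaled up by powers of two until
 * the n values coincide (bailing out if they never do) and the resulting m
 * values are then compared with intel_fuzzy_clock_check().
 */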
6155static bool
6156intel_compare_m_n(unsigned int m, unsigned int n,
6157                  unsigned int m2, unsigned int n2,
6158                  bool exact)
6159{
6160        if (m == m2 && n == n2)
6161                return true;
6162
6163        if (exact || !m || !n || !m2 || !n2)
6164                return false;
6165
6166        BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
6167
6168        if (n > n2) {
6169                while (n > n2) {
6170                        m2 <<= 1;
6171                        n2 <<= 1;
6172                }
6173        } else if (n < n2) {
6174                while (n < n2) {
6175                        m <<= 1;
6176                        n <<= 1;
6177                }
6178        }
6179
6180        if (n != n2)
6181                return false;
6182
6183        return intel_fuzzy_clock_check(m, m2);
6184}
6185
6186static bool
6187intel_compare_link_m_n(const struct intel_link_m_n *m_n,
6188                       const struct intel_link_m_n *m2_n2,
6189                       bool exact)
6190{
6191        return m_n->tu == m2_n2->tu &&
6192                intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
6193                                  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
6194                intel_compare_m_n(m_n->link_m, m_n->link_n,
6195                                  m2_n2->link_m, m2_n2->link_n, exact);
6196}
6197
6198static bool
6199intel_compare_infoframe(const union hdmi_infoframe *a,
6200                        const union hdmi_infoframe *b)
6201{
6202        return memcmp(a, b, sizeof(*a)) == 0;
6203}
6204
6205static bool
6206intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
6207                         const struct drm_dp_vsc_sdp *b)
6208{
6209        return memcmp(a, b, sizeof(*a)) == 0;
6210}
6211
6212static void
6213pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
6214                               bool fastset, const char *name,
6215                               const union hdmi_infoframe *a,
6216                               const union hdmi_infoframe *b)
6217{
6218        if (fastset) {
6219                if (!drm_debug_enabled(DRM_UT_KMS))
6220                        return;
6221
6222                drm_dbg_kms(&dev_priv->drm,
6223                            "fastset mismatch in %s infoframe\n", name);
6224                drm_dbg_kms(&dev_priv->drm, "expected:\n");
6225                hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
6226                drm_dbg_kms(&dev_priv->drm, "found:\n");
6227                hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
6228        } else {
6229                drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
6230                drm_err(&dev_priv->drm, "expected:\n");
6231                hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
6232                drm_err(&dev_priv->drm, "found:\n");
6233                hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
6234        }
6235}
6236
6237static void
6238pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
6239                                bool fastset, const char *name,
6240                                const struct drm_dp_vsc_sdp *a,
6241                                const struct drm_dp_vsc_sdp *b)
6242{
6243        if (fastset) {
6244                if (!drm_debug_enabled(DRM_UT_KMS))
6245                        return;
6246
6247                drm_dbg_kms(&dev_priv->drm,
6248                            "fastset mismatch in %s dp sdp\n", name);
6249                drm_dbg_kms(&dev_priv->drm, "expected:\n");
6250                drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
6251                drm_dbg_kms(&dev_priv->drm, "found:\n");
6252                drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
6253        } else {
6254                drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
6255                drm_err(&dev_priv->drm, "expected:\n");
6256                drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
6257                drm_err(&dev_priv->drm, "found:\n");
6258                drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
6259        }
6260}
6261
6262static void __printf(4, 5)
6263pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
6264                     const char *name, const char *format, ...)
6265{
6266        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6267        struct va_format vaf;
6268        va_list args;
6269
6270        va_start(args, format);
6271        vaf.fmt = format;
6272        vaf.va = &args;
6273
6274        if (fastset)
6275                drm_dbg_kms(&i915->drm,
6276                            "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
6277                            crtc->base.base.id, crtc->base.name, name, &vaf);
6278        else
6279                drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
6280                        crtc->base.base.id, crtc->base.name, name, &vaf);
6281
6282        va_end(args);
6283}
6284
6285static bool fastboot_enabled(struct drm_i915_private *dev_priv)
6286{
6287        if (dev_priv->params.fastboot != -1)
6288                return dev_priv->params.fastboot;
6289
6290        /* Enable fastboot by default on Skylake and newer */
6291        if (DISPLAY_VER(dev_priv) >= 9)
6292                return true;
6293
6294        /* Enable fastboot by default on VLV and CHV */
6295        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6296                return true;
6297
6298        /* Disabled by default on all others */
6299        return false;
6300}
6301
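/*
 * Compare two CRTC states field by field using the PIPE_CONF_CHECK_*
 * macros defined below. With fastset set, mismatches are logged at debug
 * level (so the caller can decide whether a fastset is possible); without
 * it they are reported as driver errors. All fields are checked even after
 * the first mismatch so that every difference ends up in the log.
 */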
6302static bool
6303intel_pipe_config_compare(const struct intel_crtc_state *current_config,
6304                          const struct intel_crtc_state *pipe_config,
6305                          bool fastset)
6306{
6307        struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
6308        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
6309        bool ret = true;
6310        u32 bp_gamma = 0;
6311        bool fixup_inherited = fastset &&
6312                current_config->inherited && !pipe_config->inherited;
6313
6314        if (fixup_inherited && !fastboot_enabled(dev_priv)) {
6315                drm_dbg_kms(&dev_priv->drm,
6316                            "initial modeset and fastboot not set\n");
6317                ret = false;
6318        }
6319
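/*
 * Each PIPE_CONF_CHECK_* macro below compares a single field of the two
 * states; on a mismatch it logs the expected/found values and sets ret to
 * false, but deliberately keeps going so that all mismatches are reported
 * in one go.
 */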
6320#define PIPE_CONF_CHECK_X(name) do { \
6321        if (current_config->name != pipe_config->name) { \
6322                pipe_config_mismatch(fastset, crtc, __stringify(name), \
6323                                     "(expected 0x%08x, found 0x%08x)", \
6324                                     current_config->name, \
6325                                     pipe_config->name); \
6326                ret = false; \
6327        } \
6328} while (0)
6329
6330#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
6331        if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
6332                pipe_config_mismatch(fastset, crtc, __stringify(name), \
6333                                     "(expected 0x%08x, found 0x%08x)", \
6334                                     current_config->name & (mask), \
6335                                     pipe_config->name & (mask)); \
6336                ret = false; \
6337        } \
6338} while (0)
6339
6340#define PIPE_CONF_CHECK_I(name) do { \
6341        if (current_config->name != pipe_config->name) { \
6342                pipe_config_mismatch(fastset, crtc, __stringify(name), \
6343                                     "(expected %i, found %i)", \
6344                                     current_config->name, \
6345                                     pipe_config->name); \
6346                ret = false; \
6347        } \
6348} while (0)
6349
6350#define PIPE_CONF_CHECK_BOOL(name) do { \
6351        if (current_config->name != pipe_config->name) { \
6352                pipe_config_mismatch(fastset, crtc,  __stringify(name), \
6353                                     "(expected %s, found %s)", \
6354                                     yesno(current_config->name), \
6355                                     yesno(pipe_config->name)); \
6356                ret = false; \
6357        } \
6358} while (0)
6359
6360/*
6361 * Checks state where we only read out whether it is enabled, but not the
6362 * entire state itself (like full infoframes or the ELD for audio). Such
6363 * state requires a full modeset on bootup to fix up.
6364 */
6365#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
6366        if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
6367                PIPE_CONF_CHECK_BOOL(name); \
6368        } else { \
6369                pipe_config_mismatch(fastset, crtc, __stringify(name), \
6370                                     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
6371                                     yesno(current_config->name), \
6372                                     yesno(pipe_config->name)); \
6373                ret = false; \
6374        } \
6375} while (0)
6376
6377#define PIPE_CONF_CHECK_P(name) do { \
6378        if (current_config->name != pipe_config->name) { \
6379                pipe_config_mismatch(fastset, crtc, __stringify(name), \
6380                                     "(expected %p, found %p)", \
6381                                     current_config->name, \
6382                                     pipe_config->name); \
6383                ret = false; \
6384        } \
6385} while (0)
6386
6387#define PIPE_CONF_CHECK_M_N(name) do { \
6388        if (!intel_compare_link_m_n(&current_config->name, \
6389                                    &pipe_config->name,\
6390                                    !fastset)) { \
6391                pipe_config_mismatch(fastset, crtc, __stringify(name), \
6392                                     "(expected tu %i gmch %i/%i link %i/%i, " \
6393                                     "found tu %i, gmch %i/%i link %i/%i)", \
6394                                     current_config->name.tu, \
6395                                     current_config->name.gmch_m, \
6396                                     current_config->name.gmch_n, \
6397                                     current_config->name.link_m, \
6398                                     current_config->name.link_n, \
6399                                     pipe_config->name.tu, \
6400                                     pipe_config->name.gmch_m, \
6401                                     pipe_config->name.gmch_n, \
6402                                     pipe_config->name.link_m, \
6403                                     pipe_config->name.link_n); \
6404                ret = false; \
6405        } \
6406} while (0)
6407
6408/* This is required for BDW+ where there is only one set of registers for
6409 * switching between high and low RR.
6410 * This macro can be used whenever a comparison has to be made between one
6411 * hw state and multiple sw state variables.
6412 */
6413#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
6414        if (!intel_compare_link_m_n(&current_config->name, \
6415                                    &pipe_config->name, !fastset) && \
6416            !intel_compare_link_m_n(&current_config->alt_name, \
6417                                    &pipe_config->name, !fastset)) { \
6418                pipe_config_mismatch(fastset, crtc, __stringify(name), \
6419                                     "(expected tu %i gmch %i/%i link %i/%i, " \
6420                                     "or tu %i gmch %i/%i link %i/%i, " \
6421                                     "found tu %i, gmch %i/%i link %i/%i)", \
6422                                     current_config->name.tu, \
6423                                     current_config->name.gmch_m, \
6424                                     current_config->name.gmch_n, \
6425                                     current_config->name.link_m, \
6426                                     current_config->name.link_n, \
6427                                     current_config->alt_name.tu, \
6428                                     current_config->alt_name.gmch_m, \
6429                                     current_config->alt_name.gmch_n, \
6430                                     current_config->alt_name.link_m, \
6431                                     current_config->alt_name.link_n, \
6432                                     pipe_config->name.tu, \
6433                                     pipe_config->name.gmch_m, \
6434                                     pipe_config->name.gmch_n, \
6435                                     pipe_config->name.link_m, \
6436                                     pipe_config->name.link_n); \
6437                ret = false; \
6438        } \
6439} while (0)
6440
6441#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
6442        if ((current_config->name ^ pipe_config->name) & (mask)) { \
6443                pipe_config_mismatch(fastset, crtc, __stringify(name), \
6444                                     "(%x) (expected %i, found %i)", \
6445                                     (mask), \
6446                                     current_config->name & (mask), \
6447                                     pipe_config->name & (mask)); \
6448                ret = false; \
6449        } \
6450} while (0)
6451
6452#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
6453        if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6454                pipe_config_mismatch(fastset, crtc, __stringify(name), \
6455                                     "(expected %i, found %i)", \
6456                                     current_config->name, \
6457                                     pipe_config->name); \
6458                ret = false; \
6459        } \
6460} while (0)
6461
6462#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
6463        if (!intel_compare_infoframe(&current_config->infoframes.name, \
6464                                     &pipe_config->infoframes.name)) { \
6465                pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
6466                                               &current_config->infoframes.name, \
6467                                               &pipe_config->infoframes.name); \
6468                ret = false; \
6469        } \
6470} while (0)
6471
6472#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
6473        if (!current_config->has_psr && !pipe_config->has_psr && \
6474            !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
6475                                      &pipe_config->infoframes.name)) { \
6476                pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
6477                                                &current_config->infoframes.name, \
6478                                                &pipe_config->infoframes.name); \
6479                ret = false; \
6480        } \
6481} while (0)
6482
6483#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
6484        if (current_config->name1 != pipe_config->name1) { \
6485                pipe_config_mismatch(fastset, crtc, __stringify(name1), \
6486                                "(expected %i, found %i, won't compare lut values)", \
6487                                current_config->name1, \
6488                                pipe_config->name1); \
6489                ret = false;\
6490        } else { \
6491                if (!intel_color_lut_equal(current_config->name2, \
6492                                        pipe_config->name2, pipe_config->name1, \
6493                                        bit_precision)) { \
6494                        pipe_config_mismatch(fastset, crtc, __stringify(name2), \
6495                                        "hw_state doesn't match sw_state"); \
6496                        ret = false; \
6497                } \
6498        } \
6499} while (0)
6500
6501#define PIPE_CONF_QUIRK(quirk) \
6502        ((current_config->quirks | pipe_config->quirks) & (quirk))
6503
6504        PIPE_CONF_CHECK_I(cpu_transcoder);
6505
6506        PIPE_CONF_CHECK_BOOL(has_pch_encoder);
6507        PIPE_CONF_CHECK_I(fdi_lanes);
6508        PIPE_CONF_CHECK_M_N(fdi_m_n);
6509
6510        PIPE_CONF_CHECK_I(lane_count);
6511        PIPE_CONF_CHECK_X(lane_lat_optim_mask);
6512
6513        if (DISPLAY_VER(dev_priv) < 8) {
6514                PIPE_CONF_CHECK_M_N(dp_m_n);
6515
6516                if (current_config->has_drrs)
6517                        PIPE_CONF_CHECK_M_N(dp_m2_n2);
6518        } else
6519                PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
6520
6521        PIPE_CONF_CHECK_X(output_types);
6522
6523        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
6524        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
6525        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
6526        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
6527        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
6528        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
6529
6530        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
6531        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
6532        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
6533        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
6534        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
6535        PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
6536
6537        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
6538        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
6539        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
6540        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
6541        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
6542        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
6543
6544        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
6545        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
6546        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
6547        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
6548        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
6549        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
6550
6551        PIPE_CONF_CHECK_I(pixel_multiplier);
6552
6553        PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6554                              DRM_MODE_FLAG_INTERLACE);
6555
6556        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6557                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6558                                      DRM_MODE_FLAG_PHSYNC);
6559                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6560                                      DRM_MODE_FLAG_NHSYNC);
6561                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6562                                      DRM_MODE_FLAG_PVSYNC);
6563                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6564                                      DRM_MODE_FLAG_NVSYNC);
6565        }
6566
6567        PIPE_CONF_CHECK_I(output_format);
6568        PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
6569        if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
6570            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6571                PIPE_CONF_CHECK_BOOL(limited_color_range);
6572
6573        PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
6574        PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
6575        PIPE_CONF_CHECK_BOOL(has_infoframe);
6576        PIPE_CONF_CHECK_BOOL(fec_enable);
6577
6578        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
6579
6580        PIPE_CONF_CHECK_X(gmch_pfit.control);
6581        /* pfit ratios are autocomputed by the hw on gen4+ */
6582        if (DISPLAY_VER(dev_priv) < 4)
6583                PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
6584        PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
6585
6586        /*
6587         * Changing the EDP transcoder input mux
6588         * (A_ONOFF vs. A_ON) requires a full modeset.
6589         */
6590        PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
6591
6592        if (!fastset) {
6593                PIPE_CONF_CHECK_I(pipe_src_w);
6594                PIPE_CONF_CHECK_I(pipe_src_h);
6595
6596                PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
6597                if (current_config->pch_pfit.enabled) {
6598                        PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
6599                        PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
6600                        PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
6601                        PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
6602                }
6603
6604                PIPE_CONF_CHECK_I(scaler_state.scaler_id);
6605                PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
6606
6607                PIPE_CONF_CHECK_X(gamma_mode);
6608                if (IS_CHERRYVIEW(dev_priv))
6609                        PIPE_CONF_CHECK_X(cgm_mode);
6610                else
6611                        PIPE_CONF_CHECK_X(csc_mode);
6612                PIPE_CONF_CHECK_BOOL(gamma_enable);
6613                PIPE_CONF_CHECK_BOOL(csc_enable);
6614
6615                PIPE_CONF_CHECK_I(linetime);
6616                PIPE_CONF_CHECK_I(ips_linetime);
6617
6618                bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
6619                if (bp_gamma)
6620                        PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
6621
6622                if (current_config->active_planes) {
6623                        PIPE_CONF_CHECK_BOOL(has_psr);
6624                        PIPE_CONF_CHECK_BOOL(has_psr2);
6625                        PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
6626                        PIPE_CONF_CHECK_I(dc3co_exitline);
6627                }
6628        }
6629
6630        PIPE_CONF_CHECK_BOOL(double_wide);
6631
6632        if (dev_priv->dpll.mgr) {
6633                PIPE_CONF_CHECK_P(shared_dpll);
6634
6635                PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
6636                PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
6637                PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
6638                PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
6639                PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6640                PIPE_CONF_CHECK_X(dpll_hw_state.spll);
6641                PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
6642                PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
6643                PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
6644                PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
6645                PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
6646                PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
6647                PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
6648                PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
6649                PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
6650                PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
6651                PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
6652                PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
6653                PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
6654                PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
6655                PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
6656                PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
6657                PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
6658                PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
6659                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
6660                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
6661                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
6662                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
6663                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
6664                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
6665                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
6666        }
6667
6668        PIPE_CONF_CHECK_X(dsi_pll.ctrl);
6669        PIPE_CONF_CHECK_X(dsi_pll.div);
6670
6671        if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
6672                PIPE_CONF_CHECK_I(pipe_bpp);
6673
6674        PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
6675        PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
6676        PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
6677
6678        PIPE_CONF_CHECK_I(min_voltage_level);
6679
6680        if (current_config->has_psr || pipe_config->has_psr)
6681                PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
6682                                            ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
6683        else
6684                PIPE_CONF_CHECK_X(infoframes.enable);
6685
6686        PIPE_CONF_CHECK_X(infoframes.gcp);
6687        PIPE_CONF_CHECK_INFOFRAME(avi);
6688        PIPE_CONF_CHECK_INFOFRAME(spd);
6689        PIPE_CONF_CHECK_INFOFRAME(hdmi);
6690        PIPE_CONF_CHECK_INFOFRAME(drm);
6691        PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
6692
6693        PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
6694        PIPE_CONF_CHECK_I(master_transcoder);
6695        PIPE_CONF_CHECK_BOOL(bigjoiner);
6696        PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
6697        PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
6698
6699        PIPE_CONF_CHECK_I(dsc.compression_enable);
6700        PIPE_CONF_CHECK_I(dsc.dsc_split);
6701        PIPE_CONF_CHECK_I(dsc.compressed_bpp);
6702
6703        PIPE_CONF_CHECK_BOOL(splitter.enable);
6704        PIPE_CONF_CHECK_I(splitter.link_count);
6705        PIPE_CONF_CHECK_I(splitter.pixel_overlap);
6706
6707        PIPE_CONF_CHECK_I(mst_master_transcoder);
6708
6709        PIPE_CONF_CHECK_BOOL(vrr.enable);
6710        PIPE_CONF_CHECK_I(vrr.vmin);
6711        PIPE_CONF_CHECK_I(vrr.vmax);
6712        PIPE_CONF_CHECK_I(vrr.flipline);
6713        PIPE_CONF_CHECK_I(vrr.pipeline_full);
6714        PIPE_CONF_CHECK_I(vrr.guardband);
6715
6716#undef PIPE_CONF_CHECK_X
6717#undef PIPE_CONF_CHECK_I
6718#undef PIPE_CONF_CHECK_BOOL
6719#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
6720#undef PIPE_CONF_CHECK_P
6721#undef PIPE_CONF_CHECK_FLAGS
6722#undef PIPE_CONF_CHECK_CLOCK_FUZZY
6723#undef PIPE_CONF_CHECK_COLOR_LUT
6724#undef PIPE_CONF_QUIRK
6725
6726        return ret;
6727}
6728
6729static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
6730                                           const struct intel_crtc_state *pipe_config)
6731{
6732        if (pipe_config->has_pch_encoder) {
6733                int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6734                                                            &pipe_config->fdi_m_n);
6735                int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
6736
6737                /*
6738                 * FDI already provided one idea for the dotclock.
6739                 * Yell if the encoder disagrees.
6740                 */
6741                drm_WARN(&dev_priv->drm,
6742                         !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
6743                         "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
6744                         fdi_dotclock, dotclock);
6745        }
6746}
6747
6748static void verify_wm_state(struct intel_crtc *crtc,
6749                            struct intel_crtc_state *new_crtc_state)
6750{
6751        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6752        struct skl_hw_state {
6753                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
6754                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
6755                struct skl_pipe_wm wm;
6756        } *hw;
6757        const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
6758        int level, max_level = ilk_wm_max_level(dev_priv);
6759        struct intel_plane *plane;
6760        u8 hw_enabled_slices;
6761
6762        if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
6763                return;
6764
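            /* Read the current hardware watermark and DDB state for comparison with the sw state. */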
6765        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
6766        if (!hw)
6767                return;
6768
6769        skl_pipe_wm_get_hw_state(crtc, &hw->wm);
6770
6771        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
6772
6773        hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
6774
6775        if (DISPLAY_VER(dev_priv) >= 11 &&
6776            hw_enabled_slices != dev_priv->dbuf.enabled_slices)
6777                drm_err(&dev_priv->drm,
6778                        "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
6779                        dev_priv->dbuf.enabled_slices,
6780                        hw_enabled_slices);
6781
6782        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6783                const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
6784                const struct skl_wm_level *hw_wm_level, *sw_wm_level;
6785
6786                /* Watermarks */
6787                for (level = 0; level <= max_level; level++) {
6788                        hw_wm_level = &hw->wm.planes[plane->id].wm[level];
6789                        sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
6790
6791                        if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
6792                                continue;
6793
6794                        drm_err(&dev_priv->drm,
6795                                "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6796                                plane->base.base.id, plane->base.name, level,
6797                                sw_wm_level->enable,
6798                                sw_wm_level->blocks,
6799                                sw_wm_level->lines,
6800                                hw_wm_level->enable,
6801                                hw_wm_level->blocks,
6802                                hw_wm_level->lines);
6803                }
6804
6805                hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
6806                sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
6807
6808                if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6809                        drm_err(&dev_priv->drm,
6810                                "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6811                                plane->base.base.id, plane->base.name,
6812                                sw_wm_level->enable,
6813                                sw_wm_level->blocks,
6814                                sw_wm_level->lines,
6815                                hw_wm_level->enable,
6816                                hw_wm_level->blocks,
6817                                hw_wm_level->lines);
6818                }
6819
6820                hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
6821                sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
6822
6823                if (HAS_HW_SAGV_WM(dev_priv) &&
6824                    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6825                        drm_err(&dev_priv->drm,
6826                                "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6827                                plane->base.base.id, plane->base.name,
6828                                sw_wm_level->enable,
6829                                sw_wm_level->blocks,
6830                                sw_wm_level->lines,
6831                                hw_wm_level->enable,
6832                                hw_wm_level->blocks,
6833                                hw_wm_level->lines);
6834                }
6835
6836                hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
6837                sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
6838
6839                if (HAS_HW_SAGV_WM(dev_priv) &&
6840                    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6841                        drm_err(&dev_priv->drm,
6842                                "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6843                                plane->base.base.id, plane->base.name,
6844                                sw_wm_level->enable,
6845                                sw_wm_level->blocks,
6846                                sw_wm_level->lines,
6847                                hw_wm_level->enable,
6848                                hw_wm_level->blocks,
6849                                hw_wm_level->lines);
6850                }
6851
6852                /* DDB */
6853                hw_ddb_entry = &hw->ddb_y[plane->id];
6854                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
6855
6856                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
6857                        drm_err(&dev_priv->drm,
6858                                "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
6859                                plane->base.base.id, plane->base.name,
6860                                sw_ddb_entry->start, sw_ddb_entry->end,
6861                                hw_ddb_entry->start, hw_ddb_entry->end);
6862                }
6863        }
6864
6865        kfree(hw);
6866}
6867
6868static void
6869verify_connector_state(struct intel_atomic_state *state,
6870                       struct intel_crtc *crtc)
6871{
6872        struct drm_connector *connector;
6873        struct drm_connector_state *new_conn_state;
6874        int i;
6875
6876        for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
6877                struct drm_encoder *encoder = connector->encoder;
6878                struct intel_crtc_state *crtc_state = NULL;
6879
6880                if (new_conn_state->crtc != &crtc->base)
6881                        continue;
6882
6883                if (crtc)
6884                        crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6885
6886                intel_connector_verify_state(crtc_state, new_conn_state);
6887
6888                I915_STATE_WARN(new_conn_state->best_encoder != encoder,
6889                     "connector's atomic encoder doesn't match legacy encoder\n");
6890        }
6891}
6892
6893static void
6894verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
6895{
6896        struct intel_encoder *encoder;
6897        struct drm_connector *connector;
6898        struct drm_connector_state *old_conn_state, *new_conn_state;
6899        int i;
6900
6901        for_each_intel_encoder(&dev_priv->drm, encoder) {
6902                bool enabled = false, found = false;
6903                enum pipe pipe;
6904
6905                drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
6906                            encoder->base.base.id,
6907                            encoder->base.name);
6908
6909                for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
6910                                                   new_conn_state, i) {
6911                        if (old_conn_state->best_encoder == &encoder->base)
6912                                found = true;
6913
6914                        if (new_conn_state->best_encoder != &encoder->base)
6915                                continue;
6916                        found = enabled = true;
6917
6918                        I915_STATE_WARN(new_conn_state->crtc !=
6919                                        encoder->base.crtc,
6920                             "connector's crtc doesn't match encoder crtc\n");
6921                }
6922
6923                if (!found)
6924                        continue;
6925
6926                I915_STATE_WARN(!!encoder->base.crtc != enabled,
6927                     "encoder's enabled state mismatch "
6928                     "(expected %i, found %i)\n",
6929                     !!encoder->base.crtc, enabled);
6930
6931                if (!encoder->base.crtc) {
6932                        bool active;
6933
6934                        active = encoder->get_hw_state(encoder, &pipe);
6935                        I915_STATE_WARN(active,
6936                             "encoder detached but still enabled on pipe %c.\n",
6937                             pipe_name(pipe));
6938                }
6939        }
6940}
6941
6942static void
6943verify_crtc_state(struct intel_crtc *crtc,
6944                  struct intel_crtc_state *old_crtc_state,
6945                  struct intel_crtc_state *new_crtc_state)
6946{
6947        struct drm_device *dev = crtc->base.dev;
6948        struct drm_i915_private *dev_priv = to_i915(dev);
6949        struct intel_encoder *encoder;
6950        struct intel_crtc_state *pipe_config = old_crtc_state;
6951        struct drm_atomic_state *state = old_crtc_state->uapi.state;
6952        struct intel_crtc *master_crtc;
6953
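            /* Reuse old_crtc_state as scratch space for the hw state readout below. */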
6954        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
6955        intel_crtc_free_hw_state(old_crtc_state);
6956        intel_crtc_state_reset(old_crtc_state, crtc);
6957        old_crtc_state->uapi.state = state;
6958
6959        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
6960                    crtc->base.name);
6961
6962        pipe_config->hw.enable = new_crtc_state->hw.enable;
6963
6964        intel_crtc_get_pipe_config(pipe_config);
6965
6966        /* we keep both pipes enabled on 830 */
6967        if (IS_I830(dev_priv) && pipe_config->hw.active)
6968                pipe_config->hw.active = new_crtc_state->hw.active;
6969
6970        I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
6971                        "crtc active state doesn't match with hw state "
6972                        "(expected %i, found %i)\n",
6973                        new_crtc_state->hw.active, pipe_config->hw.active);
6974
6975        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
6976                        "transitional active state does not match atomic hw state "
6977                        "(expected %i, found %i)\n",
6978                        new_crtc_state->hw.active, crtc->active);
6979
6980        master_crtc = intel_master_crtc(new_crtc_state);
6981
6982        for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
6983                enum pipe pipe;
6984                bool active;
6985
6986                active = encoder->get_hw_state(encoder, &pipe);
6987                I915_STATE_WARN(active != new_crtc_state->hw.active,
6988                                "[ENCODER:%i] active %i with crtc active %i\n",
6989                                encoder->base.base.id, active,
6990                                new_crtc_state->hw.active);
6991
6992                I915_STATE_WARN(active && master_crtc->pipe != pipe,
6993                                "Encoder connected to wrong pipe %c\n",
6994                                pipe_name(pipe));
6995
6996                if (active)
6997                        intel_encoder_get_config(encoder, pipe_config);
6998        }
6999
7000        if (!new_crtc_state->hw.active)
7001                return;
7002
7003        intel_pipe_config_sanity_check(dev_priv, pipe_config);
7004
7005        if (!intel_pipe_config_compare(new_crtc_state,
7006                                       pipe_config, false)) {
7007                I915_STATE_WARN(1, "pipe state doesn't match!\n");
7008                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
7009                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
7010        }
7011}
7012
7013static void
7014intel_verify_planes(struct intel_atomic_state *state)
7015{
7016        struct intel_plane *plane;
7017        const struct intel_plane_state *plane_state;
7018        int i;
7019
7020        for_each_new_intel_plane_in_state(state, plane,
7021                                          plane_state, i)
7022                assert_plane(plane, plane_state->planar_slave ||
7023                             plane_state->uapi.visible);
7024}
7025
7026static void
7027verify_single_dpll_state(struct drm_i915_private *dev_priv,
7028                         struct intel_shared_dpll *pll,
7029                         struct intel_crtc *crtc,
7030                         struct intel_crtc_state *new_crtc_state)
7031{
7032        struct intel_dpll_hw_state dpll_hw_state;
7033        u8 pipe_mask;
7034        bool active;
7035
7036        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
7037
7038        drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
7039
7040        active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
7041
7042        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
7043                I915_STATE_WARN(!pll->on && pll->active_mask,
7044                     "pll in active use but not on in sw tracking\n");
7045                I915_STATE_WARN(pll->on && !pll->active_mask,
7046                     "pll is on but not used by any active pipe\n");
7047                I915_STATE_WARN(pll->on != active,
7048                     "pll on state mismatch (expected %i, found %i)\n",
7049                     pll->on, active);
7050        }
7051
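            /* Without a crtc we can only sanity check the pll's global bookkeeping. */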
7052        if (!crtc) {
7053                I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
7054                                "more active pll users than references: 0x%x vs 0x%x\n",
7055                                pll->active_mask, pll->state.pipe_mask);
7056
7057                return;
7058        }
7059
7060        pipe_mask = BIT(crtc->pipe);
7061
7062        if (new_crtc_state->hw.active)
7063                I915_STATE_WARN(!(pll->active_mask & pipe_mask),
7064                                "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
7065                                pipe_name(crtc->pipe), pll->active_mask);
7066        else
7067                I915_STATE_WARN(pll->active_mask & pipe_mask,
7068                                "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
7069                                pipe_name(crtc->pipe), pll->active_mask);
7070
7071        I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
7072                        "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
7073                        pipe_mask, pll->state.pipe_mask);
7074
7075        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
7076                                          &dpll_hw_state,
7077                                          sizeof(dpll_hw_state)),
7078                        "pll hw state mismatch\n");
7079}
7080
7081static void
7082verify_shared_dpll_state(struct intel_crtc *crtc,
7083                         struct intel_crtc_state *old_crtc_state,
7084                         struct intel_crtc_state *new_crtc_state)
7085{
7086        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7087
7088        if (new_crtc_state->shared_dpll)
7089                verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
7090
7091        if (old_crtc_state->shared_dpll &&
7092            old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
7093                u8 pipe_mask = BIT(crtc->pipe);
7094                struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
7095
7096                I915_STATE_WARN(pll->active_mask & pipe_mask,
7097                                "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
7098                                pipe_name(crtc->pipe), pll->active_mask);
7099                I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
7100                                "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
7101                                pipe_name(crtc->pipe), pll->state.pipe_mask);
7102        }
7103}
7104
7105static void
7106verify_mpllb_state(struct intel_atomic_state *state,
7107                   struct intel_crtc_state *new_crtc_state)
7108{
7109        struct drm_i915_private *i915 = to_i915(state->base.dev);
7110        struct intel_mpllb_state mpllb_hw_state = { 0 };
7111        struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
7112        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7113        struct intel_encoder *encoder;
7114
7115        if (!IS_DG2(i915))
7116                return;
7117
7118        if (!new_crtc_state->hw.active)
7119                return;
7120
7121        encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
7122        intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
7123
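    /* Compare each sw-tracked MPLLB field with the value read back from the hardware. */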
7124#define MPLLB_CHECK(name) do { \
7125        if (mpllb_sw_state->name != mpllb_hw_state.name) { \
7126                pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
7127                                     "(expected 0x%08x, found 0x%08x)", \
7128                                     mpllb_sw_state->name, \
7129                                     mpllb_hw_state.name); \
7130        } \
7131} while (0)
7132
7133        MPLLB_CHECK(mpllb_cp);
7134        MPLLB_CHECK(mpllb_div);
7135        MPLLB_CHECK(mpllb_div2);
7136        MPLLB_CHECK(mpllb_fracn1);
7137        MPLLB_CHECK(mpllb_fracn2);
7138        MPLLB_CHECK(mpllb_sscen);
7139        MPLLB_CHECK(mpllb_sscstep);
7140
7141        /*
7142         * ref_control is handled by the hardware/firmware and never
7143         * programmed by the software, but the proper values are supplied
7144         * in the bspec for verification purposes.
7145         */
7146        MPLLB_CHECK(ref_control);
7147
7148#undef MPLLB_CHECK
7149}
7150
7151static void
7152intel_modeset_verify_crtc(struct intel_crtc *crtc,
7153                          struct intel_atomic_state *state,
7154                          struct intel_crtc_state *old_crtc_state,
7155                          struct intel_crtc_state *new_crtc_state)
7156{
7157        if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
7158                return;
7159
7160        verify_wm_state(crtc, new_crtc_state);
7161        verify_connector_state(state, crtc);
7162        verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
7163        verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
7164        verify_mpllb_state(state, new_crtc_state);
7165}
7166
7167static void
7168verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
7169{
7170        int i;
7171
7172        for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
7173                verify_single_dpll_state(dev_priv,
7174                                         &dev_priv->dpll.shared_dplls[i],
7175                                         NULL, NULL);
7176}
7177
7178static void
7179intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
7180                              struct intel_atomic_state *state)
7181{
7182        verify_encoder_state(dev_priv, state);
7183        verify_connector_state(state, NULL);
7184        verify_disabled_dpll_state(dev_priv);
7185}
7186
7187int intel_modeset_all_pipes(struct intel_atomic_state *state)
7188{
7189        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7190        struct intel_crtc *crtc;
7191
7192        /*
7193         * Add all pipes to the state, and force
7194         * a modeset on all the active ones.
7195         */
7196        for_each_intel_crtc(&dev_priv->drm, crtc) {
7197                struct intel_crtc_state *crtc_state;
7198                int ret;
7199
7200                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
7201                if (IS_ERR(crtc_state))
7202                        return PTR_ERR(crtc_state);
7203
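                    /* Skip inactive pipes and pipes already undergoing a full modeset. */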
7204                if (!crtc_state->hw.active ||
7205                    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
7206                        continue;
7207
7208                crtc_state->uapi.mode_changed = true;
7209
7210                ret = drm_atomic_add_affected_connectors(&state->base,
7211                                                         &crtc->base);
7212                if (ret)
7213                        return ret;
7214
7215                ret = intel_atomic_add_affected_planes(state, crtc);
7216                if (ret)
7217                        return ret;
7218
7219                crtc_state->update_planes |= crtc_state->active_planes;
7220        }
7221
7222        return 0;
7223}
7224
7225static void
7226intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
7227{
7228        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7229        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7230        struct drm_display_mode adjusted_mode =
7231                crtc_state->hw.adjusted_mode;
7232
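            /* With VRR enabled, derive the vblank timings from the VRR vmin/vmax limits. */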
7233        if (crtc_state->vrr.enable) {
7234                adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
7235                adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
7236                adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
7237                crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
7238        }
7239
7240        drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
7241
7242        crtc->mode_flags = crtc_state->mode_flags;
7243
7244        /*
7245         * The scanline counter increments at the leading edge of hsync.
7246         *
7247         * On most platforms it starts counting from vtotal-1 on the
7248         * first active line. That means the scanline counter value is
7249         * always one less than what we would expect. I.e. just after
7250         * start of vblank, which also occurs at start of hsync (on the
7251         * last active line), the scanline counter will read vblank_start-1.
7252         *
7253         * On gen2 the scanline counter starts counting from 1 instead
7254         * of vtotal-1, so we have to subtract one (or rather add vtotal-1
7255         * to keep the value positive), instead of adding one.
7256         *
7257         * On HSW+ the behaviour of the scanline counter depends on the output
7258         * type. For DP ports it behaves like most other platforms, but on HDMI
7259         * there's an extra 1 line difference. So we need to add two instead of
7260         * one to the value.
7261         *
7262         * On VLV/CHV DSI the scanline counter would appear to increment
7263         * approx. 1/3 of a scanline before start of vblank. Unfortunately
7264         * that means we can't tell whether we're in vblank or not while
7265         * we're on that particular line. We must still set scanline_offset
7266         * to 1 so that the vblank timestamps come out correct when we query
7267         * the scanline counter from within the vblank interrupt handler.
7268         * However if queried just before the start of vblank we'll get an
7269         * answer that's slightly in the future.
7270         */
7271        if (DISPLAY_VER(dev_priv) == 2) {
7272                int vtotal;
7273
7274                vtotal = adjusted_mode.crtc_vtotal;
7275                if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7276                        vtotal /= 2;
7277
7278                crtc->scanline_offset = vtotal - 1;
7279        } else if (HAS_DDI(dev_priv) &&
7280                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
7281                crtc->scanline_offset = 2;
7282        } else {
7283                crtc->scanline_offset = 1;
7284        }
7285}
7286
7287static void intel_modeset_clear_plls(struct intel_atomic_state *state)
7288{
7289        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7290        struct intel_crtc_state *new_crtc_state;
7291        struct intel_crtc *crtc;
7292        int i;
7293
7294        if (!dev_priv->dpll_funcs)
7295                return;
7296
7297        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7298                if (!intel_crtc_needs_modeset(new_crtc_state))
7299                        continue;
7300
7301                intel_release_shared_dplls(state, crtc);
7302        }
7303}
7304
7305/*
7306 * This implements the workaround described in the "notes" section of the mode
7307 * set sequence documentation. When going from no pipes or single pipe to
7308 * multiple pipes, and planes are enabled after the pipe, we need to wait at
7309 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
7310 */
7311static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
7312{
7313        struct intel_crtc_state *crtc_state;
7314        struct intel_crtc *crtc;
7315        struct intel_crtc_state *first_crtc_state = NULL;
7316        struct intel_crtc_state *other_crtc_state = NULL;
7317        enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
7318        int i;
7319
7320        /* look at all crtc's that are going to be enabled during the modeset */
7321        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7322                if (!crtc_state->hw.active ||
7323                    !intel_crtc_needs_modeset(crtc_state))
7324                        continue;
7325
7326                if (first_crtc_state) {
7327                        other_crtc_state = crtc_state;
7328                        break;
7329                } else {
7330                        first_crtc_state = crtc_state;
7331                        first_pipe = crtc->pipe;
7332                }
7333        }
7334
7335        /* No workaround needed? */
7336        if (!first_crtc_state)
7337                return 0;
7338
7339        /* w/a possibly needed, check how many crtc's are already enabled. */
7340        for_each_intel_crtc(state->base.dev, crtc) {
7341                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
7342                if (IS_ERR(crtc_state))
7343                        return PTR_ERR(crtc_state);
7344
7345                crtc_state->hsw_workaround_pipe = INVALID_PIPE;
7346
7347                if (!crtc_state->hw.active ||
7348                    intel_crtc_needs_modeset(crtc_state))
7349                        continue;
7350
7351                /* 2 or more enabled crtcs means no need for w/a */
7352                if (enabled_pipe != INVALID_PIPE)
7353                        return 0;
7354
7355                enabled_pipe = crtc->pipe;
7356        }
7357
7358        if (enabled_pipe != INVALID_PIPE)
7359                first_crtc_state->hsw_workaround_pipe = enabled_pipe;
7360        else if (other_crtc_state)
7361                other_crtc_state->hsw_workaround_pipe = first_pipe;
7362
7363        return 0;
7364}
7365
7366u8 intel_calc_active_pipes(struct intel_atomic_state *state,
7367                           u8 active_pipes)
7368{
7369        const struct intel_crtc_state *crtc_state;
7370        struct intel_crtc *crtc;
7371        int i;
7372
7373        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7374                if (crtc_state->hw.active)
7375                        active_pipes |= BIT(crtc->pipe);
7376                else
7377                        active_pipes &= ~BIT(crtc->pipe);
7378        }
7379
7380        return active_pipes;
7381}
7382
7383static int intel_modeset_checks(struct intel_atomic_state *state)
7384{
7385        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7386
7387        state->modeset = true;
7388
7389        if (IS_HASWELL(dev_priv))
7390                return hsw_mode_set_planes_workaround(state);
7391
7392        return 0;
7393}
7394
7395static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
7396                                     struct intel_crtc_state *new_crtc_state)
7397{
7398        if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
7399                return;
7400
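            /* The states fuzzy-match: downgrade the full modeset to a fastset pipe update. */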
7401        new_crtc_state->uapi.mode_changed = false;
7402        new_crtc_state->update_pipe = true;
7403}
7404
7405static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
7406                                    struct intel_crtc_state *new_crtc_state)
7407{
7408        /*
7409         * If we're not doing the full modeset we want to
7410         * keep the current M/N values as they may be
7411         * sufficiently different to the computed values
7412         * to cause problems.
7413         *
7414         * FIXME: should really copy more fuzzy state here
7415         */
7416        new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
7417        new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
7418        new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
7419        new_crtc_state->has_drrs = old_crtc_state->has_drrs;
7420}
7421
7422static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
7423                                          struct intel_crtc *crtc,
7424                                          u8 plane_ids_mask)
7425{
7426        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7427        struct intel_plane *plane;
7428
7429        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7430                struct intel_plane_state *plane_state;
7431
7432                if ((plane_ids_mask & BIT(plane->id)) == 0)
7433                        continue;
7434
7435                plane_state = intel_atomic_get_plane_state(state, plane);
7436                if (IS_ERR(plane_state))
7437                        return PTR_ERR(plane_state);
7438        }
7439
7440        return 0;
7441}
7442
7443int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
7444                                     struct intel_crtc *crtc)
7445{
7446        const struct intel_crtc_state *old_crtc_state =
7447                intel_atomic_get_old_crtc_state(state, crtc);
7448        const struct intel_crtc_state *new_crtc_state =
7449                intel_atomic_get_new_crtc_state(state, crtc);
7450
7451        return intel_crtc_add_planes_to_state(state, crtc,
7452                                              old_crtc_state->enabled_planes |
7453                                              new_crtc_state->enabled_planes);
7454}
7455
7456static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
7457{
7458        /* See {hsw,vlv,ivb}_plane_ratio() */
7459        return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
7460                IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7461                IS_IVYBRIDGE(dev_priv);
7462}
7463
7464static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
7465                                           struct intel_crtc *crtc,
7466                                           struct intel_crtc *other)
7467{
7468        const struct intel_plane_state *plane_state;
7469        struct intel_plane *plane;
7470        u8 plane_ids = 0;
7471        int i;
7472
7473        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7474                if (plane->pipe == crtc->pipe)
7475                        plane_ids |= BIT(plane->id);
7476        }
7477
7478        return intel_crtc_add_planes_to_state(state, other, plane_ids);
7479}
7480
7481static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
7482{
7483        const struct intel_crtc_state *crtc_state;
7484        struct intel_crtc *crtc;
7485        int i;
7486
7487        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7488                int ret;
7489
7490                if (!crtc_state->bigjoiner)
7491                        continue;
7492
7493                ret = intel_crtc_add_bigjoiner_planes(state, crtc,
7494                                                      crtc_state->bigjoiner_linked_crtc);
7495                if (ret)
7496                        return ret;
7497        }
7498
7499        return 0;
7500}
7501
7502static int intel_atomic_check_planes(struct intel_atomic_state *state)
7503{
7504        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7505        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7506        struct intel_plane_state *plane_state;
7507        struct intel_plane *plane;
7508        struct intel_crtc *crtc;
7509        int i, ret;
7510
7511        ret = icl_add_linked_planes(state);
7512        if (ret)
7513                return ret;
7514
7515        ret = intel_bigjoiner_add_affected_planes(state);
7516        if (ret)
7517                return ret;
7518
7519        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7520                ret = intel_plane_atomic_check(state, plane);
7521                if (ret) {
7522                        drm_dbg_atomic(&dev_priv->drm,
7523                                       "[PLANE:%d:%s] atomic driver check failed\n",
7524                                       plane->base.base.id, plane->base.name);
7525                        return ret;
7526                }
7527        }
7528
7529        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7530                                            new_crtc_state, i) {
7531                u8 old_active_planes, new_active_planes;
7532
7533                ret = icl_check_nv12_planes(new_crtc_state);
7534                if (ret)
7535                        return ret;
7536
7537                /*
7538                 * On some platforms the number of active planes affects
7539                 * the planes' minimum cdclk calculation. Add such planes
7540                 * to the state before we compute the minimum cdclk.
7541                 */
7542                if (!active_planes_affects_min_cdclk(dev_priv))
7543                        continue;
7544
7545                old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
7546                new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
7547
7548                if (hweight8(old_active_planes) == hweight8(new_active_planes))
7549                        continue;
7550
7551                ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
7552                if (ret)
7553                        return ret;
7554        }
7555
7556        return 0;
7557}
7558
7559static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
7560{
7561        struct intel_crtc_state *crtc_state;
7562        struct intel_crtc *crtc;
7563        int i;
7564
7565        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7566                struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7567                int ret;
7568
7569                ret = intel_crtc_atomic_check(state, crtc);
7570                if (ret) {
7571                        drm_dbg_atomic(&i915->drm,
7572                                       "[CRTC:%d:%s] atomic driver check failed\n",
7573                                       crtc->base.base.id, crtc->base.name);
7574                        return ret;
7575                }
7576        }
7577
7578        return 0;
7579}
7580
7581static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
7582                                               u8 transcoders)
7583{
7584        const struct intel_crtc_state *new_crtc_state;
7585        struct intel_crtc *crtc;
7586        int i;
7587
7588        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7589                if (new_crtc_state->hw.enable &&
7590                    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
7591                    intel_crtc_needs_modeset(new_crtc_state))
7592                        return true;
7593        }
7594
7595        return false;
7596}
7597
7598static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
7599                                        struct intel_crtc *crtc,
7600                                        struct intel_crtc_state *old_crtc_state,
7601                                        struct intel_crtc_state *new_crtc_state)
7602{
7603        struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
7604        struct intel_crtc *slave_crtc, *master_crtc;
7605
7606        /* slave being enabled, is the master still claiming this crtc? */
7607        if (old_crtc_state->bigjoiner_slave) {
7608                slave_crtc = crtc;
7609                master_crtc = old_crtc_state->bigjoiner_linked_crtc;
7610                master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
7611                if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
7612                        goto claimed;
7613        }
7614
7615        if (!new_crtc_state->bigjoiner)
7616                return 0;
7617
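            /* Master is being enabled: claim the adjacent crtc as the bigjoiner slave. */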
7618        slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
7619        if (!slave_crtc) {
7620                DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
7621                              "CRTC + 1 to be used, doesn't exist\n",
7622                              crtc->base.base.id, crtc->base.name);
7623                return -EINVAL;
7624        }
7625
7626        new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
7627        slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
7628        master_crtc = crtc;
7629        if (IS_ERR(slave_crtc_state))
7630                return PTR_ERR(slave_crtc_state);
7631
7632        /* master being enabled, slave was already configured? */
7633        if (slave_crtc_state->uapi.enable)
7634                goto claimed;
7635
7636        DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
7637                      slave_crtc->base.base.id, slave_crtc->base.name);
7638
7639        return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
7640
7641claimed:
7642        DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
7643                      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
7644                      slave_crtc->base.base.id, slave_crtc->base.name,
7645                      master_crtc->base.base.id, master_crtc->base.name);
7646        return -EINVAL;
7647}
7648
7649static void kill_bigjoiner_slave(struct intel_atomic_state *state,
7650                                 struct intel_crtc_state *master_crtc_state)
7651{
7652        struct intel_crtc_state *slave_crtc_state =
7653                intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
7654
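            /* Sever the bigjoiner link and let the slave follow its own uapi state again. */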
7655        slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
7656        slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
7657        slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
7658        intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
7659}
7660
7661/**
7662 * DOC: asynchronous flip implementation
7663 *
7664 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
7665 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
7666 * Correspondingly, support is currently added for the primary plane only.
7667 *
7668 * Async flip can only change the plane surface address, so anything else
7669 * changing is rejected by the intel_atomic_check_async() function.
7670 * Once this check is cleared, flip done interrupt is enabled using
7671 * the intel_crtc_enable_flip_done() function.
7672 *
7673 * As soon as the surface address register is written, flip done interrupt is
7674 * generated and the requested events are sent to userspace in the interrupt
7675 * handler itself. The timestamp and sequence sent during the flip done event
7676 * correspond to the last vblank and have no relation to the actual time when
7677 * the flip done event was sent.
7678 */
7679static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
7680{
7681        struct drm_i915_private *i915 = to_i915(state->base.dev);
7682        const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7683        const struct intel_plane_state *new_plane_state, *old_plane_state;
7684        struct intel_plane *plane;
7685        int i;
7686
7687        old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
7688        new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
7689
7690        if (intel_crtc_needs_modeset(new_crtc_state)) {
7691                drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
7692                return -EINVAL;
7693        }
7694
7695        if (!new_crtc_state->hw.active) {
7696                drm_dbg_kms(&i915->drm, "CRTC inactive\n");
7697                return -EINVAL;
7698        }
7699        if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
7700                drm_dbg_kms(&i915->drm,
7701                            "Active planes cannot be changed during async flip\n");
7702                return -EINVAL;
7703        }
7704
7705        for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7706                                             new_plane_state, i) {
7707                if (plane->pipe != crtc->pipe)
7708                        continue;
7709
7710                /*
7711                 * TODO: Async flip is only supported through the page flip IOCTL
7712                 * as of now. So support is currently added for the primary plane only.
7713                 * Support for other planes on platforms that support
7714                 * it (vlv/chv and icl+) should be added when async flip is
7715                 * enabled in the atomic IOCTL path.
7716                 */
7717                if (!plane->async_flip)
7718                        return -EINVAL;
7719
7720                /*
7721                 * FIXME: This check is kept generic for all platforms.
7722                 * Need to verify this for all gen9 platforms to enable
7723                 * this selectively if required.
7724                 */
7725                switch (new_plane_state->hw.fb->modifier) {
7726                case I915_FORMAT_MOD_X_TILED:
7727                case I915_FORMAT_MOD_Y_TILED:
7728                case I915_FORMAT_MOD_Yf_TILED:
7729                        break;
7730                default:
7731                        drm_dbg_kms(&i915->drm,
7732                                    "Linear memory/CCS does not support async flips\n");
7733                        return -EINVAL;
7734                }
7735
7736                if (new_plane_state->hw.fb->format->num_planes > 1) {
7737                        drm_dbg_kms(&i915->drm,
7738                                    "Planar formats not supported with async flips\n");
7739                        return -EINVAL;
7740                }
7741
7742                if (old_plane_state->view.color_plane[0].mapping_stride !=
7743                    new_plane_state->view.color_plane[0].mapping_stride) {
7744                        drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
7745                        return -EINVAL;
7746                }
7747
7748                if (old_plane_state->hw.fb->modifier !=
7749                    new_plane_state->hw.fb->modifier) {
7750                        drm_dbg_kms(&i915->drm,
7751                                    "Framebuffer modifiers cannot be changed in async flip\n");
7752                        return -EINVAL;
7753                }
7754
7755                if (old_plane_state->hw.fb->format !=
7756                    new_plane_state->hw.fb->format) {
7757                        drm_dbg_kms(&i915->drm,
7758                                    "Framebuffer format cannot be changed in async flip\n");
7759                        return -EINVAL;
7760                }
7761
7762                if (old_plane_state->hw.rotation !=
7763                    new_plane_state->hw.rotation) {
7764                        drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
7765                        return -EINVAL;
7766                }
7767
7768                if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
7769                    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
7770                        drm_dbg_kms(&i915->drm,
7771                                    "Plane size/co-ordinates cannot be changed in async flip\n");
7772                        return -EINVAL;
7773                }
7774
7775                if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
7776                        drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
7777                        return -EINVAL;
7778                }
7779
7780                if (old_plane_state->hw.pixel_blend_mode !=
7781                    new_plane_state->hw.pixel_blend_mode) {
7782                        drm_dbg_kms(&i915->drm,
7783                                    "Pixel blend mode cannot be changed in async flip\n");
7784                        return -EINVAL;
7785                }
7786
7787                if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
7788                        drm_dbg_kms(&i915->drm,
7789                                    "Color encoding cannot be changed in async flip\n");
7790                        return -EINVAL;
7791                }
7792
7793                if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
7794                        drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
7795                        return -EINVAL;
7796                }
7797
7798                /* plane decryption is allowed to change only in synchronous flips */
7799                if (old_plane_state->decrypt != new_plane_state->decrypt)
7800                        return -EINVAL;
7801        }
7802
7803        return 0;
7804}
7805
7806static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
7807{
7808        struct intel_crtc_state *crtc_state;
7809        struct intel_crtc *crtc;
7810        int i;
7811
7812        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7813                struct intel_crtc_state *linked_crtc_state;
7814                struct intel_crtc *linked_crtc;
7815                int ret;
7816
7817                if (!crtc_state->bigjoiner)
7818                        continue;
7819
7820                linked_crtc = crtc_state->bigjoiner_linked_crtc;
7821                linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
7822                if (IS_ERR(linked_crtc_state))
7823                        return PTR_ERR(linked_crtc_state);
7824
7825                if (!intel_crtc_needs_modeset(crtc_state))
7826                        continue;
7827
7828                linked_crtc_state->uapi.mode_changed = true;
7829
7830                ret = drm_atomic_add_affected_connectors(&state->base,
7831                                                         &linked_crtc->base);
7832                if (ret)
7833                        return ret;
7834
7835                ret = intel_atomic_add_affected_planes(state, linked_crtc);
7836                if (ret)
7837                        return ret;
7838        }
7839
7840        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7841                /* Kill old bigjoiner link, we may re-establish afterwards */
7842                if (intel_crtc_needs_modeset(crtc_state) &&
7843                    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
7844                        kill_bigjoiner_slave(state, crtc_state);
7845        }
7846
7847        return 0;
7848}
7849
7850/**
7851 * intel_atomic_check - validate state object
7852 * @dev: drm device
7853 * @_state: state to validate
7854 */
7855static int intel_atomic_check(struct drm_device *dev,
7856                              struct drm_atomic_state *_state)
7857{
7858        struct drm_i915_private *dev_priv = to_i915(dev);
7859        struct intel_atomic_state *state = to_intel_atomic_state(_state);
7860        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7861        struct intel_crtc *crtc;
7862        int ret, i;
7863        bool any_ms = false;
7864
7865        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7866                                            new_crtc_state, i) {
7867                if (new_crtc_state->inherited != old_crtc_state->inherited)
7868                        new_crtc_state->uapi.mode_changed = true;
7869        }
7870
7871        intel_vrr_check_modeset(state);
7872
7873        ret = drm_atomic_helper_check_modeset(dev, &state->base);
7874        if (ret)
7875                goto fail;
7876
7877        ret = intel_bigjoiner_add_affected_crtcs(state);
7878        if (ret)
7879                goto fail;
7880
7881        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7882                                            new_crtc_state, i) {
7883                if (!intel_crtc_needs_modeset(new_crtc_state)) {
7884                        /* Light copy */
7885                        intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
7886
7887                        continue;
7888                }
7889
7890                if (!new_crtc_state->uapi.enable) {
7891                        if (!new_crtc_state->bigjoiner_slave) {
7892                                intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
7893                                any_ms = true;
7894                        }
7895                        continue;
7896                }
7897
7898                ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
7899                if (ret)
7900                        goto fail;
7901
7902                ret = intel_modeset_pipe_config(state, new_crtc_state);
7903                if (ret)
7904                        goto fail;
7905
7906                ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
7907                                                   new_crtc_state);
7908                if (ret)
7909                        goto fail;
7910        }
7911
7912        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7913                                            new_crtc_state, i) {
7914                if (!intel_crtc_needs_modeset(new_crtc_state))
7915                        continue;
7916
7917                ret = intel_modeset_pipe_config_late(new_crtc_state);
7918                if (ret)
7919                        goto fail;
7920
7921                intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
7922        }
7923
7924        /*
7925         * Check if fastset is allowed by external dependencies like other
7926         * pipes and transcoders.
7927         *
7928         * Right now it only forces a full modeset when the MST master
7929         * transcoder did not change but the pipe of the master transcoder
7930         * needs a full modeset, so all slaves also need to do a full modeset.
7931         * Likewise, in case of port synced crtcs, if one of the synced crtcs
7932         * needs a full modeset, all other synced crtcs should be
7933         * forced to do a full modeset as well.
7934         */
7935        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7936                if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
7937                        continue;
7938
7939                if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
7940                        enum transcoder master = new_crtc_state->mst_master_transcoder;
7941
7942                        if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
7943                                new_crtc_state->uapi.mode_changed = true;
7944                                new_crtc_state->update_pipe = false;
7945                        }
7946                }
7947
7948                if (is_trans_port_sync_mode(new_crtc_state)) {
7949                        u8 trans = new_crtc_state->sync_mode_slaves_mask;
7950
7951                        if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
7952                                trans |= BIT(new_crtc_state->master_transcoder);
7953
7954                        if (intel_cpu_transcoders_need_modeset(state, trans)) {
7955                                new_crtc_state->uapi.mode_changed = true;
7956                                new_crtc_state->update_pipe = false;
7957                        }
7958                }
7959
7960                if (new_crtc_state->bigjoiner) {
7961                        struct intel_crtc_state *linked_crtc_state =
7962                                intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
7963
7964                        if (intel_crtc_needs_modeset(linked_crtc_state)) {
7965                                new_crtc_state->uapi.mode_changed = true;
7966                                new_crtc_state->update_pipe = false;
7967                        }
7968                }
7969        }
7970
7971        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7972                                            new_crtc_state, i) {
7973                if (intel_crtc_needs_modeset(new_crtc_state)) {
7974                        any_ms = true;
7975                        continue;
7976                }
7977
7978                if (!new_crtc_state->update_pipe)
7979                        continue;
7980
7981                intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
7982        }
7983
7984        if (any_ms && !check_digital_port_conflicts(state)) {
7985                drm_dbg_kms(&dev_priv->drm,
7986                            "rejecting conflicting digital port configuration\n");
7987                ret = -EINVAL;
7988                goto fail;
7989        }
7990
7991        ret = drm_dp_mst_atomic_check(&state->base);
7992        if (ret)
7993                goto fail;
7994
7995        ret = intel_atomic_check_planes(state);
7996        if (ret)
7997                goto fail;
7998
7999        ret = intel_compute_global_watermarks(state);
8000        if (ret)
8001                goto fail;
8002
8003        ret = intel_bw_atomic_check(state);
8004        if (ret)
8005                goto fail;
8006
8007        ret = intel_cdclk_atomic_check(state, &any_ms);
8008        if (ret)
8009                goto fail;
8010
8011        if (intel_any_crtc_needs_modeset(state))
8012                any_ms = true;
8013
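            /*
             * A full modeset on any pipe requires the global modeset
             * checks, a cdclk recomputation and releasing the shared
             * DPLLs of the affected pipes.
             */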
8014        if (any_ms) {
8015                ret = intel_modeset_checks(state);
8016                if (ret)
8017                        goto fail;
8018
8019                ret = intel_modeset_calc_cdclk(state);
8020                if (ret)
8021                        return ret;
8022
8023                intel_modeset_clear_plls(state);
8024        }
8025
8026        ret = intel_atomic_check_crtcs(state);
8027        if (ret)
8028                goto fail;
8029
8030        ret = intel_fbc_atomic_check(state);
8031        if (ret)
8032                goto fail;
8033
8034        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8035                                            new_crtc_state, i) {
8036                if (new_crtc_state->uapi.async_flip) {
8037                        ret = intel_atomic_check_async(state, crtc);
8038                        if (ret)
8039                                goto fail;
8040                }
8041
8042                if (!intel_crtc_needs_modeset(new_crtc_state) &&
8043                    !new_crtc_state->update_pipe)
8044                        continue;
8045
8046                intel_dump_pipe_config(new_crtc_state, state,
8047                                       intel_crtc_needs_modeset(new_crtc_state) ?
8048                                       "[modeset]" : "[fastset]");
8049        }
8050
8051        return 0;
8052
8053 fail:
8054        if (ret == -EDEADLK)
8055                return ret;
8056
8057        /*
8058         * FIXME would probably be nice to know which crtc specifically
8059         * caused the failure, in cases where we can pinpoint it.
8060         */
8061        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8062                                            new_crtc_state, i)
8063                intel_dump_pipe_config(new_crtc_state, state, "[failed]");
8064
8065        return ret;
8066}
8067
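/*
 * Run the plane prepare_fb hooks via the DRM helper and pre-allocate a DSB
 * for every CRTC that is about to be reprogrammed, i.e. one that needs a
 * modeset, a fastset or has color management changes pending.
 */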
8068static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
8069{
8070        struct intel_crtc_state *crtc_state;
8071        struct intel_crtc *crtc;
8072        int i, ret;
8073
8074        ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
8075        if (ret < 0)
8076                return ret;
8077
8078        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8079                bool mode_changed = intel_crtc_needs_modeset(crtc_state);
8080
8081                if (mode_changed || crtc_state->update_pipe ||
8082                    crtc_state->uapi.color_mgmt_changed) {
8083                        intel_dsb_prepare(crtc_state);
8084                }
8085        }
8086
8087        return 0;
8088}
8089
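/**
 * intel_crtc_arm_fifo_underrun - enable FIFO underrun reporting for a CRTC
 * @crtc: the CRTC
 * @crtc_state: the CRTC state being committed
 *
 * Enable CPU FIFO underrun reporting for @crtc, except on display version 2
 * when no planes are active (gen2 flags underruns whenever all planes are
 * disabled), and also enable PCH FIFO underrun reporting when the CRTC
 * drives a PCH encoder.
 */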
8090void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
8091                                  struct intel_crtc_state *crtc_state)
8092{
8093        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8094
8095        if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
8096                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8097
8098        if (crtc_state->has_pch_encoder) {
8099                enum pipe pch_transcoder =
8100                        intel_crtc_pch_transcoder(crtc);
8101
8102                intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
8103        }
8104}
8105
8106static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
8107                               const struct intel_crtc_state *new_crtc_state)
8108{
8109        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
8110        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8111
8112        /*
8113         * Update pipe size and adjust fitter if needed: the reason for this is
8114         * that in compute_mode_changes we check the native mode (not the pfit
8115         * mode) to see if we can flip rather than do a full mode set. In the
8116         * fastboot case, we'll flip, but if we don't update the pipesrc and
8117         * pfit state, we'll end up with a big fb scanned out into the wrong
8118         * sized surface.
8119         */
8120        intel_set_pipe_src_size(new_crtc_state);
8121
8122        /* on skylake this is done by detaching scalers */
8123        if (DISPLAY_VER(dev_priv) >= 9) {
8124                if (new_crtc_state->pch_pfit.enabled)
8125                        skl_pfit_enable(new_crtc_state);
8126        } else if (HAS_PCH_SPLIT(dev_priv)) {
8127                if (new_crtc_state->pch_pfit.enabled)
8128                        ilk_pfit_enable(new_crtc_state);
8129                else if (old_crtc_state->pch_pfit.enabled)
8130                        ilk_pfit_disable(old_crtc_state);
8131        }
8132
8133        /*
8134         * The register is supposedly single buffered so perhaps
8135         * not 100% correct to do this here. But SKL+ calculate
8136         * this based on the adjusted pixel rate, so pfit changes do
8137         * affect it and so it must be updated for fastsets.
8138         * HSW/BDW only really need this here for fastboot, after
8139         * that the value should not change without a full modeset.
8140         */
8141        if (DISPLAY_VER(dev_priv) >= 9 ||
8142            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8143                hsw_set_linetime_wm(new_crtc_state);
8144
8145        if (DISPLAY_VER(dev_priv) >= 11)
8146                icl_set_pipe_chicken(new_crtc_state);
8147}
8148
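/*
 * Per-pipe programming done inside the vblank evasion critical section,
 * before the plane registers are armed: color management commit, pipe misc
 * (BDW+), the fastset pipe updates, PSR2 manual tracking and the watermarks.
 */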
8149static void commit_pipe_pre_planes(struct intel_atomic_state *state,
8150                                   struct intel_crtc *crtc)
8151{
8152        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8153        const struct intel_crtc_state *old_crtc_state =
8154                intel_atomic_get_old_crtc_state(state, crtc);
8155        const struct intel_crtc_state *new_crtc_state =
8156                intel_atomic_get_new_crtc_state(state, crtc);
8157        bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8158
8159        /*
8160         * During modesets the pipe configuration was programmed as the
8161         * CRTC was enabled.
8162         */
8163        if (!modeset) {
8164                if (new_crtc_state->uapi.color_mgmt_changed ||
8165                    new_crtc_state->update_pipe)
8166                        intel_color_commit(new_crtc_state);
8167
8168                if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
8169                        bdw_set_pipemisc(new_crtc_state);
8170
8171                if (new_crtc_state->update_pipe)
8172                        intel_pipe_fastset(old_crtc_state, new_crtc_state);
8173        }
8174
8175        intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
8176
8177        intel_atomic_update_watermarks(state, crtc);
8178}
8179
8180static void commit_pipe_post_planes(struct intel_atomic_state *state,
8181                                    struct intel_crtc *crtc)
8182{
8183        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8184        const struct intel_crtc_state *new_crtc_state =
8185                intel_atomic_get_new_crtc_state(state, crtc);
8186
8187        /*
8188         * Disable the scaler(s) after the plane(s) so that we don't
8189         * get a catastrophic underrun even if the two operations
8190         * end up happening in two different frames.
8191         */
8192        if (DISPLAY_VER(dev_priv) >= 9 &&
8193            !intel_crtc_needs_modeset(new_crtc_state))
8194                skl_detach_scalers(new_crtc_state);
8195}
8196
8197static void intel_enable_crtc(struct intel_atomic_state *state,
8198                              struct intel_crtc *crtc)
8199{
8200        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8201        const struct intel_crtc_state *new_crtc_state =
8202                intel_atomic_get_new_crtc_state(state, crtc);
8203
8204        if (!intel_crtc_needs_modeset(new_crtc_state))
8205                return;
8206
8207        intel_crtc_update_active_timings(new_crtc_state);
8208
8209        dev_priv->display->crtc_enable(state, crtc);
8210
8211        if (new_crtc_state->bigjoiner_slave)
8212                return;
8213
8214        /* vblanks work again, re-enable pipe CRC. */
8215        intel_crtc_enable_pipe_crc(crtc);
8216}
8217
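/*
 * Commit a single CRTC that remains enabled: do the fastset-only
 * preparations, update FBC, write the non-armed plane registers, and then
 * arm the pipe and plane updates inside the vblank evasion critical section
 * bounded by intel_pipe_update_start()/intel_pipe_update_end().
 */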
8218static void intel_update_crtc(struct intel_atomic_state *state,
8219                              struct intel_crtc *crtc)
8220{
8221        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8222        const struct intel_crtc_state *old_crtc_state =
8223                intel_atomic_get_old_crtc_state(state, crtc);
8224        struct intel_crtc_state *new_crtc_state =
8225                intel_atomic_get_new_crtc_state(state, crtc);
8226        bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8227
8228        if (!modeset) {
8229                if (new_crtc_state->preload_luts &&
8230                    (new_crtc_state->uapi.color_mgmt_changed ||
8231                     new_crtc_state->update_pipe))
8232                        intel_color_load_luts(new_crtc_state);
8233
8234                intel_pre_plane_update(state, crtc);
8235
8236                if (new_crtc_state->update_pipe)
8237                        intel_encoders_update_pipe(state, crtc);
8238        }
8239
8240        intel_fbc_update(state, crtc);
8241
8242        intel_update_planes_on_crtc(state, crtc);
8243
8244        /* Perform vblank evasion around commit operation */
8245        intel_pipe_update_start(new_crtc_state);
8246
8247        commit_pipe_pre_planes(state, crtc);
8248
8249        if (DISPLAY_VER(dev_priv) >= 9)
8250                skl_arm_planes_on_crtc(state, crtc);
8251        else
8252                i9xx_arm_planes_on_crtc(state, crtc);
8253
8254        commit_pipe_post_planes(state, crtc);
8255
8256        intel_pipe_update_end(new_crtc_state);
8257
8258        /*
8259         * We usually enable FIFO underrun interrupts as part of the
8260         * CRTC enable sequence during modesets.  But when we inherit a
8261         * valid pipe configuration from the BIOS we need to take care
8262         * of enabling them on the CRTC's first fastset.
8263         */
8264        if (new_crtc_state->update_pipe && !modeset &&
8265            old_crtc_state->inherited)
8266                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
8267}
8268
8269static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
8270                                          struct intel_crtc_state *old_crtc_state,
8271                                          struct intel_crtc_state *new_crtc_state,
8272                                          struct intel_crtc *crtc)
8273{
8274        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8275
8276        /*
8277         * We need to disable pipe CRC before disabling the pipe,
8278         * or we race against vblank off.
8279         */
8280        intel_crtc_disable_pipe_crc(crtc);
8281
8282        dev_priv->display->crtc_disable(state, crtc);
8283        crtc->active = false;
8284        intel_fbc_disable(crtc);
8285        intel_disable_shared_dpll(old_crtc_state);
8286
8287        /* FIXME unify this for all platforms */
8288        if (!new_crtc_state->hw.active &&
8289            !HAS_GMCH(dev_priv))
8290                intel_initial_watermarks(state, crtc);
8291}
8292
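/*
 * Disable all CRTCs that need a full modeset: first tear down the planes,
 * then disable the CRTCs themselves in two passes so that port sync, MST
 * and bigjoiner slaves are shut down before their masters.
 */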
8293static void intel_commit_modeset_disables(struct intel_atomic_state *state)
8294{
8295        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8296        struct intel_crtc *crtc;
8297        u32 handled = 0;
8298        int i;
8299
8300        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8301                                            new_crtc_state, i) {
8302                if (!intel_crtc_needs_modeset(new_crtc_state))
8303                        continue;
8304
8305                if (!old_crtc_state->hw.active)
8306                        continue;
8307
8308                intel_pre_plane_update(state, crtc);
8309                intel_crtc_disable_planes(state, crtc);
8310        }
8311
8312        /* Only disable port sync slaves, MST slaves and bigjoiner slaves */
8313        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8314                                            new_crtc_state, i) {
8315                if (!intel_crtc_needs_modeset(new_crtc_state))
8316                        continue;
8317
8318                if (!old_crtc_state->hw.active)
8319                        continue;
8320
8321                /* In case of Transcoder port Sync, master and slave CRTCs can
8322                 * be assigned in any order and we need to make sure that the
8323                 * slave CRTCs are disabled first and then the master CRTC,
8324                 * since slave vblanks are masked until the master's vblank.
8325                 */
8326                if (!is_trans_port_sync_slave(old_crtc_state) &&
8327                    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
8328                    !old_crtc_state->bigjoiner_slave)
8329                        continue;
8330
8331                intel_old_crtc_state_disables(state, old_crtc_state,
8332                                              new_crtc_state, crtc);
8333                handled |= BIT(crtc->pipe);
8334        }
8335
8336        /* Disable everything else left on */
8337        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8338                                            new_crtc_state, i) {
8339                if (!intel_crtc_needs_modeset(new_crtc_state) ||
8340                    (handled & BIT(crtc->pipe)))
8341                        continue;
8342
8343                if (!old_crtc_state->hw.active)
8344                        continue;
8345
8346                intel_old_crtc_state_disables(state, old_crtc_state,
8347                                              new_crtc_state, crtc);
8348        }
8349}
8350
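/*
 * Simple commit_modeset_enables() implementation: enable and then update
 * every CRTC that should be active, in state order. See
 * skl_commit_modeset_enables() below for the DDB-ordering-aware variant.
 */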
8351static void intel_commit_modeset_enables(struct intel_atomic_state *state)
8352{
8353        struct intel_crtc_state *new_crtc_state;
8354        struct intel_crtc *crtc;
8355        int i;
8356
8357        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8358                if (!new_crtc_state->hw.active)
8359                        continue;
8360
8361                intel_enable_crtc(state, crtc);
8362                intel_update_crtc(state, crtc);
8363        }
8364}
8365
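/*
 * skl+ variant of commit_modeset_enables(): commit the CRTCs in an order
 * that guarantees the DDB allocation of a pipe never overlaps with the DDB
 * allocation of another pipe that hasn't been updated yet, waiting for a
 * vblank in between where necessary.
 */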
8366static void skl_commit_modeset_enables(struct intel_atomic_state *state)
8367{
8368        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8369        struct intel_crtc *crtc;
8370        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8371        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
8372        u8 update_pipes = 0, modeset_pipes = 0;
8373        int i;
8374
8375        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8376                enum pipe pipe = crtc->pipe;
8377
8378                if (!new_crtc_state->hw.active)
8379                        continue;
8380
8381                /* ignore allocations for crtcs that have been turned off. */
8382                if (!intel_crtc_needs_modeset(new_crtc_state)) {
8383                        entries[pipe] = old_crtc_state->wm.skl.ddb;
8384                        update_pipes |= BIT(pipe);
8385                } else {
8386                        modeset_pipes |= BIT(pipe);
8387                }
8388        }
8389
8390        /*
8391         * Whenever the number of active pipes changes, we need to make sure we
8392         * update the pipes in the right order so that their ddb allocations
8393         * never overlap with each other between CRTC updates. Otherwise we'll
8394         * cause pipe underruns and other bad stuff.
8395         *
8396         * So first let's enable all pipes that do not need a full modeset, as
8397         * those don't have any external dependencies.
8398         */
8399        while (update_pipes) {
8400                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8401                                                    new_crtc_state, i) {
8402                        enum pipe pipe = crtc->pipe;
8403
8404                        if ((update_pipes & BIT(pipe)) == 0)
8405                                continue;
8406
8407                        if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8408                                                        entries, I915_MAX_PIPES, pipe))
8409                                continue;
8410
8411                        entries[pipe] = new_crtc_state->wm.skl.ddb;
8412                        update_pipes &= ~BIT(pipe);
8413
8414                        intel_update_crtc(state, crtc);
8415
8416                        /*
8417                         * If this is an already active pipe, its DDB changed,
8418                         * and this isn't the last pipe that needs updating,
8419                         * then we need to wait for a vblank to pass for the
8420                         * new ddb allocation to take effect.
8421                         */
8422                        if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
8423                                                 &old_crtc_state->wm.skl.ddb) &&
8424                            (update_pipes | modeset_pipes))
8425                                intel_crtc_wait_for_next_vblank(crtc);
8426                }
8427        }
8428
8429        update_pipes = modeset_pipes;
8430
8431        /*
8432         * Enable all pipes that need a modeset and do not depend on other
8433         * pipes.
8434         */
8435        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8436                enum pipe pipe = crtc->pipe;
8437
8438                if ((modeset_pipes & BIT(pipe)) == 0)
8439                        continue;
8440
8441                if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
8442                    is_trans_port_sync_master(new_crtc_state) ||
8443                    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
8444                        continue;
8445
8446                modeset_pipes &= ~BIT(pipe);
8447
8448                intel_enable_crtc(state, crtc);
8449        }
8450
8451        /*
8452         * Then we enable all remaining pipes that depend on other
8453         * pipes: MST slaves, port sync masters and bigjoiner masters.
8454         */
8455        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8456                enum pipe pipe = crtc->pipe;
8457
8458                if ((modeset_pipes & BIT(pipe)) == 0)
8459                        continue;
8460
8461                modeset_pipes &= ~BIT(pipe);
8462
8463                intel_enable_crtc(state, crtc);
8464        }
8465
8466        /*
8467         * Finally we do the plane updates/etc. for all pipes that got enabled.
8468         */
8469        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8470                enum pipe pipe = crtc->pipe;
8471
8472                if ((update_pipes & BIT(pipe)) == 0)
8473                        continue;
8474
8475                drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8476                                                                        entries, I915_MAX_PIPES, pipe));
8477
8478                entries[pipe] = new_crtc_state->wm.skl.ddb;
8479                update_pipes &= ~BIT(pipe);
8480
8481                intel_update_crtc(state, crtc);
8482        }
8483
8484        drm_WARN_ON(&dev_priv->drm, modeset_pipes);
8485        drm_WARN_ON(&dev_priv->drm, update_pipes);
8486}
8487
8488static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
8489{
8490        struct intel_atomic_state *state, *next;
8491        struct llist_node *freed;
8492
8493        freed = llist_del_all(&dev_priv->atomic_helper.free_list);
8494        llist_for_each_entry_safe(state, next, freed, freed)
8495                drm_atomic_state_put(&state->base);
8496}
8497
8498static void intel_atomic_helper_free_state_worker(struct work_struct *work)
8499{
8500        struct drm_i915_private *dev_priv =
8501                container_of(work, typeof(*dev_priv), atomic_helper.free_work);
8502
8503        intel_atomic_helper_free_state(dev_priv);
8504}
8505
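/*
 * Sleep until the commit_ready fence has signalled, waking up early if a GPU
 * reset that needs modeset handling (I915_RESET_MODESET) gets flagged in the
 * meantime.
 */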
8506static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
8507{
8508        struct wait_queue_entry wait_fence, wait_reset;
8509        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
8510
8511        init_wait_entry(&wait_fence, 0);
8512        init_wait_entry(&wait_reset, 0);
8513        for (;;) {
8514                prepare_to_wait(&intel_state->commit_ready.wait,
8515                                &wait_fence, TASK_UNINTERRUPTIBLE);
8516                prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
8517                                              I915_RESET_MODESET),
8518                                &wait_reset, TASK_UNINTERRUPTIBLE);
8519
8521                if (i915_sw_fence_done(&intel_state->commit_ready) ||
8522                    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
8523                        break;
8524
8525                schedule();
8526        }
8527        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
8528        finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
8529                                  I915_RESET_MODESET),
8530                    &wait_reset);
8531}
8532
8533static void intel_cleanup_dsbs(struct intel_atomic_state *state)
8534{
8535        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8536        struct intel_crtc *crtc;
8537        int i;
8538
8539        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8540                                            new_crtc_state, i)
8541                intel_dsb_cleanup(old_crtc_state);
8542}
8543
8544static void intel_atomic_cleanup_work(struct work_struct *work)
8545{
8546        struct intel_atomic_state *state =
8547                container_of(work, struct intel_atomic_state, base.commit_work);
8548        struct drm_i915_private *i915 = to_i915(state->base.dev);
8549
8550        intel_cleanup_dsbs(state);
8551        drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
8552        drm_atomic_helper_commit_cleanup_done(&state->base);
8553        drm_atomic_state_put(&state->base);
8554
8555        intel_atomic_helper_free_state(i915);
8556}
8557
8558static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
8559{
8560        struct drm_i915_private *i915 = to_i915(state->base.dev);
8561        struct intel_plane *plane;
8562        struct intel_plane_state *plane_state;
8563        int i;
8564
8565        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8566                struct drm_framebuffer *fb = plane_state->hw.fb;
8567                int cc_plane;
8568                int ret;
8569
8570                if (!fb)
8571                        continue;
8572
8573                cc_plane = intel_fb_rc_ccs_cc_plane(fb);
8574                if (cc_plane < 0)
8575                        continue;
8576
8577                /*
8578                 * The layout of the fast clear color value expected by HW
8579                 * (the DRM ABI requires this value to be located in the fb at offset 0 of plane #2):
8580                 * - 4 x 4 bytes of per-channel values
8581                 *   (in the surface type specific float/int format provided by the fb user)
8582                 * - 8 bytes of native color value used by the display
8583                 *   (converted/written by the GPU during a fast clear operation using the
8584                 *    above per-channel values)
8585                 *
8586                 * The commit's FB prepare hook already ensured that the FB obj is pinned and
8587                 * the caller made sure that the object is synced wrt. the related clear color
8588                 * GPU write on it.
8589                 */
8590                ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
8591                                                     fb->offsets[cc_plane] + 16,
8592                                                     &plane_state->ccval,
8593                                                     sizeof(plane_state->ccval));
8594                /* The above could only fail if the FB obj has an unexpected backing store type. */
8595                drm_WARN_ON(&i915->drm, ret);
8596        }
8597}
8598
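/*
 * The main commit routine, run after the commit_ready fence has signalled:
 * perform the modeset disables, the enables via the platform
 * commit_modeset_enables() hook, the post-plane updates and watermark
 * optimization, and finally queue the cleanup of the old state to a worker.
 */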
8599static void intel_atomic_commit_tail(struct intel_atomic_state *state)
8600{
8601        struct drm_device *dev = state->base.dev;
8602        struct drm_i915_private *dev_priv = to_i915(dev);
8603        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8604        struct intel_crtc *crtc;
8605        u64 put_domains[I915_MAX_PIPES] = {};
8606        intel_wakeref_t wakeref = 0;
8607        int i;
8608
8609        intel_atomic_commit_fence_wait(state);
8610
8611        drm_atomic_helper_wait_for_dependencies(&state->base);
8612
8613        if (state->modeset)
8614                wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
8615
8616        intel_atomic_prepare_plane_clear_colors(state);
8617
8618        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8619                                            new_crtc_state, i) {
8620                if (intel_crtc_needs_modeset(new_crtc_state) ||
8621                    new_crtc_state->update_pipe) {
8623                        put_domains[crtc->pipe] =
8624                                modeset_get_crtc_power_domains(new_crtc_state);
8625                }
8626        }
8627
8628        intel_commit_modeset_disables(state);
8629
8630        /* FIXME: Eventually get rid of our crtc->config pointer */
8631        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8632                crtc->config = new_crtc_state;
8633
8634        if (state->modeset) {
8635                drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
8636
8637                intel_set_cdclk_pre_plane_update(state);
8638
8639                intel_modeset_verify_disabled(dev_priv, state);
8640        }
8641
8642        intel_sagv_pre_plane_update(state);
8643
8644        /* Complete the events for pipes that have now been disabled */
8645        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8646                bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8647
8648                /* Complete events for now disabled pipes here. */
8649                if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
8650                        spin_lock_irq(&dev->event_lock);
8651                        drm_crtc_send_vblank_event(&crtc->base,
8652                                                   new_crtc_state->uapi.event);
8653                        spin_unlock_irq(&dev->event_lock);
8654
8655                        new_crtc_state->uapi.event = NULL;
8656                }
8657        }
8658
8659        intel_encoders_update_prepare(state);
8660
8661        intel_dbuf_pre_plane_update(state);
8662
8663        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8664                if (new_crtc_state->uapi.async_flip)
8665                        intel_crtc_enable_flip_done(state, crtc);
8666        }
8667
8668        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
8669        dev_priv->display->commit_modeset_enables(state);
8670
8671        intel_encoders_update_complete(state);
8672
8673        if (state->modeset)
8674                intel_set_cdclk_post_plane_update(state);
8675
8676        intel_wait_for_vblank_workers(state);
8677
8678        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
8679         * already, but still need the state for the delayed optimization. To
8680         * fix this:
8681         * - wrap the optimization/post_plane_update stuff into a per-crtc work.
8682         * - schedule that vblank worker _before_ calling hw_done
8683         * - at the start of commit_tail, cancel it _synchronously_
8684         * - switch over to the vblank wait helper in the core after that since
8685         *   we don't need our special handling any more.
8686         */
8687        drm_atomic_helper_wait_for_flip_done(dev, &state->base);
8688
8689        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8690                if (new_crtc_state->uapi.async_flip)
8691                        intel_crtc_disable_flip_done(state, crtc);
8692        }
8693
8694        /*
8695         * Now that the vblank has passed, we can go ahead and program the
8696         * optimal watermarks on platforms that need two-step watermark
8697         * programming.
8698         *
8699         * TODO: Move this (and other cleanup) to an async worker eventually.
8700         */
8701        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8702                                            new_crtc_state, i) {
8703                /*
8704                 * Gen2 reports pipe underruns whenever all planes are disabled.
8705                 * So re-enable underrun reporting after some planes get enabled.
8706                 *
8707                 * We do this before .optimize_watermarks() so that we have a
8708                 * chance of catching underruns with the intermediate watermarks
8709                 * vs. the new plane configuration.
8710                 */
8711                if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
8712                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8713
8714                intel_optimize_watermarks(state, crtc);
8715        }
8716
8717        intel_dbuf_post_plane_update(state);
8718        intel_psr_post_plane_update(state);
8719
8720        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8721                intel_post_plane_update(state, crtc);
8722
8723                modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
8724
8725                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
8726
8727                /*
8728                 * DSB cleanup is done in cleanup_work, aligning with framebuffer
8729                 * cleanup. So copy and reset the dsb structure to sync with
8730                 * commit_done and later do the dsb cleanup in cleanup_work.
8731                 */
8732                old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
8733        }
8734
8735        /* Underruns don't always raise interrupts, so check manually */
8736        intel_check_cpu_fifo_underruns(dev_priv);
8737        intel_check_pch_fifo_underruns(dev_priv);
8738
8739        if (state->modeset)
8740                intel_verify_planes(state);
8741
8742        intel_sagv_post_plane_update(state);
8743
8744        drm_atomic_helper_commit_hw_done(&state->base);
8745
8746        if (state->modeset) {
8747                /* As one of the primary mmio accessors, KMS has a high
8748                 * likelihood of triggering bugs in unclaimed access. After we
8749                 * finish modesetting, see if an error has been flagged, and if
8750                 * so enable debugging for the next modeset - and hope we catch
8751                 * the culprit.
8752                 */
8753                intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
8754                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
8755        }
8756        intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8757
8758        /*
8759         * Defer the cleanup of the old state to a separate worker so as not
8760         * to impede the current task (userspace for blocking modesets) that
8761         * is executed inline. For out-of-line asynchronous modesets/flips,
8762         * deferring to a new worker seems overkill, but we would place a
8763         * schedule point (cond_resched()) here anyway to keep latencies
8764         * down.
8765         */
8766        INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
8767        queue_work(system_highpri_wq, &state->base.commit_work);
8768}
8769
8770static void intel_atomic_commit_work(struct work_struct *work)
8771{
8772        struct intel_atomic_state *state =
8773                container_of(work, struct intel_atomic_state, base.commit_work);
8774
8775        intel_atomic_commit_tail(state);
8776}
8777
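/*
 * i915_sw_fence notify callback for the commit_ready fence: nothing to do on
 * FENCE_COMPLETE (the blocking waits happen in the worker), while FENCE_FREE
 * defers freeing of the atomic state to the atomic_helper free worker.
 */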
8778static int
8779intel_atomic_commit_ready(struct i915_sw_fence *fence,
8780                          enum i915_sw_fence_notify notify)
8781{
8782        struct intel_atomic_state *state =
8783                container_of(fence, struct intel_atomic_state, commit_ready);
8784
8785        switch (notify) {
8786        case FENCE_COMPLETE:
8787                /* we do blocking waits in the worker, nothing to do here */
8788                break;
8789        case FENCE_FREE:
8790                {
8791                        struct intel_atomic_helper *helper =
8792                                &to_i915(state->base.dev)->atomic_helper;
8793
8794                        if (llist_add(&state->freed, &helper->free_list))
8795                                schedule_work(&helper->free_work);
8796                        break;
8797                }
8798        }
8799
8800        return NOTIFY_DONE;
8801}
8802
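/*
 * Move the frontbuffer tracking bits of every plane in the state from the
 * old framebuffer to the new one.
 */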
8803static void intel_atomic_track_fbs(struct intel_atomic_state *state)
8804{
8805        struct intel_plane_state *old_plane_state, *new_plane_state;
8806        struct intel_plane *plane;
8807        int i;
8808
8809        for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
8810                                             new_plane_state, i)
8811                intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
8812                                        to_intel_frontbuffer(new_plane_state->hw.fb),
8813                                        plane->frontbuffer_bit);
8814}
8815
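/*
 * Main atomic commit entry point (drm_mode_config_funcs.atomic_commit): grab
 * a runtime PM reference, set up the commit_ready fence, prepare and swap the
 * state, and then either run intel_atomic_commit_tail() directly (blocking
 * commits) or queue it on the modeset/flip workqueue (nonblocking commits).
 */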
8816static int intel_atomic_commit(struct drm_device *dev,
8817                               struct drm_atomic_state *_state,
8818                               bool nonblock)
8819{
8820        struct intel_atomic_state *state = to_intel_atomic_state(_state);
8821        struct drm_i915_private *dev_priv = to_i915(dev);
8822        int ret = 0;
8823
8824        state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
8825
8826        drm_atomic_state_get(&state->base);
8827        i915_sw_fence_init(&state->commit_ready,
8828                           intel_atomic_commit_ready);
8829
8830        /*
8831         * The intel_legacy_cursor_update() fast path takes care
8832         * of avoiding the vblank waits for simple cursor
8833         * movement and flips. For cursor on/off and size changes,
8834         * we want to perform the vblank waits so that watermark
8835         * updates happen during the correct frames. Gen9+ have
8836         * double buffered watermarks and so shouldn't need this.
8837         *
8838         * Unset state->legacy_cursor_update before the call to
8839         * drm_atomic_helper_setup_commit() because otherwise
8840         * drm_atomic_helper_wait_for_flip_done() is a noop and
8841         * we get FIFO underruns because we didn't wait
8842         * for vblank.
8843         *
8844         * FIXME doing watermarks and fb cleanup from a vblank worker
8845         * (assuming we had any) would solve these problems.
8846         */
8847        if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
8848                struct intel_crtc_state *new_crtc_state;
8849                struct intel_crtc *crtc;
8850                int i;
8851
8852                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8853                        if (new_crtc_state->wm.need_postvbl_update ||
8854                            new_crtc_state->update_wm_post)
8855                                state->base.legacy_cursor_update = false;
8856        }
8857
8858        ret = intel_atomic_prepare_commit(state);
8859        if (ret) {
8860                drm_dbg_atomic(&dev_priv->drm,
8861                               "Preparing state failed with %i\n", ret);
8862                i915_sw_fence_commit(&state->commit_ready);
8863                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8864                return ret;
8865        }
8866
8867        ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
8868        if (!ret)
8869                ret = drm_atomic_helper_swap_state(&state->base, true);
8870        if (!ret)
8871                intel_atomic_swap_global_state(state);
8872
8873        if (ret) {
8874                struct intel_crtc_state *new_crtc_state;
8875                struct intel_crtc *crtc;
8876                int i;
8877
8878                i915_sw_fence_commit(&state->commit_ready);
8879
8880                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8881                        intel_dsb_cleanup(new_crtc_state);
8882
8883                drm_atomic_helper_cleanup_planes(dev, &state->base);
8884                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8885                return ret;
8886        }
8887        intel_shared_dpll_swap_state(state);
8888        intel_atomic_track_fbs(state);
8889
8890        drm_atomic_state_get(&state->base);
8891        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
8892
8893        i915_sw_fence_commit(&state->commit_ready);
8894        if (nonblock && state->modeset) {
8895                queue_work(dev_priv->modeset_wq, &state->base.commit_work);
8896        } else if (nonblock) {
8897                queue_work(dev_priv->flip_wq, &state->base.commit_work);
8898        } else {
8899                if (state->modeset)
8900                        flush_workqueue(dev_priv->modeset_wq);
8901                intel_atomic_commit_tail(state);
8902        }
8903
8904        return 0;
8905}
8906
8907/**
8908 * intel_plane_destroy - destroy a plane
8909 * @plane: plane to destroy
8910 *
8911 * Common destruction function for all types of planes (primary, cursor,
8912 * sprite).
8913 */
8914void intel_plane_destroy(struct drm_plane *plane)
8915{
8916        drm_plane_cleanup(plane);
8917        kfree(to_intel_plane(plane));
8918}
8919
8920static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
8921{
8922        struct intel_plane *plane;
8923
8924        for_each_intel_plane(&dev_priv->drm, plane) {
8925                struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
8926                                                              plane->pipe);
8927
8928                plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
8929        }
8930}
8931
8933int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
8934                                      struct drm_file *file)
8935{
8936        struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8937        struct drm_crtc *drmmode_crtc;
8938        struct intel_crtc *crtc;
8939
8940        drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
8941        if (!drmmode_crtc)
8942                return -ENOENT;
8943
8944        crtc = to_intel_crtc(drmmode_crtc);
8945        pipe_from_crtc_id->pipe = crtc->pipe;
8946
8947        return 0;
8948}
8949
8950static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
8951{
8952        struct drm_device *dev = encoder->base.dev;
8953        struct intel_encoder *source_encoder;
8954        u32 possible_clones = 0;
8955
8956        for_each_intel_encoder(dev, source_encoder) {
8957                if (encoders_cloneable(encoder, source_encoder))
8958                        possible_clones |= drm_encoder_mask(&source_encoder->base);
8959        }
8960
8961        return possible_clones;
8962}
8963
8964static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
8965{
8966        struct drm_device *dev = encoder->base.dev;
8967        struct intel_crtc *crtc;
8968        u32 possible_crtcs = 0;
8969
8970        for_each_intel_crtc(dev, crtc) {
8971                if (encoder->pipe_mask & BIT(crtc->pipe))
8972                        possible_crtcs |= drm_crtc_mask(&crtc->base);
8973        }
8974
8975        return possible_crtcs;
8976}
8977
8978static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
8979{
8980        if (!IS_MOBILE(dev_priv))
8981                return false;
8982
8983        if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
8984                return false;
8985
8986        if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
8987                return false;
8988
8989        return true;
8990}
8991
8992static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
8993{
8994        if (DISPLAY_VER(dev_priv) >= 9)
8995                return false;
8996
8997        if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
8998                return false;
8999
9000        if (HAS_PCH_LPT_H(dev_priv) &&
9001            intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
9002                return false;
9003
9004        /* DDI E can't be used if DDI A requires 4 lanes */
9005        if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
9006                return false;
9007
9008        if (!dev_priv->vbt.int_crt_support)
9009                return false;
9010
9011        return true;
9012}
9013
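/*
 * Probe and register the output encoders (DDI, DSI, DP, HDMI, SDVO, LVDS,
 * CRT, TV, DVO) appropriate for the platform, then fill in the
 * possible_crtcs and possible_clones masks for each encoder.
 */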
9014static void intel_setup_outputs(struct drm_i915_private *dev_priv)
9015{
9016        struct intel_encoder *encoder;
9017        bool dpd_is_edp = false;
9018
9019        intel_pps_unlock_regs_wa(dev_priv);
9020
9021        if (!HAS_DISPLAY(dev_priv))
9022                return;
9023
9024        if (IS_DG2(dev_priv)) {
9025                intel_ddi_init(dev_priv, PORT_A);
9026                intel_ddi_init(dev_priv, PORT_B);
9027                intel_ddi_init(dev_priv, PORT_C);
9028                intel_ddi_init(dev_priv, PORT_D_XELPD);
9029        } else if (IS_ALDERLAKE_P(dev_priv)) {
9030                intel_ddi_init(dev_priv, PORT_A);
9031                intel_ddi_init(dev_priv, PORT_B);
9032                intel_ddi_init(dev_priv, PORT_TC1);
9033                intel_ddi_init(dev_priv, PORT_TC2);
9034                intel_ddi_init(dev_priv, PORT_TC3);
9035                intel_ddi_init(dev_priv, PORT_TC4);
9036                icl_dsi_init(dev_priv);
9037        } else if (IS_ALDERLAKE_S(dev_priv)) {
9038                intel_ddi_init(dev_priv, PORT_A);
9039                intel_ddi_init(dev_priv, PORT_TC1);
9040                intel_ddi_init(dev_priv, PORT_TC2);
9041                intel_ddi_init(dev_priv, PORT_TC3);
9042                intel_ddi_init(dev_priv, PORT_TC4);
9043        } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
9044                intel_ddi_init(dev_priv, PORT_A);
9045                intel_ddi_init(dev_priv, PORT_B);
9046                intel_ddi_init(dev_priv, PORT_TC1);
9047                intel_ddi_init(dev_priv, PORT_TC2);
9048        } else if (DISPLAY_VER(dev_priv) >= 12) {
9049                intel_ddi_init(dev_priv, PORT_A);
9050                intel_ddi_init(dev_priv, PORT_B);
9051                intel_ddi_init(dev_priv, PORT_TC1);
9052                intel_ddi_init(dev_priv, PORT_TC2);
9053                intel_ddi_init(dev_priv, PORT_TC3);
9054                intel_ddi_init(dev_priv, PORT_TC4);
9055                intel_ddi_init(dev_priv, PORT_TC5);
9056                intel_ddi_init(dev_priv, PORT_TC6);
9057                icl_dsi_init(dev_priv);
9058        } else if (IS_JSL_EHL(dev_priv)) {
9059                intel_ddi_init(dev_priv, PORT_A);
9060                intel_ddi_init(dev_priv, PORT_B);
9061                intel_ddi_init(dev_priv, PORT_C);
9062                intel_ddi_init(dev_priv, PORT_D);
9063                icl_dsi_init(dev_priv);
9064        } else if (DISPLAY_VER(dev_priv) == 11) {
9065                intel_ddi_init(dev_priv, PORT_A);
9066                intel_ddi_init(dev_priv, PORT_B);
9067                intel_ddi_init(dev_priv, PORT_C);
9068                intel_ddi_init(dev_priv, PORT_D);
9069                intel_ddi_init(dev_priv, PORT_E);
9070                intel_ddi_init(dev_priv, PORT_F);
9071                icl_dsi_init(dev_priv);
9072        } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
9073                intel_ddi_init(dev_priv, PORT_A);
9074                intel_ddi_init(dev_priv, PORT_B);
9075                intel_ddi_init(dev_priv, PORT_C);
9076                vlv_dsi_init(dev_priv);
9077        } else if (DISPLAY_VER(dev_priv) >= 9) {
9078                intel_ddi_init(dev_priv, PORT_A);
9079                intel_ddi_init(dev_priv, PORT_B);
9080                intel_ddi_init(dev_priv, PORT_C);
9081                intel_ddi_init(dev_priv, PORT_D);
9082                intel_ddi_init(dev_priv, PORT_E);
9083        } else if (HAS_DDI(dev_priv)) {
9084                u32 found;
9085
9086                if (intel_ddi_crt_present(dev_priv))
9087                        intel_crt_init(dev_priv);
9088
9089                /* Haswell uses DDI functions to detect digital outputs. */
9090                found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
9091                if (found)
9092                        intel_ddi_init(dev_priv, PORT_A);
9093
9094                found = intel_de_read(dev_priv, SFUSE_STRAP);
9095                if (found & SFUSE_STRAP_DDIB_DETECTED)
9096                        intel_ddi_init(dev_priv, PORT_B);
9097                if (found & SFUSE_STRAP_DDIC_DETECTED)
9098                        intel_ddi_init(dev_priv, PORT_C);
9099                if (found & SFUSE_STRAP_DDID_DETECTED)
9100                        intel_ddi_init(dev_priv, PORT_D);
9101                if (found & SFUSE_STRAP_DDIF_DETECTED)
9102                        intel_ddi_init(dev_priv, PORT_F);
9103        } else if (HAS_PCH_SPLIT(dev_priv)) {
9104                int found;
9105
9106                /*
9107                 * intel_edp_init_connector() depends on this completing first,
9108                 * to prevent the registration of both eDP and LVDS and the
9109                 * incorrect sharing of the PPS.
9110                 */
9111                intel_lvds_init(dev_priv);
9112                intel_crt_init(dev_priv);
9113
9114                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
9115
9116                if (ilk_has_edp_a(dev_priv))
9117                        g4x_dp_init(dev_priv, DP_A, PORT_A);
9118
9119                if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
9120                        /* PCH SDVOB multiplex with HDMIB */
9121                        found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
9122                        if (!found)
9123                                g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
9124                        if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
9125                                g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
9126                }
9127
9128                if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
9129                        g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
9130
9131                if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
9132                        g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
9133
9134                if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
9135                        g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
9136
9137                if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
9138                        g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
9139        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
9140                bool has_edp, has_port;
9141
9142                if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
9143                        intel_crt_init(dev_priv);
9144
9145                /*
9146                 * The DP_DETECTED bit is the latched state of the DDC
9147                 * SDA pin at boot. However, since eDP doesn't require DDC
9148                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
9149                 * eDP ports may have been muxed to an alternate function.
9150                 * Thus we can't rely on the DP_DETECTED bit alone to detect
9151                 * eDP ports. Consult the VBT as well as DP_DETECTED to
9152                 * detect eDP ports.
9153                 *
9154                 * Sadly the straps seem to be missing sometimes even for HDMI
9155                 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
9156                 * and VBT for the presence of the port. Additionally we can't
9157                 * trust the port type the VBT declares as we've seen at least
9158                 * HDMI ports that the VBT claims are DP or eDP.
9159                 */
9160                has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
9161                has_port = intel_bios_is_port_present(dev_priv, PORT_B);
9162                if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
9163                        has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
9164                if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
9165                        g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
9166
9167                has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
9168                has_port = intel_bios_is_port_present(dev_priv, PORT_C);
9169                if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
9170                        has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
9171                if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
9172                        g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
9173
9174                if (IS_CHERRYVIEW(dev_priv)) {
9175                        /*
9176                         * eDP not supported on port D,
9177                         * so no need to worry about it
9178                         */
9179                        has_port = intel_bios_is_port_present(dev_priv, PORT_D);
9180                        if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
9181                                g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
9182                        if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
9183                                g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
9184                }
9185
9186                vlv_dsi_init(dev_priv);
9187        } else if (IS_PINEVIEW(dev_priv)) {
9188                intel_lvds_init(dev_priv);
9189                intel_crt_init(dev_priv);
9190        } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
9191                bool found = false;
9192
9193                if (IS_MOBILE(dev_priv))
9194                        intel_lvds_init(dev_priv);
9195
9196                intel_crt_init(dev_priv);
9197
9198                if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9199                        drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
9200                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
9201                        if (!found && IS_G4X(dev_priv)) {
9202                                drm_dbg_kms(&dev_priv->drm,
9203                                            "probing HDMI on SDVOB\n");
9204                                g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
9205                        }
9206
9207                        if (!found && IS_G4X(dev_priv))
9208                                g4x_dp_init(dev_priv, DP_B, PORT_B);
9209                }
9210
9211                /* Before G4X, SDVOC doesn't have its own detect register */
9212
9213                if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9214                        drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
9215                        found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
9216                }
9217
9218                if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
9220                        if (IS_G4X(dev_priv)) {
9221                                drm_dbg_kms(&dev_priv->drm,
9222                                            "probing HDMI on SDVOC\n");
9223                                g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
9224                        }
9225                        if (IS_G4X(dev_priv))
9226                                g4x_dp_init(dev_priv, DP_C, PORT_C);
9227                }
9228
9229                if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
9230                        g4x_dp_init(dev_priv, DP_D, PORT_D);
9231
9232                if (SUPPORTS_TV(dev_priv))
9233                        intel_tv_init(dev_priv);
9234        } else if (DISPLAY_VER(dev_priv) == 2) {
9235                if (IS_I85X(dev_priv))
9236                        intel_lvds_init(dev_priv);
9237
9238                intel_crt_init(dev_priv);
9239                intel_dvo_init(dev_priv);
9240        }
9241
9242        for_each_intel_encoder(&dev_priv->drm, encoder) {
9243                encoder->base.possible_crtcs =
9244                        intel_encoder_possible_crtcs(encoder);
9245                encoder->base.possible_clones =
9246                        intel_encoder_possible_clones(encoder);
9247        }
9248
9249        intel_init_pch_refclk(dev_priv);
9250
9251        drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
9252}
9253
9254static enum drm_mode_status
9255intel_mode_valid(struct drm_device *dev,
9256                 const struct drm_display_mode *mode)
9257{
9258        struct drm_i915_private *dev_priv = to_i915(dev);
9259        int hdisplay_max, htotal_max;
9260        int vdisplay_max, vtotal_max;
9261
9262        /*
9263         * Can't reject DBLSCAN here because Xorg ddxen can add piles
9264         * of DBLSCAN modes to the output's mode list when they detect
9265         * the scaling mode property on the connector. And they don't
9266         * ask the kernel to validate those modes in any way until
9267         * modeset time at which point the client gets a protocol error.
9268         * So in order to not upset those clients we silently ignore the
9269         * DBLSCAN flag on such connectors. For other connectors we will
9270         * reject modes with the DBLSCAN flag in encoder->compute_config().
9271         * And we always reject DBLSCAN modes in connector->mode_valid()
9272         * as we never want such modes on the connector's mode list.
9273         */
9274
9275        if (mode->vscan > 1)
9276                return MODE_NO_VSCAN;
9277
9278        if (mode->flags & DRM_MODE_FLAG_HSKEW)
9279                return MODE_H_ILLEGAL;
9280
9281        if (mode->flags & (DRM_MODE_FLAG_CSYNC |
9282                           DRM_MODE_FLAG_NCSYNC |
9283                           DRM_MODE_FLAG_PCSYNC))
9284                return MODE_HSYNC;
9285
9286        if (mode->flags & (DRM_MODE_FLAG_BCAST |
9287                           DRM_MODE_FLAG_PIXMUX |
9288                           DRM_MODE_FLAG_CLKDIV2))
9289                return MODE_BAD;
9290
9291        /* Transcoder timing limits */
9292        if (DISPLAY_VER(dev_priv) >= 11) {
9293                hdisplay_max = 16384;
9294                vdisplay_max = 8192;
9295                htotal_max = 16384;
9296                vtotal_max = 8192;
9297        } else if (DISPLAY_VER(dev_priv) >= 9 ||
9298                   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
9299                hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
9300                vdisplay_max = 4096;
9301                htotal_max = 8192;
9302                vtotal_max = 8192;
9303        } else if (DISPLAY_VER(dev_priv) >= 3) {
9304                hdisplay_max = 4096;
9305                vdisplay_max = 4096;
9306                htotal_max = 8192;
9307                vtotal_max = 8192;
9308        } else {
9309                hdisplay_max = 2048;
9310                vdisplay_max = 2048;
9311                htotal_max = 4096;
9312                vtotal_max = 4096;
9313        }
9314
9315        if (mode->hdisplay > hdisplay_max ||
9316            mode->hsync_start > htotal_max ||
9317            mode->hsync_end > htotal_max ||
9318            mode->htotal > htotal_max)
9319                return MODE_H_ILLEGAL;
9320
9321        if (mode->vdisplay > vdisplay_max ||
9322            mode->vsync_start > vtotal_max ||
9323            mode->vsync_end > vtotal_max ||
9324            mode->vtotal > vtotal_max)
9325                return MODE_V_ILLEGAL;
9326
9327        if (DISPLAY_VER(dev_priv) >= 5) {
9328                if (mode->hdisplay < 64 ||
9329                    mode->htotal - mode->hdisplay < 32)
9330                        return MODE_H_ILLEGAL;
9331
9332                if (mode->vtotal - mode->vdisplay < 5)
9333                        return MODE_V_ILLEGAL;
9334        } else {
9335                if (mode->htotal - mode->hdisplay < 32)
9336                        return MODE_H_ILLEGAL;
9337
9338                if (mode->vtotal - mode->vdisplay < 3)
9339                        return MODE_V_ILLEGAL;
9340        }
9341
9342        /*
9343         * Cantiga+ cannot handle modes with a hsync front porch of 0.
9344         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
9345         */
9346        if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
9347            mode->hsync_start == mode->hdisplay)
9348                return MODE_H_ILLEGAL;
9349
9350        return MODE_OK;
9351}
9352
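/**
 * intel_mode_valid_max_plane_size - filter out modes bigger than the largest plane
 * @dev_priv: i915 device instance
 * @mode: mode to validate
 * @bigjoiner: whether the mode would be driven via bigjoiner (doubles the
 *             maximum plane width)
 *
 * Returns MODE_OK if a fullscreen plane could still scan out @mode, or an
 * appropriate enum drm_mode_status failure otherwise. On pre-display version 9
 * hardware intel_mode_valid() is considered sufficient and everything passes.
 */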
9353enum drm_mode_status
9354intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
9355                                const struct drm_display_mode *mode,
9356                                bool bigjoiner)
9357{
9358        int plane_width_max, plane_height_max;
9359
9360        /*
9361         * intel_mode_valid() should be
9362         * sufficient on older platforms.
9363         */
9364        if (DISPLAY_VER(dev_priv) < 9)
9365                return MODE_OK;
9366
9367        /*
9368         * Most people will probably want a fullscreen
9369         * plane so let's not advertise modes that are
9370         * too big for that.
9371         */
9372        if (DISPLAY_VER(dev_priv) >= 11) {
9373                plane_width_max = 5120 << bigjoiner;
9374                plane_height_max = 4320;
9375        } else {
9376                plane_width_max = 5120;
9377                plane_height_max = 4096;
9378        }
9379
9380        if (mode->hdisplay > plane_width_max)
9381                return MODE_H_ILLEGAL;
9382
9383        if (mode->vdisplay > plane_height_max)
9384                return MODE_V_ILLEGAL;
9385
9386        return MODE_OK;
9387}
9388
9389static const struct drm_mode_config_funcs intel_mode_funcs = {
9390        .fb_create = intel_user_framebuffer_create,
9391        .get_format_info = intel_fb_get_format_info,
9392        .output_poll_changed = intel_fbdev_output_poll_changed,
9393        .mode_valid = intel_mode_valid,
9394        .atomic_check = intel_atomic_check,
9395        .atomic_commit = intel_atomic_commit,
9396        .atomic_state_alloc = intel_atomic_state_alloc,
9397        .atomic_state_clear = intel_atomic_state_clear,
9398        .atomic_state_free = intel_atomic_state_free,
9399};
9400
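/*
 * Platform specific modeset function tables; intel_init_display_hooks()
 * below picks the appropriate one for the running hardware.
 */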
9401static const struct drm_i915_display_funcs skl_display_funcs = {
9402        .get_pipe_config = hsw_get_pipe_config,
9403        .crtc_enable = hsw_crtc_enable,
9404        .crtc_disable = hsw_crtc_disable,
9405        .commit_modeset_enables = skl_commit_modeset_enables,
9406        .get_initial_plane_config = skl_get_initial_plane_config,
9407};
9408
9409static const struct drm_i915_display_funcs ddi_display_funcs = {
9410        .get_pipe_config = hsw_get_pipe_config,
9411        .crtc_enable = hsw_crtc_enable,
9412        .crtc_disable = hsw_crtc_disable,
9413        .commit_modeset_enables = intel_commit_modeset_enables,
9414        .get_initial_plane_config = i9xx_get_initial_plane_config,
9415};
9416
9417static const struct drm_i915_display_funcs pch_split_display_funcs = {
9418        .get_pipe_config = ilk_get_pipe_config,
9419        .crtc_enable = ilk_crtc_enable,
9420        .crtc_disable = ilk_crtc_disable,
9421        .commit_modeset_enables = intel_commit_modeset_enables,
9422        .get_initial_plane_config = i9xx_get_initial_plane_config,
9423};
9424
9425static const struct drm_i915_display_funcs vlv_display_funcs = {
9426        .get_pipe_config = i9xx_get_pipe_config,
9427        .crtc_enable = valleyview_crtc_enable,
9428        .crtc_disable = i9xx_crtc_disable,
9429        .commit_modeset_enables = intel_commit_modeset_enables,
9430        .get_initial_plane_config = i9xx_get_initial_plane_config,
9431};
9432
9433static const struct drm_i915_display_funcs i9xx_display_funcs = {
9434        .get_pipe_config = i9xx_get_pipe_config,
9435        .crtc_enable = i9xx_crtc_enable,
9436        .crtc_disable = i9xx_crtc_disable,
9437        .commit_modeset_enables = intel_commit_modeset_enables,
9438        .get_initial_plane_config = i9xx_get_initial_plane_config,
9439};
9440
9441/**
9442 * intel_init_display_hooks - initialize the display modesetting hooks
9443 * @dev_priv: device private
9444 */
9445void intel_init_display_hooks(struct drm_i915_private *dev_priv)
9446{
9447        if (!HAS_DISPLAY(dev_priv))
9448                return;
9449
9450        intel_init_cdclk_hooks(dev_priv);
9451        intel_audio_hooks_init(dev_priv);
9452
9453        intel_dpll_init_clock_hook(dev_priv);
9454
9455        if (DISPLAY_VER(dev_priv) >= 9) {
9456                dev_priv->display = &skl_display_funcs;
9457        } else if (HAS_DDI(dev_priv)) {
9458                dev_priv->display = &ddi_display_funcs;
9459        } else if (HAS_PCH_SPLIT(dev_priv)) {
9460                dev_priv->display = &pch_split_display_funcs;
9461        } else if (IS_CHERRYVIEW(dev_priv) ||
9462                   IS_VALLEYVIEW(dev_priv)) {
9463                dev_priv->display = &vlv_display_funcs;
9464        } else {
9465                dev_priv->display = &i9xx_display_funcs;
9466        }
9467
9468        intel_fdi_init_hook(dev_priv);
9469}
9470
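/*
 * Read the current cdclk configuration out of the hardware and seed the
 * software cdclk state (both logical and actual) with it.
 */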
9471void intel_modeset_init_hw(struct drm_i915_private *i915)
9472{
9473        struct intel_cdclk_state *cdclk_state;
9474
9475        if (!HAS_DISPLAY(i915))
9476                return;
9477
9478        cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
9479
9480        intel_update_cdclk(i915);
9481        intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
9482        cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
9483}
9484
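/*
 * Pull every crtc and plane into the atomic state so that the watermarks can
 * be recomputed for all of them. Active crtcs keep the inherited flag set so
 * this does not escalate into a full modeset.
 */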
9485static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
9486{
9487        struct drm_plane *plane;
9488        struct intel_crtc *crtc;
9489
9490        for_each_intel_crtc(state->dev, crtc) {
9491                struct intel_crtc_state *crtc_state;
9492
9493                crtc_state = intel_atomic_get_crtc_state(state, crtc);
9494                if (IS_ERR(crtc_state))
9495                        return PTR_ERR(crtc_state);
9496
9497                if (crtc_state->hw.active) {
9498                        /*
9499                         * Preserve the inherited flag to avoid
9500                         * taking the full modeset path.
9501                         */
9502                        crtc_state->inherited = true;
9503                }
9504        }
9505
9506        drm_for_each_plane(plane, state->dev) {
9507                struct drm_plane_state *plane_state;
9508
9509                plane_state = drm_atomic_get_plane_state(state, plane);
9510                if (IS_ERR(plane_state))
9511                        return PTR_ERR(plane_state);
9512        }
9513
9514        return 0;
9515}
9516
9517/*
9518 * Calculate what we think the watermarks should be for the state we've read
9519 * out of the hardware and then immediately program those watermarks so that
9520 * we ensure the hardware settings match our internal state.
9521 *
9522 * We can calculate what we think WMs should be by creating a duplicate of the
9523 * current state (which was constructed during hardware readout) and running it
9524 * through the atomic check code to calculate new watermark values in the
9525 * state object.
9526 */
9527static void sanitize_watermarks(struct drm_i915_private *dev_priv)
9528{
9529        struct drm_atomic_state *state;
9530        struct intel_atomic_state *intel_state;
9531        struct intel_crtc *crtc;
9532        struct intel_crtc_state *crtc_state;
9533        struct drm_modeset_acquire_ctx ctx;
9534        int ret;
9535        int i;
9536
9537        /* Only supported on platforms that use atomic watermark design */
9538        if (!dev_priv->wm_disp->optimize_watermarks)
9539                return;
9540
9541        state = drm_atomic_state_alloc(&dev_priv->drm);
9542        if (drm_WARN_ON(&dev_priv->drm, !state))
9543                return;
9544
9545        intel_state = to_intel_atomic_state(state);
9546
9547        drm_modeset_acquire_init(&ctx, 0);
9548
9549retry:
9550        state->acquire_ctx = &ctx;
9551
9552        /*
9553         * Hardware readout is the only time we don't want to calculate
9554         * intermediate watermarks (since we don't trust the current
9555         * watermarks).
9556         */
9557        if (!HAS_GMCH(dev_priv))
9558                intel_state->skip_intermediate_wm = true;
9559
9560        ret = sanitize_watermarks_add_affected(state);
9561        if (ret)
9562                goto fail;
9563
9564        ret = intel_atomic_check(&dev_priv->drm, state);
9565        if (ret)
9566                goto fail;
9567
9568        /* Write calculated watermark values back */
9569        for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
9570                crtc_state->wm.need_postvbl_update = true;
9571                intel_optimize_watermarks(intel_state, crtc);
9572
9573                to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
9574        }
9575
9576fail:
9577        if (ret == -EDEADLK) {
9578                drm_atomic_state_clear(state);
9579                drm_modeset_backoff(&ctx);
9580                goto retry;
9581        }
9582
9583        /*
9584         * If we fail here, it means that the hardware appears to be
9585         * programmed in a way that shouldn't be possible, given our
9586         * understanding of watermark requirements.  This might mean a
9587         * mistake in the hardware readout code or a mistake in the
9588         * watermark calculations for a given platform.  Raise a WARN
9589         * so that this is noticeable.
9590         *
9591         * If this actually happens, we'll have to just leave the
9592         * BIOS-programmed watermarks untouched and hope for the best.
9593         */
9594        drm_WARN(&dev_priv->drm, ret,
9595                 "Could not determine valid watermarks for inherited state\n");
9596
9597        drm_atomic_state_put(state);
9598
9599        drm_modeset_drop_locks(&ctx);
9600        drm_modeset_acquire_fini(&ctx);
9601}
9602
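/*
 * Commit the state that was read out of the hardware straight back to the
 * hardware. This forces the planes of every active crtc to recompute their
 * state, and gives encoders a chance to reject a pure fastset via their
 * initial_fastset_check() hook, in which case the affected connectors are
 * added to the commit as well.
 */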
9603static int intel_initial_commit(struct drm_device *dev)
9604{
9605        struct drm_atomic_state *state = NULL;
9606        struct drm_modeset_acquire_ctx ctx;
9607        struct intel_crtc *crtc;
9608        int ret = 0;
9609
9610        state = drm_atomic_state_alloc(dev);
9611        if (!state)
9612                return -ENOMEM;
9613
9614        drm_modeset_acquire_init(&ctx, 0);
9615
9616retry:
9617        state->acquire_ctx = &ctx;
9618
9619        for_each_intel_crtc(dev, crtc) {
9620                struct intel_crtc_state *crtc_state =
9621                        intel_atomic_get_crtc_state(state, crtc);
9622
9623                if (IS_ERR(crtc_state)) {
9624                        ret = PTR_ERR(crtc_state);
9625                        goto out;
9626                }
9627
9628                if (crtc_state->hw.active) {
9629                        struct intel_encoder *encoder;
9630
9631                        /*
9632                         * We've not yet detected sink capabilities
9633                         * (audio, infoframes, etc.) and thus we don't want to
9634                         * force a full state recomputation yet. We want that to
9635                         * happen only for the first real commit from userspace.
9636                         * So preserve the inherited flag for the time being.
9637                         */
9638                        crtc_state->inherited = true;
9639
9640                        ret = drm_atomic_add_affected_planes(state, &crtc->base);
9641                        if (ret)
9642                                goto out;
9643
9644                        /*
9645                         * FIXME hack to force a LUT update to avoid the
9646                         * plane update forcing the pipe gamma on without
9647                         * having a proper LUT loaded. Remove once we
9648                         * have readout for pipe gamma enable.
9649                         */
9650                        crtc_state->uapi.color_mgmt_changed = true;
9651
9652                        for_each_intel_encoder_mask(dev, encoder,
9653                                                    crtc_state->uapi.encoder_mask) {
9654                                if (encoder->initial_fastset_check &&
9655                                    !encoder->initial_fastset_check(encoder, crtc_state)) {
9656                                        ret = drm_atomic_add_affected_connectors(state,
9657                                                                                 &crtc->base);
9658                                        if (ret)
9659                                                goto out;
9660                                }
9661                        }
9662                }
9663        }
9664
9665        ret = drm_atomic_commit(state);
9666
9667out:
9668        if (ret == -EDEADLK) {
9669                drm_atomic_state_clear(state);
9670                drm_modeset_backoff(&ctx);
9671                goto retry;
9672        }
9673
9674        drm_atomic_state_put(state);
9675
9676        drm_modeset_drop_locks(&ctx);
9677        drm_modeset_acquire_fini(&ctx);
9678
9679        return ret;
9680}
9681
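/*
 * Set up the drm_mode_config for this device: the modeset hooks plus the
 * platform dependent framebuffer and cursor size limits.
 */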
9682static void intel_mode_config_init(struct drm_i915_private *i915)
9683{
9684        struct drm_mode_config *mode_config = &i915->drm.mode_config;
9685
9686        drm_mode_config_init(&i915->drm);
9687        INIT_LIST_HEAD(&i915->global_obj_list);
9688
9689        mode_config->min_width = 0;
9690        mode_config->min_height = 0;
9691
9692        mode_config->preferred_depth = 24;
9693        mode_config->prefer_shadow = 1;
9694
9695        mode_config->funcs = &intel_mode_funcs;
9696
9697        mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
9698
9699        /*
9700         * Maximum framebuffer dimensions, chosen to match
9701         * the maximum render engine surface size on gen4+.
9702         */
9703        if (DISPLAY_VER(i915) >= 7) {
9704                mode_config->max_width = 16384;
9705                mode_config->max_height = 16384;
9706        } else if (DISPLAY_VER(i915) >= 4) {
9707                mode_config->max_width = 8192;
9708                mode_config->max_height = 8192;
9709        } else if (DISPLAY_VER(i915) == 3) {
9710                mode_config->max_width = 4096;
9711                mode_config->max_height = 4096;
9712        } else {
9713                mode_config->max_width = 2048;
9714                mode_config->max_height = 2048;
9715        }
9716
9717        if (IS_I845G(i915) || IS_I865G(i915)) {
9718                mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
9719                mode_config->cursor_height = 1023;
9720        } else if (IS_I830(i915) || IS_I85X(i915) ||
9721                   IS_I915G(i915) || IS_I915GM(i915)) {
9722                mode_config->cursor_width = 64;
9723                mode_config->cursor_height = 64;
9724        } else {
9725                mode_config->cursor_width = 256;
9726                mode_config->cursor_height = 256;
9727        }
9728}
9729
9730static void intel_mode_config_cleanup(struct drm_i915_private *i915)
9731{
9732        intel_atomic_global_obj_cleanup(i915);
9733        drm_mode_config_cleanup(&i915->drm);
9734}
9735
9736/* part #1: call before irq install */
9737int intel_modeset_init_noirq(struct drm_i915_private *i915)
9738{
9739        int ret;
9740
9741        if (i915_inject_probe_failure(i915))
9742                return -ENODEV;
9743
9744        if (HAS_DISPLAY(i915)) {
9745                ret = drm_vblank_init(&i915->drm,
9746                                      INTEL_NUM_PIPES(i915));
9747                if (ret)
9748                        return ret;
9749        }
9750
9751        intel_bios_init(i915);
9752
9753        ret = intel_vga_register(i915);
9754        if (ret)
9755                goto cleanup_bios;
9756
9757        /* FIXME: completely on the wrong abstraction layer */
9758        intel_power_domains_init_hw(i915, false);
9759
9760        if (!HAS_DISPLAY(i915))
9761                return 0;
9762
9763        intel_dmc_ucode_init(i915);
9764
9765        i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
9766        i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
9767                                        WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
9768
9769        i915->framestart_delay = 1; /* 1-4 */
9770
9771        i915->window2_delay = 0; /* No DSB so no window2 delay */
9772
9773        intel_mode_config_init(i915);
9774
9775        ret = intel_cdclk_init(i915);
9776        if (ret)
9777                goto cleanup_vga_client_pw_domain_dmc;
9778
9779        ret = intel_dbuf_init(i915);
9780        if (ret)
9781                goto cleanup_vga_client_pw_domain_dmc;
9782
9783        ret = intel_bw_init(i915);
9784        if (ret)
9785                goto cleanup_vga_client_pw_domain_dmc;
9786
9787        init_llist_head(&i915->atomic_helper.free_list);
9788        INIT_WORK(&i915->atomic_helper.free_work,
9789                  intel_atomic_helper_free_state_worker);
9790
9791        intel_init_quirks(i915);
9792
9793        intel_fbc_init(i915);
9794
9795        return 0;
9796
9797cleanup_vga_client_pw_domain_dmc:
9798        intel_dmc_ucode_fini(i915);
9799        intel_power_domains_driver_remove(i915);
9800        intel_vga_unregister(i915);
9801cleanup_bios:
9802        intel_bios_driver_remove(i915);
9803
9804        return ret;
9805}
9806
9807/* part #2: call after irq install, but before gem init */
9808int intel_modeset_init_nogem(struct drm_i915_private *i915)
9809{
9810        struct drm_device *dev = &i915->drm;
9811        enum pipe pipe;
9812        struct intel_crtc *crtc;
9813        int ret;
9814
9815        if (!HAS_DISPLAY(i915))
9816                return 0;
9817
9818        intel_init_pm(i915);
9819
9820        intel_panel_sanitize_ssc(i915);
9821
9822        intel_pps_setup(i915);
9823
9824        intel_gmbus_setup(i915);
9825
9826        drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
9827                    INTEL_NUM_PIPES(i915),
9828                    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
9829
9830        for_each_pipe(i915, pipe) {
9831                ret = intel_crtc_init(i915, pipe);
9832                if (ret) {
9833                        intel_mode_config_cleanup(i915);
9834                        return ret;
9835                }
9836        }
9837
9838        intel_plane_possible_crtcs_init(i915);
9839        intel_shared_dpll_init(dev);
9840        intel_fdi_pll_freq_update(i915);
9841
9842        intel_update_czclk(i915);
9843        intel_modeset_init_hw(i915);
9844        intel_dpll_update_ref_clks(i915);
9845
9846        intel_hdcp_component_init(i915);
9847
9848        if (i915->max_cdclk_freq == 0)
9849                intel_update_max_cdclk(i915);
9850
9851        /*
9852         * If the platform has HTI, we need to find out whether it has reserved
9853         * any display resources before we create our display outputs.
9854         */
9855        if (INTEL_INFO(i915)->display.has_hti)
9856                i915->hti_state = intel_de_read(i915, HDPORT_STATE);
9857
9858        /* Just disable VGA once at startup */
9859        intel_vga_disable(i915);
9860        intel_setup_outputs(i915);
9861
9862        drm_modeset_lock_all(dev);
9863        intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
9864        intel_acpi_assign_connector_fwnodes(i915);
9865        drm_modeset_unlock_all(dev);
9866
9867        for_each_intel_crtc(dev, crtc) {
9868                if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
9869                        continue;
9870                intel_crtc_initial_plane_config(crtc);
9871        }
9872
9873        /*
9874         * Make sure hardware watermarks really match the state we read out.
9875         * Note that we need to do this after reconstructing the BIOS fbs
9876         * since the watermark calculation done here will use pstate->fb.
9877         */
9878        if (!HAS_GMCH(i915))
9879                sanitize_watermarks(i915);
9880
9881        return 0;
9882}
9883
9884/* part #3: call after gem init */
9885int intel_modeset_init(struct drm_i915_private *i915)
9886{
9887        int ret;
9888
9889        if (!HAS_DISPLAY(i915))
9890                return 0;
9891
9892        /*
9893         * Force all active planes to recompute their states, so that on
9894         * mode_setcrtc after probe, all the intel_plane_state variables
9895         * are already calculated and there are no assert_plane warnings
9896         * during bootup.
9897         */
9898        ret = intel_initial_commit(&i915->drm);
9899        if (ret)
9900                drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
9901
9902        intel_overlay_setup(i915);
9903
9904        ret = intel_fbdev_init(&i915->drm);
9905        if (ret)
9906                return ret;
9907
9908        /* Only enable hotplug handling once the fbdev is fully set up. */
9909        intel_hpd_init(i915);
9910        intel_hpd_poll_disable(i915);
9911
9912        intel_init_ipc(i915);
9913
9914        return 0;
9915}
9916
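/*
 * Force the pipe on with a fixed 640x480@60Hz DVO 2x configuration. Only
 * used by quirk handling (see the "force quirk" debug message below).
 */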
9917void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
9918{
9919        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
9920        /* 640x480@60Hz, ~25175 kHz */
9921        struct dpll clock = {
9922                .m1 = 18,
9923                .m2 = 7,
9924                .p1 = 13,
9925                .p2 = 4,
9926                .n = 2,
9927        };
9928        u32 dpll, fp;
9929        int i;
9930
9931        drm_WARN_ON(&dev_priv->drm,
9932                    i9xx_calc_dpll_params(48000, &clock) != 25154);
9933
9934        drm_dbg_kms(&dev_priv->drm,
9935                    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
9936                    pipe_name(pipe), clock.vco, clock.dot);
9937
9938        fp = i9xx_dpll_compute_fp(&clock);
9939        dpll = DPLL_DVO_2X_MODE |
9940                DPLL_VGA_MODE_DIS |
9941                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
9942                PLL_P2_DIVIDE_BY_4 |
9943                PLL_REF_INPUT_DREFCLK |
9944                DPLL_VCO_ENABLE;
9945
9946        intel_de_write(dev_priv, FP0(pipe), fp);
9947        intel_de_write(dev_priv, FP1(pipe), fp);
9948
9949        intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
9950        intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
9951        intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
9952        intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
9953        intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
9954        intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
9955        intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
9956
9957        /*
9958         * Apparently we need to have VGA mode enabled prior to changing
9959         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
9960         * dividers, even though the register value does change.
9961         */
9962        intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
9963        intel_de_write(dev_priv, DPLL(pipe), dpll);
9964
9965        /* Wait for the clocks to stabilize. */
9966        intel_de_posting_read(dev_priv, DPLL(pipe));
9967        udelay(150);
9968
9969        /* The pixel multiplier can only be updated once the
9970         * DPLL is enabled and the clocks are stable.
9971         *
9972         * So write it again.
9973         */
9974        intel_de_write(dev_priv, DPLL(pipe), dpll);
9975
9976        /* We do this three times for luck */
9977        for (i = 0; i < 3 ; i++) {
9978                intel_de_write(dev_priv, DPLL(pipe), dpll);
9979                intel_de_posting_read(dev_priv, DPLL(pipe));
9980                udelay(150); /* wait for warmup */
9981        }
9982
9983        intel_de_write(dev_priv, PIPECONF(pipe),
9984                       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
9985        intel_de_posting_read(dev_priv, PIPECONF(pipe));
9986
9987        intel_wait_for_pipe_scanline_moving(crtc);
9988}
9989
9990void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
9991{
9992        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
9993
9994        drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
9995                    pipe_name(pipe));
9996
9997        drm_WARN_ON(&dev_priv->drm,
9998                    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
9999                    DISPLAY_PLANE_ENABLE);
10000        drm_WARN_ON(&dev_priv->drm,
10001                    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
10002                    DISPLAY_PLANE_ENABLE);
10003        drm_WARN_ON(&dev_priv->drm,
10004                    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
10005                    DISPLAY_PLANE_ENABLE);
10006        drm_WARN_ON(&dev_priv->drm,
10007                    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
10008        drm_WARN_ON(&dev_priv->drm,
10009                    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
10010
10011        intel_de_write(dev_priv, PIPECONF(pipe), 0);
10012        intel_de_posting_read(dev_priv, PIPECONF(pipe));
10013
10014        intel_wait_for_pipe_scanline_stopped(crtc);
10015
10016        intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
10017        intel_de_posting_read(dev_priv, DPLL(pipe));
10018}
10019
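/*
 * Pre-gen4 primary planes have a pipe select. If the BIOS left a plane
 * attached to the wrong pipe, disable it so the plane/pipe mapping matches
 * what the driver expects.
 */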
10020static void
10021intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
10022{
10023        struct intel_crtc *crtc;
10024
10025        if (DISPLAY_VER(dev_priv) >= 4)
10026                return;
10027
10028        for_each_intel_crtc(&dev_priv->drm, crtc) {
10029                struct intel_plane *plane =
10030                        to_intel_plane(crtc->base.primary);
10031                struct intel_crtc *plane_crtc;
10032                enum pipe pipe;
10033
10034                if (!plane->get_hw_state(plane, &pipe))
10035                        continue;
10036
10037                if (pipe == crtc->pipe)
10038                        continue;
10039
10040                drm_dbg_kms(&dev_priv->drm,
10041                            "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
10042                            plane->base.base.id, plane->base.name);
10043
10044                plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
10045                intel_plane_disable_noatomic(plane_crtc, plane);
10046        }
10047}
10048
10049static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
10050{
10051        struct drm_device *dev = crtc->base.dev;
10052        struct intel_encoder *encoder;
10053
10054        for_each_encoder_on_crtc(dev, &crtc->base, encoder)
10055                return true;
10056
10057        return false;
10058}
10059
10060static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
10061{
10062        struct drm_device *dev = encoder->base.dev;
10063        struct intel_connector *connector;
10064
10065        for_each_connector_on_encoder(dev, &encoder->base, connector)
10066                return connector;
10067
10068        return NULL;
10069}
10070
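/*
 * Does a PCH transcoder exist for this pipe? IBX/CPT have one PCH
 * transcoder per pipe, while LPT-H only has PCH transcoder A.
 */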
10071static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
10072                              enum pipe pch_transcoder)
10073{
10074        return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
10075                (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
10076}
10077
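/*
 * Program the frame start delay on the CPU transcoder (and the PCH
 * transcoder, if any) to the value the driver expects, overwriting whatever
 * the BIOS left behind.
 */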
10078static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
10079{
10080        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10081        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10082        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
10083
10084        if (DISPLAY_VER(dev_priv) >= 9 ||
10085            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
10086                i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
10087                u32 val;
10088
10089                if (transcoder_is_dsi(cpu_transcoder))
10090                        return;
10091
10092                val = intel_de_read(dev_priv, reg);
10093                val &= ~HSW_FRAME_START_DELAY_MASK;
10094                val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10095                intel_de_write(dev_priv, reg, val);
10096        } else {
10097                i915_reg_t reg = PIPECONF(cpu_transcoder);
10098                u32 val;
10099
10100                val = intel_de_read(dev_priv, reg);
10101                val &= ~PIPECONF_FRAME_START_DELAY_MASK;
10102                val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10103                intel_de_write(dev_priv, reg, val);
10104        }
10105
10106        if (!crtc_state->has_pch_encoder)
10107                return;
10108
10109        if (HAS_PCH_IBX(dev_priv)) {
10110                i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
10111                u32 val;
10112
10113                val = intel_de_read(dev_priv, reg);
10114                val &= ~TRANS_FRAME_START_DELAY_MASK;
10115                val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10116                intel_de_write(dev_priv, reg, val);
10117        } else {
10118                enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
10119                i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
10120                u32 val;
10121
10122                val = intel_de_read(dev_priv, reg);
10123                val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
10124                val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10125                intel_de_write(dev_priv, reg, val);
10126        }
10127}
10128
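/*
 * Sanitize a single crtc: clear BIOS frame start delays, turn off everything
 * except the primary plane, shut the pipe down if it has no encoders (unless
 * it is a bigjoiner slave), and mark FIFO underrun reporting as disabled for
 * bookkeeping purposes.
 */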
10129static void intel_sanitize_crtc(struct intel_crtc *crtc,
10130                                struct drm_modeset_acquire_ctx *ctx)
10131{
10132        struct drm_device *dev = crtc->base.dev;
10133        struct drm_i915_private *dev_priv = to_i915(dev);
10134        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
10135
10136        if (crtc_state->hw.active) {
10137                struct intel_plane *plane;
10138
10139                /* Clear any frame start delays (used for debugging) left by the BIOS */
10140                intel_sanitize_frame_start_delay(crtc_state);
10141
10142                /* Disable everything but the primary plane */
10143                for_each_intel_plane_on_crtc(dev, crtc, plane) {
10144                        const struct intel_plane_state *plane_state =
10145                                to_intel_plane_state(plane->base.state);
10146
10147                        if (plane_state->uapi.visible &&
10148                            plane->base.type != DRM_PLANE_TYPE_PRIMARY)
10149                                intel_plane_disable_noatomic(crtc, plane);
10150                }
10151
10152                /* Disable any background color/etc. set by the BIOS */
10153                intel_color_commit(crtc_state);
10154        }
10155
10156        /* Adjust the state of the output pipe according to whether we
10157         * have active connectors/encoders. */
10158        if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
10159            !crtc_state->bigjoiner_slave)
10160                intel_crtc_disable_noatomic(crtc, ctx);
10161
10162        if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
10163                /*
10164                 * We start out with underrun reporting disabled to avoid races.
10165                 * For correct bookkeeping mark this on active crtcs.
10166                 *
10167                 * Also on gmch platforms we don't have any hardware bits to
10168                 * disable the underrun reporting, which means we need to start
10169                 * out with underrun reporting disabled also on inactive pipes,
10170                 * since otherwise we'll complain about the garbage we read when
10171                 * e.g. coming up after runtime pm.
10172                 *
10173                 * No protection against concurrent access is required - at
10174                 * worst a fifo underrun happens which also sets this to false.
10175                 */
10176                crtc->cpu_fifo_underrun_disabled = true;
10177                /*
10178                 * We track the PCH transcoder underrun reporting state
10179                 * within the crtc. With crtc for pipe A housing the underrun
10180                 * reporting state for PCH transcoder A, crtc for pipe B housing
10181                 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
10182                 * and marking underrun reporting as disabled for the non-existing
10183                 * PCH transcoders B and C would prevent enabling the south
10184                 * error interrupt (see cpt_can_enable_serr_int()).
10185                 */
10186                if (has_pch_trancoder(dev_priv, crtc->pipe))
10187                        crtc->pch_fifo_underrun_disabled = true;
10188        }
10189}
10190
10191static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
10192{
10193        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
10194
10195        /*
10196         * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
10197         * the hardware when a high res display is plugged in. The DPLL P
10198         * divider is zero, and the pipe timings are bonkers. We'll
10199         * try to disable everything in that case.
10200         *
10201         * FIXME would be nice to be able to sanitize this state
10202         * without several WARNs, but for now let's take the easy
10203         * road.
10204         */
10205        return IS_SANDYBRIDGE(dev_priv) &&
10206                crtc_state->hw.active &&
10207                crtc_state->shared_dpll &&
10208                crtc_state->port_clock == 0;
10209}
10210
10211static void intel_sanitize_encoder(struct intel_encoder *encoder)
10212{
10213        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10214        struct intel_connector *connector;
10215        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
10216        struct intel_crtc_state *crtc_state = crtc ?
10217                to_intel_crtc_state(crtc->base.state) : NULL;
10218
10219        /* We need to check both for a crtc link (meaning that the
10220         * encoder is active and trying to read from a pipe) and the
10221         * pipe itself being active. */
10222        bool has_active_crtc = crtc_state &&
10223                crtc_state->hw.active;
10224
10225        if (crtc_state && has_bogus_dpll_config(crtc_state)) {
10226                drm_dbg_kms(&dev_priv->drm,
10227                            "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
10228                            pipe_name(crtc->pipe));
10229                has_active_crtc = false;
10230        }
10231
10232        connector = intel_encoder_find_connector(encoder);
10233        if (connector && !has_active_crtc) {
10234                drm_dbg_kms(&dev_priv->drm,
10235                            "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
10236                            encoder->base.base.id,
10237                            encoder->base.name);
10238
10239                /* Connector is active, but has no active pipe. This is
10240                 * fallout from our resume register restoring. Disable
10241                 * the encoder manually again. */
10242                if (crtc_state) {
10243                        struct drm_encoder *best_encoder;
10244
10245                        drm_dbg_kms(&dev_priv->drm,
10246                                    "[ENCODER:%d:%s] manually disabled\n",
10247                                    encoder->base.base.id,
10248                                    encoder->base.name);
10249
10250                        /* avoid oopsing in case the hooks consult best_encoder */
10251                        best_encoder = connector->base.state->best_encoder;
10252                        connector->base.state->best_encoder = &encoder->base;
10253
10254                        /* FIXME NULL atomic state passed! */
10255                        if (encoder->disable)
10256                                encoder->disable(NULL, encoder, crtc_state,
10257                                                 connector->base.state);
10258                        if (encoder->post_disable)
10259                                encoder->post_disable(NULL, encoder, crtc_state,
10260                                                      connector->base.state);
10261
10262                        connector->base.state->best_encoder = best_encoder;
10263                }
10264                encoder->base.crtc = NULL;
10265
10266                /* Inconsistent output/port/pipe state presumably happens due to
10267                 * a bug in one of the get_hw_state functions, or someplace else
10268                 * in our code, like the register restore mess on resume. Clamp
10269                 * things to off as a safer default. */
10270
10271                connector->base.dpms = DRM_MODE_DPMS_OFF;
10272                connector->base.encoder = NULL;
10273        }
10274
10275        /* notify opregion of the sanitized encoder state */
10276        intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
10277
10278        if (HAS_DDI(dev_priv))
10279                intel_ddi_sanitize_encoder_pll_mapping(encoder);
10280}
10281
10282/* FIXME read out full plane state for all planes */
10283static void readout_plane_state(struct drm_i915_private *dev_priv)
10284{
10285        struct intel_plane *plane;
10286        struct intel_crtc *crtc;
10287
10288        for_each_intel_plane(&dev_priv->drm, plane) {
10289                struct intel_plane_state *plane_state =
10290                        to_intel_plane_state(plane->base.state);
10291                struct intel_crtc_state *crtc_state;
10292                enum pipe pipe = PIPE_A;
10293                bool visible;
10294
10295                visible = plane->get_hw_state(plane, &pipe);
10296
10297                crtc = intel_crtc_for_pipe(dev_priv, pipe);
10298                crtc_state = to_intel_crtc_state(crtc->base.state);
10299
10300                intel_set_plane_visible(crtc_state, plane_state, visible);
10301
10302                drm_dbg_kms(&dev_priv->drm,
10303                            "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
10304                            plane->base.base.id, plane->base.name,
10305                            enableddisabled(visible), pipe_name(pipe));
10306        }
10307
10308        for_each_intel_crtc(&dev_priv->drm, crtc) {
10309                struct intel_crtc_state *crtc_state =
10310                        to_intel_crtc_state(crtc->base.state);
10311
10312                fixup_plane_bitmasks(crtc_state);
10313        }
10314}
10315
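/*
 * Read the current hardware state into the crtc, plane, encoder and
 * connector state structures, and into the global cdclk, dbuf, dpll and
 * bandwidth state objects.
 */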
10316static void intel_modeset_readout_hw_state(struct drm_device *dev)
10317{
10318        struct drm_i915_private *dev_priv = to_i915(dev);
10319        struct intel_cdclk_state *cdclk_state =
10320                to_intel_cdclk_state(dev_priv->cdclk.obj.state);
10321        struct intel_dbuf_state *dbuf_state =
10322                to_intel_dbuf_state(dev_priv->dbuf.obj.state);
10323        enum pipe pipe;
10324        struct intel_crtc *crtc;
10325        struct intel_encoder *encoder;
10326        struct intel_connector *connector;
10327        struct drm_connector_list_iter conn_iter;
10328        u8 active_pipes = 0;
10329
10330        for_each_intel_crtc(dev, crtc) {
10331                struct intel_crtc_state *crtc_state =
10332                        to_intel_crtc_state(crtc->base.state);
10333
10334                __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
10335                intel_crtc_free_hw_state(crtc_state);
10336                intel_crtc_state_reset(crtc_state, crtc);
10337
10338                intel_crtc_get_pipe_config(crtc_state);
10339
10340                crtc_state->hw.enable = crtc_state->hw.active;
10341
10342                crtc->base.enabled = crtc_state->hw.enable;
10343                crtc->active = crtc_state->hw.active;
10344
10345                if (crtc_state->hw.active)
10346                        active_pipes |= BIT(crtc->pipe);
10347
10348                drm_dbg_kms(&dev_priv->drm,
10349                            "[CRTC:%d:%s] hw state readout: %s\n",
10350                            crtc->base.base.id, crtc->base.name,
10351                            enableddisabled(crtc_state->hw.active));
10352        }
10353
10354        cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
10355
10356        readout_plane_state(dev_priv);
10357
10358        for_each_intel_encoder(dev, encoder) {
10359                struct intel_crtc_state *crtc_state = NULL;
10360
10361                pipe = 0;
10362
10363                if (encoder->get_hw_state(encoder, &pipe)) {
10364                        crtc = intel_crtc_for_pipe(dev_priv, pipe);
10365                        crtc_state = to_intel_crtc_state(crtc->base.state);
10366
10367                        encoder->base.crtc = &crtc->base;
10368                        intel_encoder_get_config(encoder, crtc_state);
10369
10370                        /* read out to slave crtc as well for bigjoiner */
10371                        if (crtc_state->bigjoiner) {
10372                                /* encoder should be linked to the bigjoiner master */
10373                                WARN_ON(crtc_state->bigjoiner_slave);
10374
10375                                crtc = crtc_state->bigjoiner_linked_crtc;
10376                                crtc_state = to_intel_crtc_state(crtc->base.state);
10377                                intel_encoder_get_config(encoder, crtc_state);
10378                        }
10379                } else {
10380                        encoder->base.crtc = NULL;
10381                }
10382
10383                if (encoder->sync_state)
10384                        encoder->sync_state(encoder, crtc_state);
10385
10386                drm_dbg_kms(&dev_priv->drm,
10387                            "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
10388                            encoder->base.base.id, encoder->base.name,
10389                            enableddisabled(encoder->base.crtc),
10390                            pipe_name(pipe));
10391        }
10392
10393        intel_dpll_readout_hw_state(dev_priv);
10394
10395        drm_connector_list_iter_begin(dev, &conn_iter);
10396        for_each_intel_connector_iter(connector, &conn_iter) {
10397                if (connector->get_hw_state(connector)) {
10398                        struct intel_crtc_state *crtc_state;
10399                        struct intel_crtc *crtc;
10400
10401                        connector->base.dpms = DRM_MODE_DPMS_ON;
10402
10403                        encoder = intel_attached_encoder(connector);
10404                        connector->base.encoder = &encoder->base;
10405
10406                        crtc = to_intel_crtc(encoder->base.crtc);
10407                        crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
10408
10409                        if (crtc_state && crtc_state->hw.active) {
10410                                /*
10411                                 * This has to be done during hardware readout
10412                                 * because anything calling .crtc_disable may
10413                                 * rely on the connector_mask being accurate.
10414                                 */
10415                                crtc_state->uapi.connector_mask |=
10416                                        drm_connector_mask(&connector->base);
10417                                crtc_state->uapi.encoder_mask |=
10418                                        drm_encoder_mask(&encoder->base);
10419                        }
10420                } else {
10421                        connector->base.dpms = DRM_MODE_DPMS_OFF;
10422                        connector->base.encoder = NULL;
10423                }
10424                drm_dbg_kms(&dev_priv->drm,
10425                            "[CONNECTOR:%d:%s] hw state readout: %s\n",
10426                            connector->base.base.id, connector->base.name,
10427                            enableddisabled(connector->base.encoder));
10428        }
10429        drm_connector_list_iter_end(&conn_iter);
10430
10431        for_each_intel_crtc(dev, crtc) {
10432                struct intel_bw_state *bw_state =
10433                        to_intel_bw_state(dev_priv->bw_obj.state);
10434                struct intel_crtc_state *crtc_state =
10435                        to_intel_crtc_state(crtc->base.state);
10436                struct intel_plane *plane;
10437                int min_cdclk = 0;
10438
10439                if (crtc_state->hw.active) {
10440                        /*
10441                         * The initial mode needs to be set in order to keep
10442                         * the atomic core happy. It wants a valid mode if the
10443                         * crtc's enabled, so we do the above call.
10444                         *
10445                         * But we don't set all the derived state fully, hence
10446                         * set a flag to indicate that a full recalculation is
10447                         * needed on the next commit.
10448                         */
10449                        crtc_state->inherited = true;
10450
10451                        intel_crtc_update_active_timings(crtc_state);
10452
10453                        intel_crtc_copy_hw_to_uapi_state(crtc_state);
10454                }
10455
10456                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
10457                        const struct intel_plane_state *plane_state =
10458                                to_intel_plane_state(plane->base.state);
10459
10460                        /*
10461                         * FIXME don't have the fb yet, so can't
10462                         * use intel_plane_data_rate() :(
10463                         */
10464                        if (plane_state->uapi.visible)
10465                                crtc_state->data_rate[plane->id] =
10466                                        4 * crtc_state->pixel_rate;
10467                        /*
10468                         * FIXME don't have the fb yet, so can't
10469                         * use plane->min_cdclk() :(
10470                         */
10471                        if (plane_state->uapi.visible && plane->min_cdclk) {
10472                                if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
10473                                        crtc_state->min_cdclk[plane->id] =
10474                                                DIV_ROUND_UP(crtc_state->pixel_rate, 2);
10475                                else
10476                                        crtc_state->min_cdclk[plane->id] =
10477                                                crtc_state->pixel_rate;
10478                        }
10479                        drm_dbg_kms(&dev_priv->drm,
10480                                    "[PLANE:%d:%s] min_cdclk %d kHz\n",
10481                                    plane->base.base.id, plane->base.name,
10482                                    crtc_state->min_cdclk[plane->id]);
10483                }
10484
10485                if (crtc_state->hw.active) {
10486                        min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
10487                        if (drm_WARN_ON(dev, min_cdclk < 0))
10488                                min_cdclk = 0;
10489                }
10490
10491                cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
10492                cdclk_state->min_voltage_level[crtc->pipe] =
10493                        crtc_state->min_voltage_level;
10494
10495                intel_bw_crtc_update(bw_state, crtc_state);
10496
10497                intel_pipe_config_sanity_check(dev_priv, crtc_state);
10498        }
10499}
10500
10501static void
10502get_encoder_power_domains(struct drm_i915_private *dev_priv)
10503{
10504        struct intel_encoder *encoder;
10505
10506        for_each_intel_encoder(&dev_priv->drm, encoder) {
10507                struct intel_crtc_state *crtc_state;
10508
10509                if (!encoder->get_power_domains)
10510                        continue;
10511
10512                /*
10513                 * MST-primary and inactive encoders don't have a crtc state
10514                 * and neither of these requires any power domain references.
10515                 */
10516                if (!encoder->base.crtc)
10517                        continue;
10518
10519                crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
10520                encoder->get_power_domains(encoder, crtc_state);
10521        }
10522}
10523
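/*
 * Apply display workarounds that must be in place before the rest of the
 * hardware state is touched (e.g. before any planes get disabled).
 */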
10524static void intel_early_display_was(struct drm_i915_private *dev_priv)
10525{
10526        /*
10527         * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
10528         * Also known as Wa_14010480278.
10529         */
10530        if (IS_DISPLAY_VER(dev_priv, 10, 12))
10531                intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
10532                               intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
10533
10534        if (IS_HASWELL(dev_priv)) {
10535                /*
10536                 * WaRsPkgCStateDisplayPMReq:hsw
10537                 * System hang if this isn't done before disabling all planes!
10538                 */
10539                intel_de_write(dev_priv, CHICKEN_PAR1_1,
10540                               intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
10541        }
10542
10543        if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
10544                /* Display WA #1142:kbl,cfl,cml */
10545                intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
10546                             KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
10547                intel_de_rmw(dev_priv, CHICKEN_MISC_2,
10548                             KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
10549                             KBL_ARB_FILL_SPARE_14);
10550        }
10551}
10552
10553static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
10554                                       enum port port, i915_reg_t hdmi_reg)
10555{
10556        u32 val = intel_de_read(dev_priv, hdmi_reg);
10557
10558        if (val & SDVO_ENABLE ||
10559            (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
10560                return;
10561
10562        drm_dbg_kms(&dev_priv->drm,
10563                    "Sanitizing transcoder select for HDMI %c\n",
10564                    port_name(port));
10565
10566        val &= ~SDVO_PIPE_SEL_MASK;
10567        val |= SDVO_PIPE_SEL(PIPE_A);
10568
10569        intel_de_write(dev_priv, hdmi_reg, val);
10570}
10571
10572static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
10573                                     enum port port, i915_reg_t dp_reg)
10574{
10575        u32 val = intel_de_read(dev_priv, dp_reg);
10576
10577        if (val & DP_PORT_EN ||
10578            (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
10579                return;
10580
10581        drm_dbg_kms(&dev_priv->drm,
10582                    "Sanitizing transcoder select for DP %c\n",
10583                    port_name(port));
10584
10585        val &= ~DP_PIPE_SEL_MASK;
10586        val |= DP_PIPE_SEL(PIPE_A);
10587
10588        intel_de_write(dev_priv, dp_reg, val);
10589}
10590
10591static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
10592{
10593        /*
10594         * The BIOS may select transcoder B on some of the PCH
10595         * ports even though it doesn't enable the port. This would trip
10596         * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
10597         * Sanitize the transcoder select bits to prevent that. We
10598         * assume that the BIOS never actually enabled the port,
10599         * because if it did we'd actually have to toggle the port
10600         * on and back off to make the transcoder A select stick
10601         * (see intel_dp_link_down(), intel_disable_hdmi(),
10602         * intel_disable_sdvo()).
10603         */
10604        ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
10605        ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
10606        ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
10607
10608        /* PCH SDVOB is multiplexed with HDMIB */
10609        ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
10610        ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
10611        ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
10612}
10613
10614/* Scan out the current hw modeset state and
10615 * sanitize it to match what the driver expects.
10616 */
10617static void
10618intel_modeset_setup_hw_state(struct drm_device *dev,
10619                             struct drm_modeset_acquire_ctx *ctx)
10620{
10621        struct drm_i915_private *dev_priv = to_i915(dev);
10622        struct intel_encoder *encoder;
10623        struct intel_crtc *crtc;
10624        intel_wakeref_t wakeref;
10625
10626        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
10627
10628        intel_early_display_was(dev_priv);
10629        intel_modeset_readout_hw_state(dev);
10630
10631        /* HW state is read out, now we need to sanitize this mess. */
10632        get_encoder_power_domains(dev_priv);
10633
10634        if (HAS_PCH_IBX(dev_priv))
10635                ibx_sanitize_pch_ports(dev_priv);
10636
10637        /*
10638         * intel_sanitize_plane_mapping() may need to do vblank
10639         * waits, so we need vblank interrupts restored beforehand.
10640         */
10641        for_each_intel_crtc(&dev_priv->drm, crtc) {
10642                struct intel_crtc_state *crtc_state =
10643                        to_intel_crtc_state(crtc->base.state);
10644
10645                drm_crtc_vblank_reset(&crtc->base);
10646
10647                if (crtc_state->hw.active)
10648                        intel_crtc_vblank_on(crtc_state);
10649        }
10650
10651        intel_sanitize_plane_mapping(dev_priv);
10652
10653        for_each_intel_encoder(dev, encoder)
10654                intel_sanitize_encoder(encoder);
10655
10656        for_each_intel_crtc(&dev_priv->drm, crtc) {
10657                struct intel_crtc_state *crtc_state =
10658                        to_intel_crtc_state(crtc->base.state);
10659
10660                intel_sanitize_crtc(crtc, ctx);
10661                intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
10662        }
10663
10664        intel_modeset_update_connector_atomic_state(dev);
10665
10666        intel_dpll_sanitize_state(dev_priv);
10667
10668        if (IS_G4X(dev_priv)) {
10669                g4x_wm_get_hw_state(dev_priv);
10670                g4x_wm_sanitize(dev_priv);
10671        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
10672                vlv_wm_get_hw_state(dev_priv);
10673                vlv_wm_sanitize(dev_priv);
10674        } else if (DISPLAY_VER(dev_priv) >= 9) {
10675                skl_wm_get_hw_state(dev_priv);
10676                skl_wm_sanitize(dev_priv);
10677        } else if (HAS_PCH_SPLIT(dev_priv)) {
10678                ilk_wm_get_hw_state(dev_priv);
10679        }
10680
10681        for_each_intel_crtc(dev, crtc) {
10682                struct intel_crtc_state *crtc_state =
10683                        to_intel_crtc_state(crtc->base.state);
10684                u64 put_domains;
10685
10686                put_domains = modeset_get_crtc_power_domains(crtc_state);
10687                if (drm_WARN_ON(dev, put_domains))
10688                        modeset_put_crtc_power_domains(crtc, put_domains);
10689        }
10690
10691        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
10692}
10693
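/*
 * Restore the atomic state saved across suspend (if any), retrying the
 * modeset locks on deadlock.
 */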
10694void intel_display_resume(struct drm_device *dev)
10695{
10696        struct drm_i915_private *dev_priv = to_i915(dev);
10697        struct drm_atomic_state *state = dev_priv->modeset_restore_state;
10698        struct drm_modeset_acquire_ctx ctx;
10699        int ret;
10700
10701        if (!HAS_DISPLAY(dev_priv))
10702                return;
10703
10704        dev_priv->modeset_restore_state = NULL;
10705        if (state)
10706                state->acquire_ctx = &ctx;
10707
10708        drm_modeset_acquire_init(&ctx, 0);
10709
10710        while (1) {
10711                ret = drm_modeset_lock_all_ctx(dev, &ctx);
10712                if (ret != -EDEADLK)
10713                        break;
10714
10715                drm_modeset_backoff(&ctx);
10716        }
10717
10718        if (!ret)
10719                ret = __intel_display_resume(dev, state, &ctx);
10720
10721        intel_enable_ipc(dev_priv);
10722        drm_modeset_drop_locks(&ctx);
10723        drm_modeset_acquire_fini(&ctx);
10724
10725        if (ret)
10726                drm_err(&dev_priv->drm,
10727                        "Restoring old state failed with %i\n", ret);
10728        if (state)
10729                drm_atomic_state_put(state);
10730}
10731
10732static void intel_hpd_poll_fini(struct drm_i915_private *i915)
10733{
10734        struct intel_connector *connector;
10735        struct drm_connector_list_iter conn_iter;
10736
10737        /* Kill all the work that may have been queued by hpd. */
10738        drm_connector_list_iter_begin(&i915->drm, &conn_iter);
10739        for_each_intel_connector_iter(connector, &conn_iter) {
10740                if (connector->modeset_retry_work.func)
10741                        cancel_work_sync(&connector->modeset_retry_work);
10742                if (connector->hdcp.shim) {
10743                        cancel_delayed_work_sync(&connector->hdcp.check_work);
10744                        cancel_work_sync(&connector->hdcp.prop_work);
10745                }
10746        }
10747        drm_connector_list_iter_end(&conn_iter);
10748}
10749
10750/* part #1: call before irq uninstall */
10751void intel_modeset_driver_remove(struct drm_i915_private *i915)
10752{
10753        if (!HAS_DISPLAY(i915))
10754                return;
10755
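             /*
              * Flush any pending flip/modeset work and the atomic helper's
              * deferred commit-state cleanup while irqs are still installed.
              */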
10756        flush_workqueue(i915->flip_wq);
10757        flush_workqueue(i915->modeset_wq);
10758
10759        flush_work(&i915->atomic_helper.free_work);
10760        drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
10761}
10762
10763/* part #2: call after irq uninstall */
10764void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
10765{
10766        if (!HAS_DISPLAY(i915))
10767                return;
10768
10769        /*
10770         * Due to the hpd irq storm handling, the hotplug work can re-arm the
10771         * poll handlers. Hence disable polling only after hpd handling is shut down.
10772         */
10773        intel_hpd_poll_fini(i915);
10774
10775        /*
10776         * MST topology needs to be suspended so we don't have any calls to
10777         * fbdev after it's finalized. MST will be destroyed later as part of
10778         * drm_mode_config_cleanup()
10779         */
10780        intel_dp_mst_suspend(i915);
10781
10782        /* poll work can call into fbdev, hence clean that up afterwards */
10783        intel_fbdev_fini(i915);
10784
10785        intel_unregister_dsm_handler();
10786
10787        intel_fbc_global_disable(i915);
10788
10789        /* flush any delayed tasks or pending work */
10790        flush_scheduled_work();
10791
10792        intel_hdcp_component_fini(i915);
10793
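             /*
              * Tear down the mode config, which also destroys the remaining
              * crtcs, encoders and connectors.
              */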
10794        intel_mode_config_cleanup(i915);
10795
10796        intel_overlay_cleanup(i915);
10797
10798        intel_gmbus_teardown(i915);
10799
10800        destroy_workqueue(i915->flip_wq);
10801        destroy_workqueue(i915->modeset_wq);
10802
10803        intel_fbc_cleanup(i915);
10804}
10805
10806/* part #3: call after gem removal */
10807void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
10808{
10809        intel_dmc_ucode_fini(i915);
10810
10811        intel_power_domains_driver_remove(i915);
10812
10813        intel_vga_unregister(i915);
10814
10815        intel_bios_driver_remove(i915);
10816}
10817
10818bool intel_modeset_probe_defer(struct pci_dev *pdev)
10819{
10820        struct drm_privacy_screen *privacy_screen;
10821
10822        /*
10823         * apple-gmux is needed on dual GPU MacBook Pro
10824         * to probe the panel if we're the inactive GPU.
10825         */
10826        if (vga_switcheroo_client_probe_defer(pdev))
10827                return true;
10828
10829        /* If the LCD panel has a privacy-screen, defer probing until it shows up */
10830        privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
10831        if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
10832                return true;
10833
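             /*
              * drm_privacy_screen_put() is a no-op for NULL/ERR_PTR values,
              * so this is also safe for the lookup-failure cases other than
              * -EPROBE_DEFER.
              */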
10834        drm_privacy_screen_put(privacy_screen);
10835
10836        return false;
10837}
10838
10839void intel_display_driver_register(struct drm_i915_private *i915)
10840{
10841        if (!HAS_DISPLAY(i915))
10842                return;
10843
10844        intel_display_debugfs_register(i915);
10845
10846        /* Must be done after probing outputs */
10847        intel_opregion_register(i915);
10848        acpi_video_register();
10849
10850        intel_audio_init(i915);
10851
10852        /*
10853         * Some ports require correctly set-up hpd registers for
10854         * detection to work properly (otherwise we get a ghost connected
10855         * connector status), e.g. VGA on gm45.  Hence we can only set
10856         * up the initial fbdev config after hpd irqs are fully
10857         * enabled. We do it last so that the async config cannot run
10858         * before the connectors are registered.
10859         */
10860        intel_fbdev_initial_config_async(&i915->drm);
10861
10862        /*
10863         * We need to coordinate the hotplugs with the asynchronous
10864         * fbdev configuration, for which we use the
10865         * fbdev->async_cookie.
10866         */
10867        drm_kms_helper_poll_init(&i915->drm);
10868}
10869
10870void intel_display_driver_unregister(struct drm_i915_private *i915)
10871{
10872        if (!HAS_DISPLAY(i915))
10873                return;
10874
10875        intel_fbdev_unregister(i915);
10876        intel_audio_deinit(i915);
10877
10878        /*
10879         * After flushing the fbdev (incl. a late async config which
10880         * will have delayed queuing of a hotplug event), flush the
10881         * hotplug events.
10882         */
10883        drm_kms_helper_poll_fini(&i915->drm);
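             /* Shut down all crtcs/planes before the rest of the teardown. */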
10884        drm_atomic_helper_shutdown(&i915->drm);
10885
10886        acpi_video_unregister();
10887        intel_opregion_unregister(i915);
10888}
10889