linux/drivers/gpu/drm/i915/intel_psr.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses software
 * frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
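
/*
 * As a rough sketch of the integration described above (not code from this
 * file; the call sites shown are illustrative), the frontbuffer tracking
 * core is expected to bracket CPU frontbuffer rendering roughly like:
 *
 *      intel_psr_invalidate(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
 *      ... CPU renders to the frontbuffer ...
 *      intel_psr_flush(dev, INTEL_FRONTBUFFER_PRIMARY(pipe), ORIGIN_CPU);
 *
 * so that PSR is exited before the screen content changes and is re-enabled,
 * via the delayed work item, once rendering has been flushed out to memory.
 */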

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

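/*
 * Note: psr_dpcd[] is assumed to be cached from the sink's PSR capability
 * registers (DPCD 0x070, DP_PSR_SUPPORT and following) at detection time,
 * so checking the cached copy here needs no AUX transaction.
 */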
static bool is_edp_psr(struct intel_dp *intel_dp)
{
        return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}

static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t val;

        val = I915_READ(VLV_PSRSTAT(pipe)) &
              VLV_EDP_PSR_CURR_STATE_MASK;
        return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
               (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

static void intel_psr_write_vsc(struct intel_dp *intel_dp,
                                const struct edp_vsc_psr *vsc_psr)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
        uint32_t *data = (uint32_t *) vsc_psr;
        unsigned int i;

        /* As per BSpec (Pipe Video Data Island Packet), we need to disable
           the video DIP before programming the video DIP data buffer
           registers for the DIP being updated. */
        I915_WRITE(ctl_reg, 0);
        POSTING_READ(ctl_reg);

        for (i = 0; i < sizeof(*vsc_psr); i += 4) {
                I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
                                                   i >> 2), *data);
                data++;
        }
        for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
                I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
                                                   i >> 2), 0);

        I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
        POSTING_READ(ctl_reg);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        uint32_t val;

        /* VLV auto-generates the VSC packet as per eDP 1.3 spec, Table 3.10 */
        val  = I915_READ(VLV_VSCSDP(pipe));
        val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
        val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
        I915_WRITE(VLV_VSCSDP(pipe), val);
}

static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
        struct edp_vsc_psr psr_vsc;

        /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
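        /*
         * A note on the header bytes (per the DP spec's VSC SDP definition;
         * treat this as an annotation rather than authoritative): HB1 = 0x7
         * marks a VSC packet, HB2 is the revision (0x3 selects the PSR2/SU
         * layout) and HB3 is the number of valid data bytes (0xb for
         * revision 3, vs. 0x8 for the PSR1 header below).
         */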
        memset(&psr_vsc, 0, sizeof(psr_vsc));
        psr_vsc.sdp_header.HB0 = 0;
        psr_vsc.sdp_header.HB1 = 0x7;
        psr_vsc.sdp_header.HB2 = 0x3;
        psr_vsc.sdp_header.HB3 = 0xb;
        intel_psr_write_vsc(intel_dp, &psr_vsc);
}

static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
        struct edp_vsc_psr psr_vsc;

        /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
        memset(&psr_vsc, 0, sizeof(psr_vsc));
        psr_vsc.sdp_header.HB0 = 0;
        psr_vsc.sdp_header.HB1 = 0x7;
        psr_vsc.sdp_header.HB2 = 0x2;
        psr_vsc.sdp_header.HB3 = 0x8;
        intel_psr_write_vsc(intel_dp, &psr_vsc);
}

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                           DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
                                       enum port port)
{
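        /*
         * Gen9+ reuses the standard per-port AUX channel registers here,
         * while HSW/BDW still have dedicated EDP_PSR_AUX_* registers for
         * the message the PSR hardware sends on its own.
         */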
        if (INTEL_INFO(dev_priv)->gen >= 9)
                return DP_AUX_CH_CTL(port);
        else
                return EDP_PSR_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
                                        enum port port, int index)
{
        if (INTEL_INFO(dev_priv)->gen >= 9)
                return DP_AUX_CH_DATA(port, index);
        else
                return EDP_PSR_AUX_DATA(index);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t aux_clock_divider;
        i915_reg_t aux_ctl_reg;
        int precharge = 0x3;
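        /*
         * aux_msg below is a pre-packed native AUX write of DP_SET_POWER
         * (DPCD 0x600) = D0, which the PSR hardware can send by itself,
         * e.g. to wake the sink on PSR exit: byte 0 holds the request type
         * in its high nibble, bytes 1-2 the DPCD address, byte 3 the
         * message length minus one and byte 4 the payload.
         */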
        static const uint8_t aux_msg[] = {
                [0] = DP_AUX_NATIVE_WRITE << 4,
                [1] = DP_SET_POWER >> 8,
                [2] = DP_SET_POWER & 0xff,
                [3] = 1 - 1,
                [4] = DP_SET_POWER_D0,
        };
        enum port port = dig_port->port;
        int i;

        BUILD_BUG_ON(sizeof(aux_msg) > 20);

        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

        /* Enable AUX frame sync at sink */
        if (dev_priv->psr.aux_frame_sync)
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
                                DP_AUX_FRAME_SYNC_ENABLE);

        aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);

        /* Set up the AUX registers */
        for (i = 0; i < sizeof(aux_msg); i += 4)
                I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
                           intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

        if (INTEL_INFO(dev)->gen >= 9) {
                uint32_t val;

                val = I915_READ(aux_ctl_reg);
                val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
                val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
                val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
                val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
                /* Use hardcoded data values for PSR, frame sync and GTC */
                val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
                val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
                val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
                I915_WRITE(aux_ctl_reg, val);
        } else {
                I915_WRITE(aux_ctl_reg,
                   DP_AUX_CH_CTL_TIME_OUT_400us |
                   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
                   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
                   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
        }

        if (dev_priv->psr.link_standby)
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
        else
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE);
}

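/*
 * For reference, the VLV/CHV PSR state machine referred to in the comments
 * below (pieced together from those comments): PSR_state 0 is PSR disabled,
 * 1 is PSR inactive, 2 is the transition to active (static frame
 * transmission), 3 is active with no RFB updates, 4 is active with single
 * frame updates and 5 is the exit sequence back towards state 1.
 */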
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;

        /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
        I915_WRITE(VLV_PSRCTL(pipe),
                   VLV_EDP_PSR_MODE_SW_TIMER |
                   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
                   VLV_EDP_PSR_ENABLE);
}

static void vlv_psr_activate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;

        /* Let's do the transition from PSR_state 1 to PSR_state 2, that is,
         * PSR transition to active - static frame transmission. Then the
         * hardware is responsible for the transition to PSR_state 3, that is,
         * PSR active - no Remote Frame Buffer (RFB) update.
         */
        I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
                   VLV_EDP_PSR_ACTIVE_ENTRY);
}

static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        uint32_t max_sleep_time = 0x1f;
        /*
         * Let's respect VBT in case VBT asks for a higher idle_frame value.
         * Let's use 6 as the minimum to cover all known cases including the
         * off-by-one issue that HW has in some cases. Also there are cases
         * where the sink should be able to train with 5 or 6 idle patterns.
         */
        uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
        uint32_t val = 0x0;

        if (IS_HASWELL(dev))
                val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

        if (dev_priv->psr.link_standby)
                val |= EDP_PSR_LINK_STANDBY;

        I915_WRITE(EDP_PSR_CTL, val |
                   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
                   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
                   EDP_PSR_ENABLE);

        if (dev_priv->psr.psr2_support)
                I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
                                EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
}

static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        lockdep_assert_held(&dev_priv->psr.lock);
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

        dev_priv->psr.source_ok = false;

        /*
         * The HSW spec explicitly says PSR is tied to port A.
         * BDW+ platforms with a DDI implementation of PSR have different
         * PSR registers per transcoder, and we only implement the transcoder
         * EDP ones. Since by display design transcoder EDP is tied to port A,
         * we can safely escape based on port A alone.
         */
        if (HAS_DDI(dev) && dig_port->port != PORT_A) {
                DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
                return false;
        }

        if (!i915.enable_psr) {
                DRM_DEBUG_KMS("PSR disabled by flag\n");
                return false;
        }

        if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
            !dev_priv->psr.link_standby) {
                DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
                return false;
        }

        if (IS_HASWELL(dev) &&
            I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
                      S3D_ENABLE) {
                DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is enabled\n");
                return false;
        }

        if (IS_HASWELL(dev) &&
            intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("PSR condition failed: Interlaced mode is enabled\n");
                return false;
        }

        dev_priv->psr.source_ok = true;
        return true;
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        WARN_ON(dev_priv->psr.active);
        lockdep_assert_held(&dev_priv->psr.lock);

        /* Enable/Re-enable PSR on the host */
        if (HAS_DDI(dev))
                /* On HSW+, after we enable PSR on the source it will
                 * activate as soon as it matches the configured idle_frame
                 * count. So we just actually enable it here at activation
                 * time.
                 */
                hsw_psr_enable_source(intel_dp);
        else
                vlv_psr_activate(intel_dp);

        dev_priv->psr.active = true;
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);

        if (!HAS_PSR(dev)) {
                DRM_DEBUG_KMS("PSR not supported on this platform\n");
                return;
        }

        if (!is_edp_psr(intel_dp)) {
                DRM_DEBUG_KMS("PSR not supported by this panel\n");
                return;
        }

        mutex_lock(&dev_priv->psr.lock);
        if (dev_priv->psr.enabled) {
                DRM_DEBUG_KMS("PSR already in use\n");
                goto unlock;
        }

        if (!intel_psr_match_conditions(intel_dp))
                goto unlock;

        dev_priv->psr.busy_frontbuffer_bits = 0;

        if (HAS_DDI(dev)) {
                hsw_psr_setup_vsc(intel_dp);

                if (dev_priv->psr.psr2_support) {
                        /* PSR2 is restricted to work with panel resolutions
                         * up to 3200x2000. */
                        if (crtc->config->pipe_src_w > 3200 ||
                                crtc->config->pipe_src_h > 2000)
                                dev_priv->psr.psr2_support = false;
                        else
                                skl_psr_setup_su_vsc(intel_dp);
                }

                /*
                 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
                 * Also mask LPSP to avoid a dependency on other drivers that
                 * might block runtime_pm, besides preventing other HW tracking
                 * issues now that we can rely on frontbuffer tracking.
                 */
                I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
                           EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

                /* Enable PSR on the panel */
                hsw_psr_enable_sink(intel_dp);

                if (INTEL_INFO(dev)->gen >= 9)
                        intel_psr_activate(intel_dp);
        } else {
                vlv_psr_setup_vsc(intel_dp);

                /* Enable PSR on the panel */
                vlv_psr_enable_sink(intel_dp);

                /* On HSW+, enable_source also means going to the PSR
                 * entry/active state as soon as the idle_frame count is
                 * achieved, and doing that here would be too soon. However,
                 * on VLV enable_source just enables PSR but leaves it in the
                 * inactive state. So we may do this prior to the active
                 * transition, i.e. here.
                 */
                vlv_psr_enable_source(intel_dp);
        }

        /*
         * FIXME: Activation should happen immediately since this function
         * is just called after the pipe is fully trained and enabled.
         * However on every platform we face issues when the first activation
         * follows a modeset too quickly.
         *     - On VLV/CHV we get a blank screen on the first activation.
         *     - On HSW/BDW we get a recoverable frozen screen until the next
         *       exit-activate sequence.
         */
        if (INTEL_INFO(dev)->gen < 9)
                schedule_delayed_work(&dev_priv->psr.work,
                                      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));

        dev_priv->psr.enabled = intel_dp;
unlock:
        mutex_unlock(&dev_priv->psr.lock);
}

static void vlv_psr_disable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
        uint32_t val;

        if (dev_priv->psr.active) {
                /* Put VLV PSR back to PSR_state 0, that is, PSR disabled. */
                if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
                              VLV_EDP_PSR_IN_TRANS) == 0, 1))
                        WARN(1, "PSR transition took longer than expected\n");

                val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
                val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
                val &= ~VLV_EDP_PSR_ENABLE;
                val &= ~VLV_EDP_PSR_MODE_MASK;
                I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);

                dev_priv->psr.active = false;
        } else {
                WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
        }
}

static void hsw_psr_disable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->psr.active) {
                I915_WRITE(EDP_PSR_CTL,
                           I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);

                /* Wait until PSR is idle */
                if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
                               EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
                        DRM_ERROR("Timed out waiting for PSR Idle State\n");

                dev_priv->psr.active = false;
        } else {
                WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        }
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        /* Disable PSR on Source */
        if (HAS_DDI(dev))
                hsw_psr_disable(intel_dp);
        else
                vlv_psr_disable(intel_dp);

        /* Disable PSR on Sink */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

        dev_priv->psr.enabled = NULL;
        mutex_unlock(&dev_priv->psr.lock);

        cancel_delayed_work_sync(&dev_priv->psr.work);
}

static void intel_psr_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), psr.work.work);
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;

        /* We have to make sure PSR is ready for re-enable,
         * otherwise it stays disabled until the next full enable/disable
         * cycle. PSR might take some time to get fully disabled
         * and be ready for re-enable.
         */
        if (HAS_DDI(dev_priv->dev)) {
                if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
                              EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                        return;
                }
        } else {
                if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
                              VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                        return;
                }
        }
        mutex_lock(&dev_priv->psr.lock);
        intel_dp = dev_priv->psr.enabled;

        if (!intel_dp)
                goto unlock;

        /*
         * The delayed work can race with an invalidate, hence we need to
         * recheck. Since psr_flush first clears this and then reschedules we
         * won't ever miss a flush when bailing out here.
         */
        if (dev_priv->psr.busy_frontbuffer_bits)
                goto unlock;

        intel_psr_activate(intel_dp);
unlock:
        mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        u32 val;

        if (!dev_priv->psr.active)
                return;

        if (HAS_DDI(dev)) {
                val = I915_READ(EDP_PSR_CTL);

                WARN_ON(!(val & EDP_PSR_ENABLE));

                I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
        } else {
                val = I915_READ(VLV_PSRCTL(pipe));

                /* Here we do the transition directly from PSR_state 3 to
                 * PSR_state 5, since PSR_state 4 (active with single frame
                 * update) can be skipped. From PSR_state 5 (PSR exit) the
                 * hardware is then responsible for transitioning back to
                 * PSR_state 1 (PSR inactive), the same state as after
                 * vlv_psr_enable_source.
                 */
                val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
                I915_WRITE(VLV_PSRCTL(pipe), val);

                /* Send AUX wake up - the spec says that after transitioning
                 * to PSR active we have to send an AUX wake up by writing
                 * 01h to DPCD 600h of the sink device.
                 * XXX: This might slow down the transition, but without this
                 * HW doesn't complete the transition to PSR_state 1 and we
                 * never get the screen updated.
                 */
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                   DP_SET_POWER_D0);
        }

        dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_device *dev,
                                   unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;
        u32 val;

        /*
         * Single frame update is already supported on BDW+ but it requires
         * many workarounds and it isn't really needed.
         */
        if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
                return;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
                val = I915_READ(VLV_PSRCTL(pipe));

                /*
                 * We need to set this bit before writing registers for a
                 * flip. The bit is self-cleared once the hardware gets to
                 * the PSR active state.
                 */
                I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
        }
        mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
                          unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

        if (frontbuffer_bits)
                intel_psr_exit(dev);

        mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
                     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

        /* By definition flush = invalidate + flush */
        if (frontbuffer_bits)
                intel_psr_exit(dev);

        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
                if (!work_busy(&dev_priv->psr.work.work))
                        schedule_delayed_work(&dev_priv->psr.work,
                                              msecs_to_jiffies(100));
        mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
                HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

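        /*
         * i915.enable_psr semantics, as implemented below: -1 selects the
         * per-platform default, 0 disables PSR and any other value enables
         * it, with 2 additionally forcing link standby and 3 forcing the
         * main link off.
         */
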
        /* Per platform default */
        if (i915.enable_psr == -1) {
                if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
                    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                        i915.enable_psr = 1;
                else
                        i915.enable_psr = 0;
        }

        /* Set the link_standby vs. link_off defaults */
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                /* HSW and BDW require workarounds that we don't implement. */
                dev_priv->psr.link_standby = false;
        else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                /* On VLV and CHV only standby mode is supported. */
                dev_priv->psr.link_standby = true;
        else
                /* For new platforms let's respect the VBT again. */
                dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

        /* Override the link_standby vs. link_off defaults */
        if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
                DRM_DEBUG_KMS("PSR: Forcing link standby\n");
                dev_priv->psr.link_standby = true;
        }
        if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
                DRM_DEBUG_KMS("PSR: Forcing main link off\n");
                dev_priv->psr.link_standby = false;
        }

        INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
        mutex_init(&dev_priv->psr.lock);
}
 814