linux/drivers/gpu/drm/i915/intel_runtime_pm.c
<<
>>
Prefs
   1/*
   2 * Copyright © 2012-2014 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
  25 *    Daniel Vetter <daniel.vetter@ffwll.ch>
  26 *
  27 */
  28
  29#include <linux/pm_runtime.h>
  30#include <linux/vgaarb.h>
  31
  32#include <drm/drm_print.h>
  33
  34#include "i915_drv.h"
  35#include "intel_drv.h"
  36
  37/**
  38 * DOC: runtime pm
  39 *
  40 * The i915 driver supports dynamic enabling and disabling of entire hardware
  41 * blocks at runtime. This is especially important on the display side where
  42 * software is supposed to control many power gates manually on recent hardware,
  43 * since on the GT side a lot of the power management is done by the hardware.
  44 * But even there some manual control at the device level is required.
  45 *
  46 * Since i915 supports a diverse set of platforms with a unified codebase and
  47 * hardware engineers just love to shuffle functionality around between power
  48 * domains there's a sizeable amount of indirection required. This file provides
  49 * generic functions to the driver for grabbing and releasing references for
  50 * abstract power domains. It then maps those to the actual power wells
  51 * present for a given platform.
  52 */
  53
  54#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
  55
  56#include <linux/sort.h>
  57
  58#define STACKDEPTH 8
  59
  60static noinline depot_stack_handle_t __save_depot_stack(void)
  61{
  62        unsigned long entries[STACKDEPTH];
  63        struct stack_trace trace = {
  64                .entries = entries,
  65                .max_entries = ARRAY_SIZE(entries),
  66                .skip = 1,
  67        };
  68
  69        save_stack_trace(&trace);
  70        if (trace.nr_entries &&
  71            trace.entries[trace.nr_entries - 1] == ULONG_MAX)
  72                trace.nr_entries--;
  73
  74        return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
  75}
  76
  77static void __print_depot_stack(depot_stack_handle_t stack,
  78                                char *buf, int sz, int indent)
  79{
  80        unsigned long entries[STACKDEPTH];
  81        struct stack_trace trace = {
  82                .entries = entries,
  83                .max_entries = ARRAY_SIZE(entries),
  84        };
  85
  86        depot_fetch_stack(stack, &trace);
  87        snprint_stack_trace(buf, sz, &trace, indent);
  88}
  89
  90static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
  91{
  92        struct i915_runtime_pm *rpm = &i915->runtime_pm;
  93
  94        spin_lock_init(&rpm->debug.lock);
  95}
  96
  97static noinline depot_stack_handle_t
  98track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
  99{
 100        struct i915_runtime_pm *rpm = &i915->runtime_pm;
 101        depot_stack_handle_t stack, *stacks;
 102        unsigned long flags;
 103
 104        atomic_inc(&rpm->wakeref_count);
 105        assert_rpm_wakelock_held(i915);
 106
 107        if (!HAS_RUNTIME_PM(i915))
 108                return -1;
 109
 110        stack = __save_depot_stack();
 111        if (!stack)
 112                return -1;
 113
 114        spin_lock_irqsave(&rpm->debug.lock, flags);
 115
 116        if (!rpm->debug.count)
 117                rpm->debug.last_acquire = stack;
 118
 119        stacks = krealloc(rpm->debug.owners,
 120                          (rpm->debug.count + 1) * sizeof(*stacks),
 121                          GFP_NOWAIT | __GFP_NOWARN);
 122        if (stacks) {
 123                stacks[rpm->debug.count++] = stack;
 124                rpm->debug.owners = stacks;
 125        } else {
 126                stack = -1;
 127        }
 128
 129        spin_unlock_irqrestore(&rpm->debug.lock, flags);
 130
 131        return stack;
 132}
 133
 134static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
 135                                            depot_stack_handle_t stack)
 136{
 137        struct i915_runtime_pm *rpm = &i915->runtime_pm;
 138        unsigned long flags, n;
 139        bool found = false;
 140
 141        if (unlikely(stack == -1))
 142                return;
 143
 144        spin_lock_irqsave(&rpm->debug.lock, flags);
 145        for (n = rpm->debug.count; n--; ) {
 146                if (rpm->debug.owners[n] == stack) {
 147                        memmove(rpm->debug.owners + n,
 148                                rpm->debug.owners + n + 1,
 149                                (--rpm->debug.count - n) * sizeof(stack));
 150                        found = true;
 151                        break;
 152                }
 153        }
 154        spin_unlock_irqrestore(&rpm->debug.lock, flags);
 155
 156        if (WARN(!found,
 157                 "Unmatched wakeref (tracking %lu), count %u\n",
 158                 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
 159                char *buf;
 160
 161                buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 162                if (!buf)
 163                        return;
 164
 165                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
 166                DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
 167
 168                stack = READ_ONCE(rpm->debug.last_release);
 169                if (stack) {
 170                        __print_depot_stack(stack, buf, PAGE_SIZE, 2);
 171                        DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
 172                }
 173
 174                kfree(buf);
 175        }
 176}
 177
 178static int cmphandle(const void *_a, const void *_b)
 179{
 180        const depot_stack_handle_t * const a = _a, * const b = _b;
 181
 182        if (*a < *b)
 183                return -1;
 184        else if (*a > *b)
 185                return 1;
 186        else
 187                return 0;
 188}
 189
 190static void
 191__print_intel_runtime_pm_wakeref(struct drm_printer *p,
 192                                 const struct intel_runtime_pm_debug *dbg)
 193{
 194        unsigned long i;
 195        char *buf;
 196
 197        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 198        if (!buf)
 199                return;
 200
 201        if (dbg->last_acquire) {
 202                __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
 203                drm_printf(p, "Wakeref last acquired:\n%s", buf);
 204        }
 205
 206        if (dbg->last_release) {
 207                __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
 208                drm_printf(p, "Wakeref last released:\n%s", buf);
 209        }
 210
 211        drm_printf(p, "Wakeref count: %lu\n", dbg->count);
 212
 213        sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
 214
 215        for (i = 0; i < dbg->count; i++) {
 216                depot_stack_handle_t stack = dbg->owners[i];
 217                unsigned long rep;
 218
 219                rep = 1;
 220                while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
 221                        rep++, i++;
 222                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
 223                drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
 224        }
 225
 226        kfree(buf);
 227}
 228
 229static noinline void
 230untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 231{
 232        struct i915_runtime_pm *rpm = &i915->runtime_pm;
 233        struct intel_runtime_pm_debug dbg = {};
 234        struct drm_printer p;
 235        unsigned long flags;
 236
 237        assert_rpm_wakelock_held(i915);
 238        if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
 239                                        &rpm->debug.lock,
 240                                        flags)) {
 241                dbg = rpm->debug;
 242
 243                rpm->debug.owners = NULL;
 244                rpm->debug.count = 0;
 245                rpm->debug.last_release = __save_depot_stack();
 246
 247                spin_unlock_irqrestore(&rpm->debug.lock, flags);
 248        }
 249        if (!dbg.count)
 250                return;
 251
 252        p = drm_debug_printer("i915");
 253        __print_intel_runtime_pm_wakeref(&p, &dbg);
 254
 255        kfree(dbg.owners);
 256}
 257
 258void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
 259                                    struct drm_printer *p)
 260{
 261        struct intel_runtime_pm_debug dbg = {};
 262
 263        do {
 264                struct i915_runtime_pm *rpm = &i915->runtime_pm;
 265                unsigned long alloc = dbg.count;
 266                depot_stack_handle_t *s;
 267
 268                spin_lock_irq(&rpm->debug.lock);
 269                dbg.count = rpm->debug.count;
 270                if (dbg.count <= alloc) {
 271                        memcpy(dbg.owners,
 272                               rpm->debug.owners,
 273                               dbg.count * sizeof(*s));
 274                }
 275                dbg.last_acquire = rpm->debug.last_acquire;
 276                dbg.last_release = rpm->debug.last_release;
 277                spin_unlock_irq(&rpm->debug.lock);
 278                if (dbg.count <= alloc)
 279                        break;
 280
 281                s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
 282                if (!s)
 283                        goto out;
 284
 285                dbg.owners = s;
 286        } while (1);
 287
 288        __print_intel_runtime_pm_wakeref(p, &dbg);
 289
 290out:
 291        kfree(dbg.owners);
 292}
 293
 294#else
 295
 296static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 297{
 298}
 299
 300static depot_stack_handle_t
 301track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 302{
 303        atomic_inc(&i915->runtime_pm.wakeref_count);
 304        assert_rpm_wakelock_held(i915);
 305        return -1;
 306}
 307
 308static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 309{
 310        assert_rpm_wakelock_held(i915);
 311        atomic_dec(&i915->runtime_pm.wakeref_count);
 312}
 313
 314#endif
 315
 316bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 317                                         enum i915_power_well_id power_well_id);
 318
 319const char *
 320intel_display_power_domain_str(enum intel_display_power_domain domain)
 321{
 322        switch (domain) {
 323        case POWER_DOMAIN_PIPE_A:
 324                return "PIPE_A";
 325        case POWER_DOMAIN_PIPE_B:
 326                return "PIPE_B";
 327        case POWER_DOMAIN_PIPE_C:
 328                return "PIPE_C";
 329        case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
 330                return "PIPE_A_PANEL_FITTER";
 331        case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
 332                return "PIPE_B_PANEL_FITTER";
 333        case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
 334                return "PIPE_C_PANEL_FITTER";
 335        case POWER_DOMAIN_TRANSCODER_A:
 336                return "TRANSCODER_A";
 337        case POWER_DOMAIN_TRANSCODER_B:
 338                return "TRANSCODER_B";
 339        case POWER_DOMAIN_TRANSCODER_C:
 340                return "TRANSCODER_C";
 341        case POWER_DOMAIN_TRANSCODER_EDP:
 342                return "TRANSCODER_EDP";
 343        case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
 344                return "TRANSCODER_EDP_VDSC";
 345        case POWER_DOMAIN_TRANSCODER_DSI_A:
 346                return "TRANSCODER_DSI_A";
 347        case POWER_DOMAIN_TRANSCODER_DSI_C:
 348                return "TRANSCODER_DSI_C";
 349        case POWER_DOMAIN_PORT_DDI_A_LANES:
 350                return "PORT_DDI_A_LANES";
 351        case POWER_DOMAIN_PORT_DDI_B_LANES:
 352                return "PORT_DDI_B_LANES";
 353        case POWER_DOMAIN_PORT_DDI_C_LANES:
 354                return "PORT_DDI_C_LANES";
 355        case POWER_DOMAIN_PORT_DDI_D_LANES:
 356                return "PORT_DDI_D_LANES";
 357        case POWER_DOMAIN_PORT_DDI_E_LANES:
 358                return "PORT_DDI_E_LANES";
 359        case POWER_DOMAIN_PORT_DDI_F_LANES:
 360                return "PORT_DDI_F_LANES";
 361        case POWER_DOMAIN_PORT_DDI_A_IO:
 362                return "PORT_DDI_A_IO";
 363        case POWER_DOMAIN_PORT_DDI_B_IO:
 364                return "PORT_DDI_B_IO";
 365        case POWER_DOMAIN_PORT_DDI_C_IO:
 366                return "PORT_DDI_C_IO";
 367        case POWER_DOMAIN_PORT_DDI_D_IO:
 368                return "PORT_DDI_D_IO";
 369        case POWER_DOMAIN_PORT_DDI_E_IO:
 370                return "PORT_DDI_E_IO";
 371        case POWER_DOMAIN_PORT_DDI_F_IO:
 372                return "PORT_DDI_F_IO";
 373        case POWER_DOMAIN_PORT_DSI:
 374                return "PORT_DSI";
 375        case POWER_DOMAIN_PORT_CRT:
 376                return "PORT_CRT";
 377        case POWER_DOMAIN_PORT_OTHER:
 378                return "PORT_OTHER";
 379        case POWER_DOMAIN_VGA:
 380                return "VGA";
 381        case POWER_DOMAIN_AUDIO:
 382                return "AUDIO";
 383        case POWER_DOMAIN_PLLS:
 384                return "PLLS";
 385        case POWER_DOMAIN_AUX_A:
 386                return "AUX_A";
 387        case POWER_DOMAIN_AUX_B:
 388                return "AUX_B";
 389        case POWER_DOMAIN_AUX_C:
 390                return "AUX_C";
 391        case POWER_DOMAIN_AUX_D:
 392                return "AUX_D";
 393        case POWER_DOMAIN_AUX_E:
 394                return "AUX_E";
 395        case POWER_DOMAIN_AUX_F:
 396                return "AUX_F";
 397        case POWER_DOMAIN_AUX_IO_A:
 398                return "AUX_IO_A";
 399        case POWER_DOMAIN_AUX_TBT1:
 400                return "AUX_TBT1";
 401        case POWER_DOMAIN_AUX_TBT2:
 402                return "AUX_TBT2";
 403        case POWER_DOMAIN_AUX_TBT3:
 404                return "AUX_TBT3";
 405        case POWER_DOMAIN_AUX_TBT4:
 406                return "AUX_TBT4";
 407        case POWER_DOMAIN_GMBUS:
 408                return "GMBUS";
 409        case POWER_DOMAIN_INIT:
 410                return "INIT";
 411        case POWER_DOMAIN_MODESET:
 412                return "MODESET";
 413        case POWER_DOMAIN_GT_IRQ:
 414                return "GT_IRQ";
 415        default:
 416                MISSING_CASE(domain);
 417                return "?";
 418        }
 419}
 420
 421static void intel_power_well_enable(struct drm_i915_private *dev_priv,
 422                                    struct i915_power_well *power_well)
 423{
 424        DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
 425        power_well->desc->ops->enable(dev_priv, power_well);
 426        power_well->hw_enabled = true;
 427}
 428
 429static void intel_power_well_disable(struct drm_i915_private *dev_priv,
 430                                     struct i915_power_well *power_well)
 431{
 432        DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
 433        power_well->hw_enabled = false;
 434        power_well->desc->ops->disable(dev_priv, power_well);
 435}
 436
 437static void intel_power_well_get(struct drm_i915_private *dev_priv,
 438                                 struct i915_power_well *power_well)
 439{
 440        if (!power_well->count++)
 441                intel_power_well_enable(dev_priv, power_well);
 442}
 443
 444static void intel_power_well_put(struct drm_i915_private *dev_priv,
 445                                 struct i915_power_well *power_well)
 446{
 447        WARN(!power_well->count, "Use count on power well %s is already zero",
 448             power_well->desc->name);
 449
 450        if (!--power_well->count)
 451                intel_power_well_disable(dev_priv, power_well);
 452}
 453
 454/**
 455 * __intel_display_power_is_enabled - unlocked check for a power domain
 456 * @dev_priv: i915 device instance
 457 * @domain: power domain to check
 458 *
 459 * This is the unlocked version of intel_display_power_is_enabled() and should
 460 * only be used from error capture and recovery code where deadlocks are
 461 * possible.
 462 *
 463 * Returns:
 464 * True when the power domain is enabled, false otherwise.
 465 */
 466bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 467                                      enum intel_display_power_domain domain)
 468{
 469        struct i915_power_well *power_well;
 470        bool is_enabled;
 471
 472        if (dev_priv->runtime_pm.suspended)
 473                return false;
 474
 475        is_enabled = true;
 476
 477        for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
 478                if (power_well->desc->always_on)
 479                        continue;
 480
 481                if (!power_well->hw_enabled) {
 482                        is_enabled = false;
 483                        break;
 484                }
 485        }
 486
 487        return is_enabled;
 488}
 489
 490/**
 491 * intel_display_power_is_enabled - check for a power domain
 492 * @dev_priv: i915 device instance
 493 * @domain: power domain to check
 494 *
 495 * This function can be used to check the hw power domain state. It is mostly
 496 * used in hardware state readout functions. Everywhere else code should rely
 497 * upon explicit power domain reference counting to ensure that the hardware
 498 * block is powered up before accessing it.
 499 *
 500 * Callers must hold the relevant modesetting locks to ensure that concurrent
 501 * threads can't disable the power well while the caller tries to read a few
 502 * registers.
 503 *
 504 * Returns:
 505 * True when the power domain is enabled, false otherwise.
 506 */
 507bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 508                                    enum intel_display_power_domain domain)
 509{
 510        struct i915_power_domains *power_domains;
 511        bool ret;
 512
 513        power_domains = &dev_priv->power_domains;
 514
 515        mutex_lock(&power_domains->lock);
 516        ret = __intel_display_power_is_enabled(dev_priv, domain);
 517        mutex_unlock(&power_domains->lock);
 518
 519        return ret;
 520}
 521
 522/*
 523 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 524 * when not needed anymore. We have 4 registers that can request the power well
 525 * to be enabled, and it will only be disabled if none of the registers is
 526 * requesting it to be enabled.
 527 */
 528static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
 529                                       u8 irq_pipe_mask, bool has_vga)
 530{
 531        struct pci_dev *pdev = dev_priv->drm.pdev;
 532
 533        /*
 534         * After we re-enable the power well, if we touch VGA register 0x3d5
 535         * we'll get unclaimed register interrupts. This stops after we write
 536         * anything to the VGA MSR register. The vgacon module uses this
 537         * register all the time, so if we unbind our driver and, as a
 538         * consequence, bind vgacon, we'll get stuck in an infinite loop at
 539         * console_unlock(). So make here we touch the VGA MSR register, making
 540         * sure vgacon can keep working normally without triggering interrupts
 541         * and error messages.
 542         */
 543        if (has_vga) {
 544                vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 545                outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
 546                vga_put(pdev, VGA_RSRC_LEGACY_IO);
 547        }
 548
 549        if (irq_pipe_mask)
 550                gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
 551}
 552
 553static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
 554                                       u8 irq_pipe_mask)
 555{
 556        if (irq_pipe_mask)
 557                gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
 558}
 559
 560
 561static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
 562                                           struct i915_power_well *power_well)
 563{
 564        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 565        int pw_idx = power_well->desc->hsw.idx;
 566
 567        /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
 568        WARN_ON(intel_wait_for_register(dev_priv,
 569                                        regs->driver,
 570                                        HSW_PWR_WELL_CTL_STATE(pw_idx),
 571                                        HSW_PWR_WELL_CTL_STATE(pw_idx),
 572                                        1));
 573}
 574
 575static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
 576                                     const struct i915_power_well_regs *regs,
 577                                     int pw_idx)
 578{
 579        u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
 580        u32 ret;
 581
 582        ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
 583        ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
 584        if (regs->kvmr.reg)
 585                ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
 586        ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
 587
 588        return ret;
 589}
 590
 591static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
 592                                            struct i915_power_well *power_well)
 593{
 594        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 595        int pw_idx = power_well->desc->hsw.idx;
 596        bool disabled;
 597        u32 reqs;
 598
 599        /*
 600         * Bspec doesn't require waiting for PWs to get disabled, but still do
 601         * this for paranoia. The known cases where a PW will be forced on:
 602         * - a KVMR request on any power well via the KVMR request register
 603         * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
 604         *   DEBUG request registers
 605         * Skip the wait in case any of the request bits are set and print a
 606         * diagnostic message.
 607         */
 608        wait_for((disabled = !(I915_READ(regs->driver) &
 609                               HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
 610                 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
 611        if (disabled)
 612                return;
 613
 614        DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
 615                      power_well->desc->name,
 616                      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
 617}
 618
 619static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
 620                                           enum skl_power_gate pg)
 621{
 622        /* Timeout 5us for PG#0, for other PGs 1us */
 623        WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
 624                                        SKL_FUSE_PG_DIST_STATUS(pg),
 625                                        SKL_FUSE_PG_DIST_STATUS(pg), 1));
 626}
 627
 628static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 629                                  struct i915_power_well *power_well)
 630{
 631        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 632        int pw_idx = power_well->desc->hsw.idx;
 633        bool wait_fuses = power_well->desc->hsw.has_fuses;
 634        enum skl_power_gate uninitialized_var(pg);
 635        u32 val;
 636
 637        if (wait_fuses) {
 638                pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
 639                                                 SKL_PW_CTL_IDX_TO_PG(pw_idx);
 640                /*
 641                 * For PW1 we have to wait both for the PW0/PG0 fuse state
 642                 * before enabling the power well and PW1/PG1's own fuse
 643                 * state after the enabling. For all other power wells with
 644                 * fuses we only have to wait for that PW/PG's fuse state
 645                 * after the enabling.
 646                 */
 647                if (pg == SKL_PG1)
 648                        gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
 649        }
 650
 651        val = I915_READ(regs->driver);
 652        I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 653        hsw_wait_for_power_well_enable(dev_priv, power_well);
 654
 655        /* Display WA #1178: cnl */
 656        if (IS_CANNONLAKE(dev_priv) &&
 657            pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
 658            pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
 659                val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
 660                val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
 661                I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
 662        }
 663
 664        if (wait_fuses)
 665                gen9_wait_for_power_well_fuses(dev_priv, pg);
 666
 667        hsw_power_well_post_enable(dev_priv,
 668                                   power_well->desc->hsw.irq_pipe_mask,
 669                                   power_well->desc->hsw.has_vga);
 670}
 671
 672static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
 673                                   struct i915_power_well *power_well)
 674{
 675        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 676        int pw_idx = power_well->desc->hsw.idx;
 677        u32 val;
 678
 679        hsw_power_well_pre_disable(dev_priv,
 680                                   power_well->desc->hsw.irq_pipe_mask);
 681
 682        val = I915_READ(regs->driver);
 683        I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
 684        hsw_wait_for_power_well_disable(dev_priv, power_well);
 685}
 686
 687#define ICL_AUX_PW_TO_PORT(pw_idx)      ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
 688
 689static void
 690icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 691                                    struct i915_power_well *power_well)
 692{
 693        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 694        int pw_idx = power_well->desc->hsw.idx;
 695        enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
 696        u32 val;
 697
 698        val = I915_READ(regs->driver);
 699        I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 700
 701        val = I915_READ(ICL_PORT_CL_DW12(port));
 702        I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
 703
 704        hsw_wait_for_power_well_enable(dev_priv, power_well);
 705
 706        /* Display WA #1178: icl */
 707        if (IS_ICELAKE(dev_priv) &&
 708            pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
 709            !intel_bios_is_port_edp(dev_priv, port)) {
 710                val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
 711                val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
 712                I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
 713        }
 714}
 715
 716static void
 717icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 718                                     struct i915_power_well *power_well)
 719{
 720        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 721        int pw_idx = power_well->desc->hsw.idx;
 722        enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
 723        u32 val;
 724
 725        val = I915_READ(ICL_PORT_CL_DW12(port));
 726        I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
 727
 728        val = I915_READ(regs->driver);
 729        I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
 730
 731        hsw_wait_for_power_well_disable(dev_priv, power_well);
 732}
 733
 734#define ICL_AUX_PW_TO_CH(pw_idx)        \
 735        ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
 736
 737static void
 738icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 739                                 struct i915_power_well *power_well)
 740{
 741        enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
 742        u32 val;
 743
 744        val = I915_READ(DP_AUX_CH_CTL(aux_ch));
 745        val &= ~DP_AUX_CH_CTL_TBT_IO;
 746        if (power_well->desc->hsw.is_tc_tbt)
 747                val |= DP_AUX_CH_CTL_TBT_IO;
 748        I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
 749
 750        hsw_power_well_enable(dev_priv, power_well);
 751}
 752
 753/*
 754 * We should only use the power well if we explicitly asked the hardware to
 755 * enable it, so check if it's enabled and also check if we've requested it to
 756 * be enabled.
 757 */
 758static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
 759                                   struct i915_power_well *power_well)
 760{
 761        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 762        enum i915_power_well_id id = power_well->desc->id;
 763        int pw_idx = power_well->desc->hsw.idx;
 764        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
 765                   HSW_PWR_WELL_CTL_STATE(pw_idx);
 766        u32 val;
 767
 768        val = I915_READ(regs->driver);
 769
 770        /*
 771         * On GEN9 big core due to a DMC bug the driver's request bits for PW1
 772         * and the MISC_IO PW will be not restored, so check instead for the
 773         * BIOS's own request bits, which are forced-on for these power wells
 774         * when exiting DC5/6.
 775         */
 776        if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
 777            (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
 778                val |= I915_READ(regs->bios);
 779
 780        return (val & mask) == mask;
 781}
 782
 783static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 784{
 785        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
 786                  "DC9 already programmed to be enabled.\n");
 787        WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
 788                  "DC5 still not disabled to enable DC9.\n");
 789        WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
 790                  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
 791                  "Power well 2 on.\n");
 792        WARN_ONCE(intel_irqs_enabled(dev_priv),
 793                  "Interrupts not disabled yet.\n");
 794
 795         /*
 796          * TODO: check for the following to verify the conditions to enter DC9
 797          * state are satisfied:
 798          * 1] Check relevant display engine registers to verify if mode set
 799          * disable sequence was followed.
 800          * 2] Check if display uninitialize sequence is initialized.
 801          */
 802}
 803
 804static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 805{
 806        WARN_ONCE(intel_irqs_enabled(dev_priv),
 807                  "Interrupts not disabled yet.\n");
 808        WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
 809                  "DC5 still not disabled.\n");
 810
 811         /*
 812          * TODO: check for the following to verify DC9 state was indeed
 813          * entered before programming to disable it:
 814          * 1] Check relevant display engine registers to verify if mode
 815          *  set disable sequence was followed.
 816          * 2] Check if display uninitialize sequence is initialized.
 817          */
 818}
 819
 820static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
 821                                u32 state)
 822{
 823        int rewrites = 0;
 824        int rereads = 0;
 825        u32 v;
 826
 827        I915_WRITE(DC_STATE_EN, state);
 828
 829        /* It has been observed that disabling the dc6 state sometimes
 830         * doesn't stick and dmc keeps returning old value. Make sure
 831         * the write really sticks enough times and also force rewrite until
 832         * we are confident that state is exactly what we want.
 833         */
 834        do  {
 835                v = I915_READ(DC_STATE_EN);
 836
 837                if (v != state) {
 838                        I915_WRITE(DC_STATE_EN, state);
 839                        rewrites++;
 840                        rereads = 0;
 841                } else if (rereads++ > 5) {
 842                        break;
 843                }
 844
 845        } while (rewrites < 100);
 846
 847        if (v != state)
 848                DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
 849                          state, v);
 850
 851        /* Most of the times we need one retry, avoid spam */
 852        if (rewrites > 1)
 853                DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
 854                              state, rewrites);
 855}
 856
 857static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
 858{
 859        u32 mask;
 860
 861        mask = DC_STATE_EN_UPTO_DC5;
 862        if (INTEL_GEN(dev_priv) >= 11)
 863                mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
 864        else if (IS_GEN9_LP(dev_priv))
 865                mask |= DC_STATE_EN_DC9;
 866        else
 867                mask |= DC_STATE_EN_UPTO_DC6;
 868
 869        return mask;
 870}
 871
 872void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
 873{
 874        u32 val;
 875
 876        val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
 877
 878        DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
 879                      dev_priv->csr.dc_state, val);
 880        dev_priv->csr.dc_state = val;
 881}
 882
 883/**
 884 * gen9_set_dc_state - set target display C power state
 885 * @dev_priv: i915 device instance
 886 * @state: target DC power state
 887 * - DC_STATE_DISABLE
 888 * - DC_STATE_EN_UPTO_DC5
 889 * - DC_STATE_EN_UPTO_DC6
 890 * - DC_STATE_EN_DC9
 891 *
 892 * Signal to DMC firmware/HW the target DC power state passed in @state.
 893 * DMC/HW can turn off individual display clocks and power rails when entering
 894 * a deeper DC power state (higher in number) and turns these back when exiting
 895 * that state to a shallower power state (lower in number). The HW will decide
 896 * when to actually enter a given state on an on-demand basis, for instance
 897 * depending on the active state of display pipes. The state of display
 898 * registers backed by affected power rails are saved/restored as needed.
 899 *
 900 * Based on the above enabling a deeper DC power state is asynchronous wrt.
 901 * enabling it. Disabling a deeper power state is synchronous: for instance
 902 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 903 * back on and register state is restored. This is guaranteed by the MMIO write
 904 * to DC_STATE_EN blocking until the state is restored.
 905 */
 906static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
 907{
 908        u32 val;
 909        u32 mask;
 910
 911        if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
 912                state &= dev_priv->csr.allowed_dc_mask;
 913
 914        val = I915_READ(DC_STATE_EN);
 915        mask = gen9_dc_mask(dev_priv);
 916        DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
 917                      val & mask, state);
 918
 919        /* Check if DMC is ignoring our DC state requests */
 920        if ((val & mask) != dev_priv->csr.dc_state)
 921                DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
 922                          dev_priv->csr.dc_state, val & mask);
 923
 924        val &= ~mask;
 925        val |= state;
 926
 927        gen9_write_dc_state(dev_priv, val);
 928
 929        dev_priv->csr.dc_state = val & mask;
 930}
 931
 932void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 933{
 934        assert_can_enable_dc9(dev_priv);
 935
 936        DRM_DEBUG_KMS("Enabling DC9\n");
 937        /*
 938         * Power sequencer reset is not needed on
 939         * platforms with South Display Engine on PCH,
 940         * because PPS registers are always on.
 941         */
 942        if (!HAS_PCH_SPLIT(dev_priv))
 943                intel_power_sequencer_reset(dev_priv);
 944        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 945}
 946
 947void bxt_disable_dc9(struct drm_i915_private *dev_priv)
 948{
 949        assert_can_disable_dc9(dev_priv);
 950
 951        DRM_DEBUG_KMS("Disabling DC9\n");
 952
 953        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 954
 955        intel_pps_unlock_regs_wa(dev_priv);
 956}
 957
 958static void assert_csr_loaded(struct drm_i915_private *dev_priv)
 959{
 960        WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
 961                  "CSR program storage start is NULL\n");
 962        WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
 963        WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
 964}
 965
 966static struct i915_power_well *
 967lookup_power_well(struct drm_i915_private *dev_priv,
 968                  enum i915_power_well_id power_well_id)
 969{
 970        struct i915_power_well *power_well;
 971
 972        for_each_power_well(dev_priv, power_well)
 973                if (power_well->desc->id == power_well_id)
 974                        return power_well;
 975
 976        /*
 977         * It's not feasible to add error checking code to the callers since
 978         * this condition really shouldn't happen and it doesn't even make sense
 979         * to abort things like display initialization sequences. Just return
 980         * the first power well and hope the WARN gets reported so we can fix
 981         * our driver.
 982         */
 983        WARN(1, "Power well %d not defined for this platform\n", power_well_id);
 984        return &dev_priv->power_domains.power_wells[0];
 985}
 986
 987static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 988{
 989        bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
 990                                        SKL_DISP_PW_2);
 991
 992        WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
 993
 994        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
 995                  "DC5 already programmed to be enabled.\n");
 996        assert_rpm_wakelock_held(dev_priv);
 997
 998        assert_csr_loaded(dev_priv);
 999}
1000
1001void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1002{
1003        assert_can_enable_dc5(dev_priv);
1004
1005        DRM_DEBUG_KMS("Enabling DC5\n");
1006
1007        /* Wa Display #1183: skl,kbl,cfl */
1008        if (IS_GEN9_BC(dev_priv))
1009                I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1010                           SKL_SELECT_ALTERNATE_DC_EXIT);
1011
1012        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1013}
1014
1015static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1016{
1017        WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1018                  "Backlight is not disabled.\n");
1019        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
1020                  "DC6 already programmed to be enabled.\n");
1021
1022        assert_csr_loaded(dev_priv);
1023}
1024
1025void skl_enable_dc6(struct drm_i915_private *dev_priv)
1026{
1027        assert_can_enable_dc6(dev_priv);
1028
1029        DRM_DEBUG_KMS("Enabling DC6\n");
1030
1031        /* Wa Display #1183: skl,kbl,cfl */
1032        if (IS_GEN9_BC(dev_priv))
1033                I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1034                           SKL_SELECT_ALTERNATE_DC_EXIT);
1035
1036        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1037}
1038
1039static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1040                                   struct i915_power_well *power_well)
1041{
1042        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1043        int pw_idx = power_well->desc->hsw.idx;
1044        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1045        u32 bios_req = I915_READ(regs->bios);
1046
1047        /* Take over the request bit if set by BIOS. */
1048        if (bios_req & mask) {
1049                u32 drv_req = I915_READ(regs->driver);
1050
1051                if (!(drv_req & mask))
1052                        I915_WRITE(regs->driver, drv_req | mask);
1053                I915_WRITE(regs->bios, bios_req & ~mask);
1054        }
1055}
1056
1057static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1058                                           struct i915_power_well *power_well)
1059{
1060        bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1061}
1062
1063static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1064                                            struct i915_power_well *power_well)
1065{
1066        bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1067}
1068
1069static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1070                                            struct i915_power_well *power_well)
1071{
1072        return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1073}
1074
1075static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1076{
1077        struct i915_power_well *power_well;
1078
1079        power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1080        if (power_well->count > 0)
1081                bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1082
1083        power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1084        if (power_well->count > 0)
1085                bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1086
1087        if (IS_GEMINILAKE(dev_priv)) {
1088                power_well = lookup_power_well(dev_priv,
1089                                               GLK_DISP_PW_DPIO_CMN_C);
1090                if (power_well->count > 0)
1091                        bxt_ddi_phy_verify_state(dev_priv,
1092                                                 power_well->desc->bxt.phy);
1093        }
1094}
1095
1096static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1097                                           struct i915_power_well *power_well)
1098{
1099        return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
1100}
1101
1102static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1103{
1104        u32 tmp = I915_READ(DBUF_CTL);
1105
1106        WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
1107             (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
1108             "Unexpected DBuf power power state (0x%08x)\n", tmp);
1109}
1110
1111static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1112                                          struct i915_power_well *power_well)
1113{
1114        struct intel_cdclk_state cdclk_state = {};
1115
1116        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1117
1118        dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
1119        /* Can't read out voltage_level so can't use intel_cdclk_changed() */
1120        WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
1121
1122        gen9_assert_dbuf_enabled(dev_priv);
1123
1124        if (IS_GEN9_LP(dev_priv))
1125                bxt_verify_ddi_phy_power_wells(dev_priv);
1126
1127        if (INTEL_GEN(dev_priv) >= 11)
1128                /*
1129                 * DMC retains HW context only for port A, the other combo
1130                 * PHY's HW context for port B is lost after DC transitions,
1131                 * so we need to restore it manually.
1132                 */
1133                icl_combo_phys_init(dev_priv);
1134}
1135
1136static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1137                                           struct i915_power_well *power_well)
1138{
1139        if (!dev_priv->csr.dmc_payload)
1140                return;
1141
1142        if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
1143                skl_enable_dc6(dev_priv);
1144        else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
1145                gen9_enable_dc5(dev_priv);
1146}
1147
1148static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1149                                         struct i915_power_well *power_well)
1150{
1151}
1152
1153static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1154                                           struct i915_power_well *power_well)
1155{
1156}
1157
1158static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1159                                             struct i915_power_well *power_well)
1160{
1161        return true;
1162}
1163
1164static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1165                                         struct i915_power_well *power_well)
1166{
1167        if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1168                i830_enable_pipe(dev_priv, PIPE_A);
1169        if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1170                i830_enable_pipe(dev_priv, PIPE_B);
1171}
1172
1173static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1174                                          struct i915_power_well *power_well)
1175{
1176        i830_disable_pipe(dev_priv, PIPE_B);
1177        i830_disable_pipe(dev_priv, PIPE_A);
1178}
1179
1180static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1181                                          struct i915_power_well *power_well)
1182{
1183        return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1184                I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1185}
1186
1187static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1188                                          struct i915_power_well *power_well)
1189{
1190        if (power_well->count > 0)
1191                i830_pipes_power_well_enable(dev_priv, power_well);
1192        else
1193                i830_pipes_power_well_disable(dev_priv, power_well);
1194}
1195
1196static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1197                               struct i915_power_well *power_well, bool enable)
1198{
1199        int pw_idx = power_well->desc->vlv.idx;
1200        u32 mask;
1201        u32 state;
1202        u32 ctrl;
1203
1204        mask = PUNIT_PWRGT_MASK(pw_idx);
1205        state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1206                         PUNIT_PWRGT_PWR_GATE(pw_idx);
1207
1208        mutex_lock(&dev_priv->pcu_lock);
1209
1210#define COND \
1211        ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1212
1213        if (COND)
1214                goto out;
1215
1216        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1217        ctrl &= ~mask;
1218        ctrl |= state;
1219        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1220
1221        if (wait_for(COND, 100))
1222                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1223                          state,
1224                          vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1225
1226#undef COND
1227
1228out:
1229        mutex_unlock(&dev_priv->pcu_lock);
1230}
1231
1232static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1233                                  struct i915_power_well *power_well)
1234{
1235        vlv_set_power_well(dev_priv, power_well, true);
1236}
1237
1238static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1239                                   struct i915_power_well *power_well)
1240{
1241        vlv_set_power_well(dev_priv, power_well, false);
1242}
1243
1244static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1245                                   struct i915_power_well *power_well)
1246{
1247        int pw_idx = power_well->desc->vlv.idx;
1248        bool enabled = false;
1249        u32 mask;
1250        u32 state;
1251        u32 ctrl;
1252
1253        mask = PUNIT_PWRGT_MASK(pw_idx);
1254        ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1255
1256        mutex_lock(&dev_priv->pcu_lock);
1257
1258        state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1259        /*
1260         * We only ever set the power-on and power-gate states, anything
1261         * else is unexpected.
1262         */
1263        WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1264                state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1265        if (state == ctrl)
1266                enabled = true;
1267
1268        /*
1269         * A transient state at this point would mean some unexpected party
1270         * is poking at the power controls too.
1271         */
1272        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1273        WARN_ON(ctrl != state);
1274
1275        mutex_unlock(&dev_priv->pcu_lock);
1276
1277        return enabled;
1278}
1279
1280static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1281{
1282        u32 val;
1283
1284        /*
1285         * On driver load, a pipe may be active and driving a DSI display.
1286         * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1287         * (and never recovering) in this case. intel_dsi_post_disable() will
1288         * clear it when we turn off the display.
1289         */
1290        val = I915_READ(DSPCLK_GATE_D);
1291        val &= DPOUNIT_CLOCK_GATE_DISABLE;
1292        val |= VRHUNIT_CLOCK_GATE_DISABLE;
1293        I915_WRITE(DSPCLK_GATE_D, val);
1294
1295        /*
1296         * Disable trickle feed and enable pnd deadline calculation
1297         */
1298        I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1299        I915_WRITE(CBR1_VLV, 0);
1300
1301        WARN_ON(dev_priv->rawclk_freq == 0);
1302
1303        I915_WRITE(RAWCLK_FREQ_VLV,
1304                   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1305}
1306
1307static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1308{
1309        struct intel_encoder *encoder;
1310        enum pipe pipe;
1311
1312        /*
1313         * Enable the CRI clock source so we can get at the
1314         * display and the reference clock for VGA
1315         * hotplug / manual detection. Supposedly DSI also
1316         * needs the ref clock up and running.
1317         *
1318         * CHV DPLL B/C have some issues if VGA mode is enabled.
1319         */
1320        for_each_pipe(dev_priv, pipe) {
1321                u32 val = I915_READ(DPLL(pipe));
1322
1323                val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1324                if (pipe != PIPE_A)
1325                        val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1326
1327                I915_WRITE(DPLL(pipe), val);
1328        }
1329
1330        vlv_init_display_clock_gating(dev_priv);
1331
1332        spin_lock_irq(&dev_priv->irq_lock);
1333        valleyview_enable_display_irqs(dev_priv);
1334        spin_unlock_irq(&dev_priv->irq_lock);
1335
1336        /*
1337         * During driver initialization/resume we can avoid restoring the
1338         * part of the HW/SW state that will be inited anyway explicitly.
1339         */
1340        if (dev_priv->power_domains.initializing)
1341                return;
1342
1343        intel_hpd_init(dev_priv);
1344
1345        /* Re-enable the ADPA, if we have one */
1346        for_each_intel_encoder(&dev_priv->drm, encoder) {
1347                if (encoder->type == INTEL_OUTPUT_ANALOG)
1348                        intel_crt_reset(&encoder->base);
1349        }
1350
1351        i915_redisable_vga_power_on(dev_priv);
1352
1353        intel_pps_unlock_regs_wa(dev_priv);
1354}
1355
1356static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1357{
1358        spin_lock_irq(&dev_priv->irq_lock);
1359        valleyview_disable_display_irqs(dev_priv);
1360        spin_unlock_irq(&dev_priv->irq_lock);
1361
1362        /* make sure we're done processing display irqs */
1363        synchronize_irq(dev_priv->drm.irq);
1364
1365        intel_power_sequencer_reset(dev_priv);
1366
1367        /* Prevent us from re-enabling polling on accident in late suspend */
1368        if (!dev_priv->drm.dev->power.is_suspended)
1369                intel_hpd_poll_init(dev_priv);
1370}
1371
1372static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1373                                          struct i915_power_well *power_well)
1374{
1375        vlv_set_power_well(dev_priv, power_well, true);
1376
1377        vlv_display_power_well_init(dev_priv);
1378}
1379
1380static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1381                                           struct i915_power_well *power_well)
1382{
1383        vlv_display_power_well_deinit(dev_priv);
1384
1385        vlv_set_power_well(dev_priv, power_well, false);
1386}
1387
1388static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1389                                           struct i915_power_well *power_well)
1390{
1391        /* since ref/cri clock was enabled */
1392        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1393
1394        vlv_set_power_well(dev_priv, power_well, true);
1395
1396        /*
1397         * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1398         *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
1399         *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
1400         *   b. The other bits such as sfr settings / modesel may all
1401         *      be set to 0.
1402         *
1403         * This should only be done on init and resume from S3 with
1404         * both PLLs disabled, or we risk losing DPIO and PLL
1405         * synchronization.
1406         */
1407        I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1408}
1409
1410static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1411                                            struct i915_power_well *power_well)
1412{
1413        enum pipe pipe;
1414
1415        for_each_pipe(dev_priv, pipe)
1416                assert_pll_disabled(dev_priv, pipe);
1417
1418        /* Assert common reset */
1419        I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1420
1421        vlv_set_power_well(dev_priv, power_well, false);
1422}
1423
1424#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1425
1426#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1427
1428static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1429{
1430        struct i915_power_well *cmn_bc =
1431                lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1432        struct i915_power_well *cmn_d =
1433                lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1434        u32 phy_control = dev_priv->chv_phy_control;
1435        u32 phy_status = 0;
1436        u32 phy_status_mask = 0xffffffff;
1437
1438        /*
1439         * The BIOS can leave the PHY is some weird state
1440         * where it doesn't fully power down some parts.
1441         * Disable the asserts until the PHY has been fully
1442         * reset (ie. the power well has been disabled at
1443         * least once).
1444         */
1445        if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1446                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1447                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1448                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1449                                     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1450                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1451                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1452
1453        if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1454                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1455                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1456                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1457
1458        if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1459                phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1460
1461                /* this assumes override is only used to enable lanes */
1462                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1463                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1464
1465                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1466                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1467
1468                /* CL1 is on whenever anything is on in either channel */
1469                if (BITS_SET(phy_control,
1470                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1471                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1472                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1473
1474                /*
1475                 * The DPLLB check accounts for the pipe B + port A usage
1476                 * with CL2 powered up but all the lanes in the second channel
1477                 * powered down.
1478                 */
1479                if (BITS_SET(phy_control,
1480                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1481                    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1482                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1483
1484                if (BITS_SET(phy_control,
1485                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1486                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1487                if (BITS_SET(phy_control,
1488                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1489                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1490
1491                if (BITS_SET(phy_control,
1492                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1493                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1494                if (BITS_SET(phy_control,
1495                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1496                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1497        }
1498
1499        if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1500                phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1501
1502                /* this assumes override is only used to enable lanes */
1503                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1504                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1505
1506                if (BITS_SET(phy_control,
1507                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1508                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1509
1510                if (BITS_SET(phy_control,
1511                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1512                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1513                if (BITS_SET(phy_control,
1514                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1515                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1516        }
1517
1518        phy_status &= phy_status_mask;
1519
1520        /*
1521         * The PHY may be busy with some initial calibration and whatnot,
1522         * so the power state can take a while to actually change.
1523         */
1524        if (intel_wait_for_register(dev_priv,
1525                                    DISPLAY_PHY_STATUS,
1526                                    phy_status_mask,
1527                                    phy_status,
1528                                    10))
1529                DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1530                          I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1531                           phy_status, dev_priv->chv_phy_control);
1532}
1533
1534#undef BITS_SET
1535
1536static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1537                                           struct i915_power_well *power_well)
1538{
1539        enum dpio_phy phy;
1540        enum pipe pipe;
1541        u32 tmp;
1542
1543        WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1544                     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1545
1546        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1547                pipe = PIPE_A;
1548                phy = DPIO_PHY0;
1549        } else {
1550                pipe = PIPE_C;
1551                phy = DPIO_PHY1;
1552        }
1553
1554        /* since ref/cri clock was enabled */
1555        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1556        vlv_set_power_well(dev_priv, power_well, true);
1557
1558        /* Poll for phypwrgood signal */
1559        if (intel_wait_for_register(dev_priv,
1560                                    DISPLAY_PHY_STATUS,
1561                                    PHY_POWERGOOD(phy),
1562                                    PHY_POWERGOOD(phy),
1563                                    1))
1564                DRM_ERROR("Display PHY %d is not power up\n", phy);
1565
1566        mutex_lock(&dev_priv->sb_lock);
1567
1568        /* Enable dynamic power down */
1569        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1570        tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1571                DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1572        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1573
1574        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1575                tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1576                tmp |= DPIO_DYNPWRDOWNEN_CH1;
1577                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1578        } else {
1579                /*
1580                 * Force the non-existing CL2 off. BXT does this
1581                 * too, so maybe it saves some power even though
1582                 * CL2 doesn't exist?
1583                 */
1584                tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1585                tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1586                vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1587        }
1588
1589        mutex_unlock(&dev_priv->sb_lock);
1590
1591        dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1592        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1593
1594        DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1595                      phy, dev_priv->chv_phy_control);
1596
1597        assert_chv_phy_status(dev_priv);
1598}
1599
1600static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1601                                            struct i915_power_well *power_well)
1602{
1603        enum dpio_phy phy;
1604
1605        WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1606                     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1607
1608        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1609                phy = DPIO_PHY0;
1610                assert_pll_disabled(dev_priv, PIPE_A);
1611                assert_pll_disabled(dev_priv, PIPE_B);
1612        } else {
1613                phy = DPIO_PHY1;
1614                assert_pll_disabled(dev_priv, PIPE_C);
1615        }
1616
1617        dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1618        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1619
1620        vlv_set_power_well(dev_priv, power_well, false);
1621
1622        DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1623                      phy, dev_priv->chv_phy_control);
1624
1625        /* PHY is fully reset now, so we can enable the PHY state asserts */
1626        dev_priv->chv_phy_assert[phy] = true;
1627
1628        assert_chv_phy_status(dev_priv);
1629}
1630
1631static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1632                                     enum dpio_channel ch, bool override, unsigned int mask)
1633{
1634        enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1635        u32 reg, val, expected, actual;
1636
1637        /*
1638         * The BIOS can leave the PHY is some weird state
1639         * where it doesn't fully power down some parts.
1640         * Disable the asserts until the PHY has been fully
1641         * reset (ie. the power well has been disabled at
1642         * least once).
1643         */
1644        if (!dev_priv->chv_phy_assert[phy])
1645                return;
1646
1647        if (ch == DPIO_CH0)
1648                reg = _CHV_CMN_DW0_CH0;
1649        else
1650                reg = _CHV_CMN_DW6_CH1;
1651
1652        mutex_lock(&dev_priv->sb_lock);
1653        val = vlv_dpio_read(dev_priv, pipe, reg);
1654        mutex_unlock(&dev_priv->sb_lock);
1655
1656        /*
1657         * This assumes !override is only used when the port is disabled.
1658         * All lanes should power down even without the override when
1659         * the port is disabled.
1660         */
1661        if (!override || mask == 0xf) {
1662                expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1663                /*
1664                 * If CH1 common lane is not active anymore
1665                 * (eg. for pipe B DPLL) the entire channel will
1666                 * shut down, which causes the common lane registers
1667                 * to read as 0. That means we can't actually check
1668                 * the lane power down status bits, but as the entire
1669                 * register reads as 0 it's a good indication that the
1670                 * channel is indeed entirely powered down.
1671                 */
1672                if (ch == DPIO_CH1 && val == 0)
1673                        expected = 0;
1674        } else if (mask != 0x0) {
1675                expected = DPIO_ANYDL_POWERDOWN;
1676        } else {
1677                expected = 0;
1678        }
1679
1680        if (ch == DPIO_CH0)
1681                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1682        else
1683                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1684        actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1685
1686        WARN(actual != expected,
1687             "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1688             !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1689             !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1690             reg, val);
1691}
1692
1693bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1694                          enum dpio_channel ch, bool override)
1695{
1696        struct i915_power_domains *power_domains = &dev_priv->power_domains;
1697        bool was_override;
1698
1699        mutex_lock(&power_domains->lock);
1700
1701        was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1702
1703        if (override == was_override)
1704                goto out;
1705
1706        if (override)
1707                dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1708        else
1709                dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1710
1711        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1712
1713        DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1714                      phy, ch, dev_priv->chv_phy_control);
1715
1716        assert_chv_phy_status(dev_priv);
1717
1718out:
1719        mutex_unlock(&power_domains->lock);
1720
1721        return was_override;
1722}
1723
1724void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1725                             bool override, unsigned int mask)
1726{
1727        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1728        struct i915_power_domains *power_domains = &dev_priv->power_domains;
1729        enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1730        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1731
1732        mutex_lock(&power_domains->lock);
1733
1734        dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1735        dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1736
1737        if (override)
1738                dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1739        else
1740                dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1741
1742        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1743
1744        DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1745                      phy, ch, mask, dev_priv->chv_phy_control);
1746
1747        assert_chv_phy_status(dev_priv);
1748
1749        assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1750
1751        mutex_unlock(&power_domains->lock);
1752}
1753
1754static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1755                                        struct i915_power_well *power_well)
1756{
1757        enum pipe pipe = PIPE_A;
1758        bool enabled;
1759        u32 state, ctrl;
1760
1761        mutex_lock(&dev_priv->pcu_lock);
1762
1763        state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1764        /*
1765         * We only ever set the power-on and power-gate states, anything
1766         * else is unexpected.
1767         */
1768        WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1769        enabled = state == DP_SSS_PWR_ON(pipe);
1770
1771        /*
1772         * A transient state at this point would mean some unexpected party
1773         * is poking at the power controls too.
1774         */
1775        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1776        WARN_ON(ctrl << 16 != state);
1777
1778        mutex_unlock(&dev_priv->pcu_lock);
1779
1780        return enabled;
1781}
1782
1783static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1784                                    struct i915_power_well *power_well,
1785                                    bool enable)
1786{
1787        enum pipe pipe = PIPE_A;
1788        u32 state;
1789        u32 ctrl;
1790
1791        state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1792
1793        mutex_lock(&dev_priv->pcu_lock);
1794
1795#define COND \
1796        ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1797
1798        if (COND)
1799                goto out;
1800
1801        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1802        ctrl &= ~DP_SSC_MASK(pipe);
1803        ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1804        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1805
1806        if (wait_for(COND, 100))
1807                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1808                          state,
1809                          vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1810
1811#undef COND
1812
1813out:
1814        mutex_unlock(&dev_priv->pcu_lock);
1815}
1816
1817static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1818                                       struct i915_power_well *power_well)
1819{
1820        chv_set_pipe_power_well(dev_priv, power_well, true);
1821
1822        vlv_display_power_well_init(dev_priv);
1823}
1824
1825static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1826                                        struct i915_power_well *power_well)
1827{
1828        vlv_display_power_well_deinit(dev_priv);
1829
1830        chv_set_pipe_power_well(dev_priv, power_well, false);
1831}
1832
1833static void
1834__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1835                                 enum intel_display_power_domain domain)
1836{
1837        struct i915_power_domains *power_domains = &dev_priv->power_domains;
1838        struct i915_power_well *power_well;
1839
1840        for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1841                intel_power_well_get(dev_priv, power_well);
1842
1843        power_domains->domain_use_count[domain]++;
1844}
1845
1846/**
1847 * intel_display_power_get - grab a power domain reference
1848 * @dev_priv: i915 device instance
1849 * @domain: power domain to reference
1850 *
1851 * This function grabs a power domain reference for @domain and ensures that the
1852 * power domain and all its parents are powered up. Therefore users should only
1853 * grab a reference to the innermost power domain they need.
1854 *
1855 * Any power domain reference obtained by this function must have a symmetric
1856 * call to intel_display_power_put() to release the reference again.
1857 */
1858intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1859                                        enum intel_display_power_domain domain)
1860{
1861        struct i915_power_domains *power_domains = &dev_priv->power_domains;
1862        intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
1863
1864        mutex_lock(&power_domains->lock);
1865
1866        __intel_display_power_get_domain(dev_priv, domain);
1867
1868        mutex_unlock(&power_domains->lock);
1869
1870        return wakeref;
1871}
1872
1873/**
1874 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1875 * @dev_priv: i915 device instance
1876 * @domain: power domain to reference
1877 *
1878 * This function grabs a power domain reference for @domain and ensures that the
1879 * power domain and all its parents are powered up. Therefore users should only
1880 * grab a reference to the innermost power domain they need.
1881 *
1882 * Any power domain reference obtained by this function must have a symmetric
1883 * call to intel_display_power_put() to release the reference again.
1884 */
1885intel_wakeref_t
1886intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1887                                   enum intel_display_power_domain domain)
1888{
1889        struct i915_power_domains *power_domains = &dev_priv->power_domains;
1890        intel_wakeref_t wakeref;
1891        bool is_enabled;
1892
1893        wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
1894        if (!wakeref)
1895                return false;
1896
1897        mutex_lock(&power_domains->lock);
1898
1899        if (__intel_display_power_is_enabled(dev_priv, domain)) {
1900                __intel_display_power_get_domain(dev_priv, domain);
1901                is_enabled = true;
1902        } else {
1903                is_enabled = false;
1904        }
1905
1906        mutex_unlock(&power_domains->lock);
1907
1908        if (!is_enabled) {
1909                intel_runtime_pm_put(dev_priv, wakeref);
1910                wakeref = 0;
1911        }
1912
1913        return wakeref;
1914}
1915
1916static void __intel_display_power_put(struct drm_i915_private *dev_priv,
1917                                      enum intel_display_power_domain domain)
1918{
1919        struct i915_power_domains *power_domains;
1920        struct i915_power_well *power_well;
1921
1922        power_domains = &dev_priv->power_domains;
1923
1924        mutex_lock(&power_domains->lock);
1925
1926        WARN(!power_domains->domain_use_count[domain],
1927             "Use count on domain %s is already zero\n",
1928             intel_display_power_domain_str(domain));
1929        power_domains->domain_use_count[domain]--;
1930
1931        for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1932                intel_power_well_put(dev_priv, power_well);
1933
1934        mutex_unlock(&power_domains->lock);
1935}
1936
1937/**
1938 * intel_display_power_put - release a power domain reference
1939 * @dev_priv: i915 device instance
1940 * @domain: power domain to reference
1941 *
1942 * This function drops the power domain reference obtained by
1943 * intel_display_power_get() and might power down the corresponding hardware
1944 * block right away if this is the last reference.
1945 */
1946void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
1947                                       enum intel_display_power_domain domain)
1948{
1949        __intel_display_power_put(dev_priv, domain);
1950        intel_runtime_pm_put_unchecked(dev_priv);
1951}
1952
1953#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1954void intel_display_power_put(struct drm_i915_private *dev_priv,
1955                             enum intel_display_power_domain domain,
1956                             intel_wakeref_t wakeref)
1957{
1958        __intel_display_power_put(dev_priv, domain);
1959        intel_runtime_pm_put(dev_priv, wakeref);
1960}
1961#endif
1962
1963#define I830_PIPES_POWER_DOMAINS (              \
1964        BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
1965        BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
1966        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
1967        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
1968        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
1969        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
1970        BIT_ULL(POWER_DOMAIN_INIT))
1971
1972#define VLV_DISPLAY_POWER_DOMAINS (             \
1973        BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
1974        BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
1975        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
1976        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
1977        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
1978        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
1979        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
1980        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
1981        BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
1982        BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
1983        BIT_ULL(POWER_DOMAIN_VGA) |                     \
1984        BIT_ULL(POWER_DOMAIN_AUDIO) |           \
1985        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
1986        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
1987        BIT_ULL(POWER_DOMAIN_GMBUS) |           \
1988        BIT_ULL(POWER_DOMAIN_INIT))
1989
1990#define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
1991        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
1992        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
1993        BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
1994        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
1995        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
1996        BIT_ULL(POWER_DOMAIN_INIT))
1997
1998#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
1999        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2000        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2001        BIT_ULL(POWER_DOMAIN_INIT))
2002
2003#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
2004        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2005        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2006        BIT_ULL(POWER_DOMAIN_INIT))
2007
2008#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
2009        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2010        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2011        BIT_ULL(POWER_DOMAIN_INIT))
2012
2013#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
2014        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2015        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2016        BIT_ULL(POWER_DOMAIN_INIT))
2017
2018#define CHV_DISPLAY_POWER_DOMAINS (             \
2019        BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
2020        BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
2021        BIT_ULL(POWER_DOMAIN_PIPE_C) |          \
2022        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
2023        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2024        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2025        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
2026        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
2027        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |    \
2028        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2029        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2030        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2031        BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
2032        BIT_ULL(POWER_DOMAIN_VGA) |                     \
2033        BIT_ULL(POWER_DOMAIN_AUDIO) |           \
2034        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2035        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2036        BIT_ULL(POWER_DOMAIN_AUX_D) |           \
2037        BIT_ULL(POWER_DOMAIN_GMBUS) |           \
2038        BIT_ULL(POWER_DOMAIN_INIT))
2039
2040#define CHV_DPIO_CMN_BC_POWER_DOMAINS (         \
2041        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2042        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2043        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2044        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2045        BIT_ULL(POWER_DOMAIN_INIT))
2046
2047#define CHV_DPIO_CMN_D_POWER_DOMAINS (          \
2048        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2049        BIT_ULL(POWER_DOMAIN_AUX_D) |           \
2050        BIT_ULL(POWER_DOMAIN_INIT))
2051
2052#define HSW_DISPLAY_POWER_DOMAINS (                     \
2053        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2054        BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2055        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |             \
2056        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2057        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2058        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2059        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2060        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2061        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2062        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2063        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2064        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
2065        BIT_ULL(POWER_DOMAIN_VGA) |                             \
2066        BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2067        BIT_ULL(POWER_DOMAIN_INIT))
2068
2069#define BDW_DISPLAY_POWER_DOMAINS (                     \
2070        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2071        BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2072        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2073        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2074        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2075        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2076        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2077        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2078        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2079        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2080        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
2081        BIT_ULL(POWER_DOMAIN_VGA) |                             \
2082        BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2083        BIT_ULL(POWER_DOMAIN_INIT))
2084
2085#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2086        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2087        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2088        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2089        BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2090        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2091        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2092        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2093        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2094        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2095        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2096        BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |                \
2097        BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2098        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2099        BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2100        BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2101        BIT_ULL(POWER_DOMAIN_VGA) |                             \
2102        BIT_ULL(POWER_DOMAIN_INIT))
2103#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (          \
2104        BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
2105        BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
2106        BIT_ULL(POWER_DOMAIN_INIT))
2107#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
2108        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2109        BIT_ULL(POWER_DOMAIN_INIT))
2110#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
2111        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2112        BIT_ULL(POWER_DOMAIN_INIT))
2113#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (            \
2114        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2115        BIT_ULL(POWER_DOMAIN_INIT))
2116#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2117        SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2118        BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2119        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2120        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2121        BIT_ULL(POWER_DOMAIN_INIT))
2122
2123#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2124        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2125        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2126        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2127        BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2128        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2129        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2130        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2131        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2132        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2133        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2134        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2135        BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2136        BIT_ULL(POWER_DOMAIN_VGA) |                             \
2137        BIT_ULL(POWER_DOMAIN_INIT))
2138#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2139        BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2140        BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2141        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2142        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2143        BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
2144        BIT_ULL(POWER_DOMAIN_INIT))
2145#define BXT_DPIO_CMN_A_POWER_DOMAINS (                  \
2146        BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
2147        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2148        BIT_ULL(POWER_DOMAIN_INIT))
2149#define BXT_DPIO_CMN_BC_POWER_DOMAINS (                 \
2150        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2151        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2152        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2153        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2154        BIT_ULL(POWER_DOMAIN_INIT))
2155
2156#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2157        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2158        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2159        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2160        BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2161        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2162        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2163        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2164        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2165        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2166        BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2167        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2168        BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2169        BIT_ULL(POWER_DOMAIN_VGA) |                             \
2170        BIT_ULL(POWER_DOMAIN_INIT))
2171#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (            \
2172        BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2173#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
2174        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2175#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
2176        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2177#define GLK_DPIO_CMN_A_POWER_DOMAINS (                  \
2178        BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
2179        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2180        BIT_ULL(POWER_DOMAIN_INIT))
2181#define GLK_DPIO_CMN_B_POWER_DOMAINS (                  \
2182        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2183        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2184        BIT_ULL(POWER_DOMAIN_INIT))
2185#define GLK_DPIO_CMN_C_POWER_DOMAINS (                  \
2186        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2187        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2188        BIT_ULL(POWER_DOMAIN_INIT))
2189#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (               \
2190        BIT_ULL(POWER_DOMAIN_AUX_A) |           \
2191        BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2192        BIT_ULL(POWER_DOMAIN_INIT))
2193#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (               \
2194        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2195        BIT_ULL(POWER_DOMAIN_INIT))
2196#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (               \
2197        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2198        BIT_ULL(POWER_DOMAIN_INIT))
2199#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2200        GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2201        BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2202        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2203        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2204        BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
2205        BIT_ULL(POWER_DOMAIN_INIT))
2206
2207#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2208        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2209        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2210        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2211        BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2212        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2213        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2214        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2215        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2216        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2217        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2218        BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |                \
2219        BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2220        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2221        BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2222        BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2223        BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2224        BIT_ULL(POWER_DOMAIN_VGA) |                             \
2225        BIT_ULL(POWER_DOMAIN_INIT))
2226#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (            \
2227        BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
2228        BIT_ULL(POWER_DOMAIN_INIT))
2229#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (            \
2230        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2231        BIT_ULL(POWER_DOMAIN_INIT))
2232#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (            \
2233        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2234        BIT_ULL(POWER_DOMAIN_INIT))
2235#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (            \
2236        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2237        BIT_ULL(POWER_DOMAIN_INIT))
2238#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (               \
2239        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2240        BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2241        BIT_ULL(POWER_DOMAIN_INIT))
2242#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (               \
2243        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2244        BIT_ULL(POWER_DOMAIN_INIT))
2245#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (               \
2246        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2247        BIT_ULL(POWER_DOMAIN_INIT))
2248#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (               \
2249        BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2250        BIT_ULL(POWER_DOMAIN_INIT))
2251#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (               \
2252        BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2253        BIT_ULL(POWER_DOMAIN_INIT))
2254#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (            \
2255        BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
2256        BIT_ULL(POWER_DOMAIN_INIT))
2257#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2258        CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2259        BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2260        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2261        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2262        BIT_ULL(POWER_DOMAIN_INIT))
2263
2264/*
2265 * ICL PW_0/PG_0 domains (HW/DMC control):
2266 * - PCI
2267 * - clocks except port PLL
2268 * - central power except FBC
2269 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2270 * ICL PW_1/PG_1 domains (HW/DMC control):
2271 * - DBUF function
2272 * - PIPE_A and its planes, except VGA
2273 * - transcoder EDP + PSR
2274 * - transcoder DSI
2275 * - DDI_A
2276 * - FBC
2277 */
2278#define ICL_PW_4_POWER_DOMAINS (                        \
2279        BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2280        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2281        BIT_ULL(POWER_DOMAIN_INIT))
2282        /* VDSC/joining */
2283#define ICL_PW_3_POWER_DOMAINS (                        \
2284        ICL_PW_4_POWER_DOMAINS |                        \
2285        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2286        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2287        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2288        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2289        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2290        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2291        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2292        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2293        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2294        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2295        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2296        BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
2297        BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
2298        BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
2299        BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
2300        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2301        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2302        BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2303        BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
2304        BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2305        BIT_ULL(POWER_DOMAIN_AUX_TBT1) |                \
2306        BIT_ULL(POWER_DOMAIN_AUX_TBT2) |                \
2307        BIT_ULL(POWER_DOMAIN_AUX_TBT3) |                \
2308        BIT_ULL(POWER_DOMAIN_AUX_TBT4) |                \
2309        BIT_ULL(POWER_DOMAIN_VGA) |                     \
2310        BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2311        BIT_ULL(POWER_DOMAIN_INIT))
2312        /*
2313         * - transcoder WD
2314         * - KVMR (HW control)
2315         */
2316#define ICL_PW_2_POWER_DOMAINS (                        \
2317        ICL_PW_3_POWER_DOMAINS |                        \
2318        BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |             \
2319        BIT_ULL(POWER_DOMAIN_INIT))
2320        /*
2321         * - KVMR (HW control)
2322         */
2323#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2324        ICL_PW_2_POWER_DOMAINS |                        \
2325        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2326        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2327        BIT_ULL(POWER_DOMAIN_INIT))
2328
2329#define ICL_DDI_IO_A_POWER_DOMAINS (                    \
2330        BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2331#define ICL_DDI_IO_B_POWER_DOMAINS (                    \
2332        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2333#define ICL_DDI_IO_C_POWER_DOMAINS (                    \
2334        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2335#define ICL_DDI_IO_D_POWER_DOMAINS (                    \
2336        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2337#define ICL_DDI_IO_E_POWER_DOMAINS (                    \
2338        BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2339#define ICL_DDI_IO_F_POWER_DOMAINS (                    \
2340        BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2341
2342#define ICL_AUX_A_IO_POWER_DOMAINS (                    \
2343        BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2344        BIT_ULL(POWER_DOMAIN_AUX_A))
2345#define ICL_AUX_B_IO_POWER_DOMAINS (                    \
2346        BIT_ULL(POWER_DOMAIN_AUX_B))
2347#define ICL_AUX_C_IO_POWER_DOMAINS (                    \
2348        BIT_ULL(POWER_DOMAIN_AUX_C))
2349#define ICL_AUX_D_IO_POWER_DOMAINS (                    \
2350        BIT_ULL(POWER_DOMAIN_AUX_D))
2351#define ICL_AUX_E_IO_POWER_DOMAINS (                    \
2352        BIT_ULL(POWER_DOMAIN_AUX_E))
2353#define ICL_AUX_F_IO_POWER_DOMAINS (                    \
2354        BIT_ULL(POWER_DOMAIN_AUX_F))
2355#define ICL_AUX_TBT1_IO_POWER_DOMAINS (                 \
2356        BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2357#define ICL_AUX_TBT2_IO_POWER_DOMAINS (                 \
2358        BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2359#define ICL_AUX_TBT3_IO_POWER_DOMAINS (                 \
2360        BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2361#define ICL_AUX_TBT4_IO_POWER_DOMAINS (                 \
2362        BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2363
2364static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2365        .sync_hw = i9xx_power_well_sync_hw_noop,
2366        .enable = i9xx_always_on_power_well_noop,
2367        .disable = i9xx_always_on_power_well_noop,
2368        .is_enabled = i9xx_always_on_power_well_enabled,
2369};
2370
2371static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2372        .sync_hw = i9xx_power_well_sync_hw_noop,
2373        .enable = chv_pipe_power_well_enable,
2374        .disable = chv_pipe_power_well_disable,
2375        .is_enabled = chv_pipe_power_well_enabled,
2376};
2377
2378static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2379        .sync_hw = i9xx_power_well_sync_hw_noop,
2380        .enable = chv_dpio_cmn_power_well_enable,
2381        .disable = chv_dpio_cmn_power_well_disable,
2382        .is_enabled = vlv_power_well_enabled,
2383};
2384
2385static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2386        {
2387                .name = "always-on",
2388                .always_on = true,
2389                .domains = POWER_DOMAIN_MASK,
2390                .ops = &i9xx_always_on_power_well_ops,
2391                .id = DISP_PW_ID_NONE,
2392        },
2393};
2394
2395static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2396        .sync_hw = i830_pipes_power_well_sync_hw,
2397        .enable = i830_pipes_power_well_enable,
2398        .disable = i830_pipes_power_well_disable,
2399        .is_enabled = i830_pipes_power_well_enabled,
2400};
2401
2402static const struct i915_power_well_desc i830_power_wells[] = {
2403        {
2404                .name = "always-on",
2405                .always_on = true,
2406                .domains = POWER_DOMAIN_MASK,
2407                .ops = &i9xx_always_on_power_well_ops,
2408                .id = DISP_PW_ID_NONE,
2409        },
2410        {
2411                .name = "pipes",
2412                .domains = I830_PIPES_POWER_DOMAINS,
2413                .ops = &i830_pipes_power_well_ops,
2414                .id = DISP_PW_ID_NONE,
2415        },
2416};
2417
2418static const struct i915_power_well_ops hsw_power_well_ops = {
2419        .sync_hw = hsw_power_well_sync_hw,
2420        .enable = hsw_power_well_enable,
2421        .disable = hsw_power_well_disable,
2422        .is_enabled = hsw_power_well_enabled,
2423};
2424
2425static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2426        .sync_hw = i9xx_power_well_sync_hw_noop,
2427        .enable = gen9_dc_off_power_well_enable,
2428        .disable = gen9_dc_off_power_well_disable,
2429        .is_enabled = gen9_dc_off_power_well_enabled,
2430};
2431
2432static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2433        .sync_hw = i9xx_power_well_sync_hw_noop,
2434        .enable = bxt_dpio_cmn_power_well_enable,
2435        .disable = bxt_dpio_cmn_power_well_disable,
2436        .is_enabled = bxt_dpio_cmn_power_well_enabled,
2437};
2438
2439static const struct i915_power_well_regs hsw_power_well_regs = {
2440        .bios   = HSW_PWR_WELL_CTL1,
2441        .driver = HSW_PWR_WELL_CTL2,
2442        .kvmr   = HSW_PWR_WELL_CTL3,
2443        .debug  = HSW_PWR_WELL_CTL4,
2444};
2445
2446static const struct i915_power_well_desc hsw_power_wells[] = {
2447        {
2448                .name = "always-on",
2449                .always_on = true,
2450                .domains = POWER_DOMAIN_MASK,
2451                .ops = &i9xx_always_on_power_well_ops,
2452                .id = DISP_PW_ID_NONE,
2453        },
2454        {
2455                .name = "display",
2456                .domains = HSW_DISPLAY_POWER_DOMAINS,
2457                .ops = &hsw_power_well_ops,
2458                .id = HSW_DISP_PW_GLOBAL,
2459                {
2460                        .hsw.regs = &hsw_power_well_regs,
2461                        .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2462                        .hsw.has_vga = true,
2463                },
2464        },
2465};
2466
2467static const struct i915_power_well_desc bdw_power_wells[] = {
2468        {
2469                .name = "always-on",
2470                .always_on = true,
2471                .domains = POWER_DOMAIN_MASK,
2472                .ops = &i9xx_always_on_power_well_ops,
2473                .id = DISP_PW_ID_NONE,
2474        },
2475        {
2476                .name = "display",
2477                .domains = BDW_DISPLAY_POWER_DOMAINS,
2478                .ops = &hsw_power_well_ops,
2479                .id = HSW_DISP_PW_GLOBAL,
2480                {
2481                        .hsw.regs = &hsw_power_well_regs,
2482                        .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2483                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2484                        .hsw.has_vga = true,
2485                },
2486        },
2487};
2488
2489static const struct i915_power_well_ops vlv_display_power_well_ops = {
2490        .sync_hw = i9xx_power_well_sync_hw_noop,
2491        .enable = vlv_display_power_well_enable,
2492        .disable = vlv_display_power_well_disable,
2493        .is_enabled = vlv_power_well_enabled,
2494};
2495
2496static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2497        .sync_hw = i9xx_power_well_sync_hw_noop,
2498        .enable = vlv_dpio_cmn_power_well_enable,
2499        .disable = vlv_dpio_cmn_power_well_disable,
2500        .is_enabled = vlv_power_well_enabled,
2501};
2502
2503static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2504        .sync_hw = i9xx_power_well_sync_hw_noop,
2505        .enable = vlv_power_well_enable,
2506        .disable = vlv_power_well_disable,
2507        .is_enabled = vlv_power_well_enabled,
2508};
2509
2510static const struct i915_power_well_desc vlv_power_wells[] = {
2511        {
2512                .name = "always-on",
2513                .always_on = true,
2514                .domains = POWER_DOMAIN_MASK,
2515                .ops = &i9xx_always_on_power_well_ops,
2516                .id = DISP_PW_ID_NONE,
2517        },
2518        {
2519                .name = "display",
2520                .domains = VLV_DISPLAY_POWER_DOMAINS,
2521                .ops = &vlv_display_power_well_ops,
2522                .id = VLV_DISP_PW_DISP2D,
2523                {
2524                        .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2525                },
2526        },
2527        {
2528                .name = "dpio-tx-b-01",
2529                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2530                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2531                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2532                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2533                .ops = &vlv_dpio_power_well_ops,
2534                .id = DISP_PW_ID_NONE,
2535                {
2536                        .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2537                },
2538        },
2539        {
2540                .name = "dpio-tx-b-23",
2541                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2542                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2543                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2544                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2545                .ops = &vlv_dpio_power_well_ops,
2546                .id = DISP_PW_ID_NONE,
2547                {
2548                        .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2549                },
2550        },
2551        {
2552                .name = "dpio-tx-c-01",
2553                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2554                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2555                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2556                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2557                .ops = &vlv_dpio_power_well_ops,
2558                .id = DISP_PW_ID_NONE,
2559                {
2560                        .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2561                },
2562        },
2563        {
2564                .name = "dpio-tx-c-23",
2565                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2566                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2567                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2568                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2569                .ops = &vlv_dpio_power_well_ops,
2570                .id = DISP_PW_ID_NONE,
2571                {
2572                        .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2573                },
2574        },
2575        {
2576                .name = "dpio-common",
2577                .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2578                .ops = &vlv_dpio_cmn_power_well_ops,
2579                .id = VLV_DISP_PW_DPIO_CMN_BC,
2580                {
2581                        .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2582                },
2583        },
2584};
2585
2586static const struct i915_power_well_desc chv_power_wells[] = {
2587        {
2588                .name = "always-on",
2589                .always_on = true,
2590                .domains = POWER_DOMAIN_MASK,
2591                .ops = &i9xx_always_on_power_well_ops,
2592                .id = DISP_PW_ID_NONE,
2593        },
2594        {
2595                .name = "display",
2596                /*
2597                 * Pipe A power well is the new disp2d well. Pipe B and C
2598                 * power wells don't actually exist. Pipe A power well is
2599                 * required for any pipe to work.
2600                 */
2601                .domains = CHV_DISPLAY_POWER_DOMAINS,
2602                .ops = &chv_pipe_power_well_ops,
2603                .id = DISP_PW_ID_NONE,
2604        },
2605        {
2606                .name = "dpio-common-bc",
2607                .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2608                .ops = &chv_dpio_cmn_power_well_ops,
2609                .id = VLV_DISP_PW_DPIO_CMN_BC,
2610                {
2611                        .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2612                },
2613        },
2614        {
2615                .name = "dpio-common-d",
2616                .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2617                .ops = &chv_dpio_cmn_power_well_ops,
2618                .id = CHV_DISP_PW_DPIO_CMN_D,
2619                {
2620                        .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
2621                },
2622        },
2623};
2624
2625bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2626                                         enum i915_power_well_id power_well_id)
2627{
2628        struct i915_power_well *power_well;
2629        bool ret;
2630
2631        power_well = lookup_power_well(dev_priv, power_well_id);
2632        ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
2633
2634        return ret;
2635}
2636
2637static const struct i915_power_well_desc skl_power_wells[] = {
2638        {
2639                .name = "always-on",
2640                .always_on = true,
2641                .domains = POWER_DOMAIN_MASK,
2642                .ops = &i9xx_always_on_power_well_ops,
2643                .id = DISP_PW_ID_NONE,
2644        },
2645        {
2646                .name = "power well 1",
2647                /* Handled by the DMC firmware */
2648                .always_on = true,
2649                .domains = 0,
2650                .ops = &hsw_power_well_ops,
2651                .id = SKL_DISP_PW_1,
2652                {
2653                        .hsw.regs = &hsw_power_well_regs,
2654                        .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2655                        .hsw.has_fuses = true,
2656                },
2657        },
2658        {
2659                .name = "MISC IO power well",
2660                /* Handled by the DMC firmware */
2661                .always_on = true,
2662                .domains = 0,
2663                .ops = &hsw_power_well_ops,
2664                .id = SKL_DISP_PW_MISC_IO,
2665                {
2666                        .hsw.regs = &hsw_power_well_regs,
2667                        .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
2668                },
2669        },
2670        {
2671                .name = "DC off",
2672                .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2673                .ops = &gen9_dc_off_power_well_ops,
2674                .id = DISP_PW_ID_NONE,
2675        },
2676        {
2677                .name = "power well 2",
2678                .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2679                .ops = &hsw_power_well_ops,
2680                .id = SKL_DISP_PW_2,
2681                {
2682                        .hsw.regs = &hsw_power_well_regs,
2683                        .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2684                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2685                        .hsw.has_vga = true,
2686                        .hsw.has_fuses = true,
2687                },
2688        },
2689        {
2690                .name = "DDI A/E IO power well",
2691                .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2692                .ops = &hsw_power_well_ops,
2693                .id = DISP_PW_ID_NONE,
2694                {
2695                        .hsw.regs = &hsw_power_well_regs,
2696                        .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
2697                },
2698        },
2699        {
2700                .name = "DDI B IO power well",
2701                .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2702                .ops = &hsw_power_well_ops,
2703                .id = DISP_PW_ID_NONE,
2704                {
2705                        .hsw.regs = &hsw_power_well_regs,
2706                        .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2707                },
2708        },
2709        {
2710                .name = "DDI C IO power well",
2711                .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2712                .ops = &hsw_power_well_ops,
2713                .id = DISP_PW_ID_NONE,
2714                {
2715                        .hsw.regs = &hsw_power_well_regs,
2716                        .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2717                },
2718        },
2719        {
2720                .name = "DDI D IO power well",
2721                .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2722                .ops = &hsw_power_well_ops,
2723                .id = DISP_PW_ID_NONE,
2724                {
2725                        .hsw.regs = &hsw_power_well_regs,
2726                        .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
2727                },
2728        },
2729};
2730
2731static const struct i915_power_well_desc bxt_power_wells[] = {
2732        {
2733                .name = "always-on",
2734                .always_on = true,
2735                .domains = POWER_DOMAIN_MASK,
2736                .ops = &i9xx_always_on_power_well_ops,
2737                .id = DISP_PW_ID_NONE,
2738        },
2739        {
2740                .name = "power well 1",
2741                /* Handled by the DMC firmware */
2742                .always_on = true,
2743                .domains = 0,
2744                .ops = &hsw_power_well_ops,
2745                .id = SKL_DISP_PW_1,
2746                {
2747                        .hsw.regs = &hsw_power_well_regs,
2748                        .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2749                        .hsw.has_fuses = true,
2750                },
2751        },
2752        {
2753                .name = "DC off",
2754                .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2755                .ops = &gen9_dc_off_power_well_ops,
2756                .id = DISP_PW_ID_NONE,
2757        },
2758        {
2759                .name = "power well 2",
2760                .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2761                .ops = &hsw_power_well_ops,
2762                .id = SKL_DISP_PW_2,
2763                {
2764                        .hsw.regs = &hsw_power_well_regs,
2765                        .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2766                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2767                        .hsw.has_vga = true,
2768                        .hsw.has_fuses = true,
2769                },
2770        },
2771        {
2772                .name = "dpio-common-a",
2773                .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2774                .ops = &bxt_dpio_cmn_power_well_ops,
2775                .id = BXT_DISP_PW_DPIO_CMN_A,
2776                {
2777                        .bxt.phy = DPIO_PHY1,
2778                },
2779        },
2780        {
2781                .name = "dpio-common-bc",
2782                .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2783                .ops = &bxt_dpio_cmn_power_well_ops,
2784                .id = VLV_DISP_PW_DPIO_CMN_BC,
2785                {
2786                        .bxt.phy = DPIO_PHY0,
2787                },
2788        },
2789};
2790
2791static const struct i915_power_well_desc glk_power_wells[] = {
2792        {
2793                .name = "always-on",
2794                .always_on = true,
2795                .domains = POWER_DOMAIN_MASK,
2796                .ops = &i9xx_always_on_power_well_ops,
2797                .id = DISP_PW_ID_NONE,
2798        },
2799        {
2800                .name = "power well 1",
2801                /* Handled by the DMC firmware */
2802                .always_on = true,
2803                .domains = 0,
2804                .ops = &hsw_power_well_ops,
2805                .id = SKL_DISP_PW_1,
2806                {
2807                        .hsw.regs = &hsw_power_well_regs,
2808                        .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2809                        .hsw.has_fuses = true,
2810                },
2811        },
2812        {
2813                .name = "DC off",
2814                .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2815                .ops = &gen9_dc_off_power_well_ops,
2816                .id = DISP_PW_ID_NONE,
2817        },
2818        {
2819                .name = "power well 2",
2820                .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2821                .ops = &hsw_power_well_ops,
2822                .id = SKL_DISP_PW_2,
2823                {
2824                        .hsw.regs = &hsw_power_well_regs,
2825                        .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2826                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2827                        .hsw.has_vga = true,
2828                        .hsw.has_fuses = true,
2829                },
2830        },
2831        {
2832                .name = "dpio-common-a",
2833                .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2834                .ops = &bxt_dpio_cmn_power_well_ops,
2835                .id = BXT_DISP_PW_DPIO_CMN_A,
2836                {
2837                        .bxt.phy = DPIO_PHY1,
2838                },
2839        },
2840        {
2841                .name = "dpio-common-b",
2842                .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2843                .ops = &bxt_dpio_cmn_power_well_ops,
2844                .id = VLV_DISP_PW_DPIO_CMN_BC,
2845                {
2846                        .bxt.phy = DPIO_PHY0,
2847                },
2848        },
2849        {
2850                .name = "dpio-common-c",
2851                .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2852                .ops = &bxt_dpio_cmn_power_well_ops,
2853                .id = GLK_DISP_PW_DPIO_CMN_C,
2854                {
2855                        .bxt.phy = DPIO_PHY2,
2856                },
2857        },
2858        {
2859                .name = "AUX A",
2860                .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2861                .ops = &hsw_power_well_ops,
2862                .id = DISP_PW_ID_NONE,
2863                {
2864                        .hsw.regs = &hsw_power_well_regs,
2865                        .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2866                },
2867        },
2868        {
2869                .name = "AUX B",
2870                .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2871                .ops = &hsw_power_well_ops,
2872                .id = DISP_PW_ID_NONE,
2873                {
2874                        .hsw.regs = &hsw_power_well_regs,
2875                        .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2876                },
2877        },
2878        {
2879                .name = "AUX C",
2880                .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2881                .ops = &hsw_power_well_ops,
2882                .id = DISP_PW_ID_NONE,
2883                {
2884                        .hsw.regs = &hsw_power_well_regs,
2885                        .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2886                },
2887        },
2888        {
2889                .name = "DDI A IO power well",
2890                .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
2891                .ops = &hsw_power_well_ops,
2892                .id = DISP_PW_ID_NONE,
2893                {
2894                        .hsw.regs = &hsw_power_well_regs,
2895                        .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
2896                },
2897        },
2898        {
2899                .name = "DDI B IO power well",
2900                .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2901                .ops = &hsw_power_well_ops,
2902                .id = DISP_PW_ID_NONE,
2903                {
2904                        .hsw.regs = &hsw_power_well_regs,
2905                        .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2906                },
2907        },
2908        {
2909                .name = "DDI C IO power well",
2910                .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2911                .ops = &hsw_power_well_ops,
2912                .id = DISP_PW_ID_NONE,
2913                {
2914                        .hsw.regs = &hsw_power_well_regs,
2915                        .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2916                },
2917        },
2918};
2919
2920static const struct i915_power_well_desc cnl_power_wells[] = {
2921        {
2922                .name = "always-on",
2923                .always_on = true,
2924                .domains = POWER_DOMAIN_MASK,
2925                .ops = &i9xx_always_on_power_well_ops,
2926                .id = DISP_PW_ID_NONE,
2927        },
2928        {
2929                .name = "power well 1",
2930                /* Handled by the DMC firmware */
2931                .always_on = true,
2932                .domains = 0,
2933                .ops = &hsw_power_well_ops,
2934                .id = SKL_DISP_PW_1,
2935                {
2936                        .hsw.regs = &hsw_power_well_regs,
2937                        .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2938                        .hsw.has_fuses = true,
2939                },
2940        },
2941        {
2942                .name = "AUX A",
2943                .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
2944                .ops = &hsw_power_well_ops,
2945                .id = DISP_PW_ID_NONE,
2946                {
2947                        .hsw.regs = &hsw_power_well_regs,
2948                        .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2949                },
2950        },
2951        {
2952                .name = "AUX B",
2953                .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
2954                .ops = &hsw_power_well_ops,
2955                .id = DISP_PW_ID_NONE,
2956                {
2957                        .hsw.regs = &hsw_power_well_regs,
2958                        .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2959                },
2960        },
2961        {
2962                .name = "AUX C",
2963                .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
2964                .ops = &hsw_power_well_ops,
2965                .id = DISP_PW_ID_NONE,
2966                {
2967                        .hsw.regs = &hsw_power_well_regs,
2968                        .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2969                },
2970        },
2971        {
2972                .name = "AUX D",
2973                .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
2974                .ops = &hsw_power_well_ops,
2975                .id = DISP_PW_ID_NONE,
2976                {
2977                        .hsw.regs = &hsw_power_well_regs,
2978                        .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
2979                },
2980        },
2981        {
2982                .name = "DC off",
2983                .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2984                .ops = &gen9_dc_off_power_well_ops,
2985                .id = DISP_PW_ID_NONE,
2986        },
2987        {
2988                .name = "power well 2",
2989                .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2990                .ops = &hsw_power_well_ops,
2991                .id = SKL_DISP_PW_2,
2992                {
2993                        .hsw.regs = &hsw_power_well_regs,
2994                        .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2995                        .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2996                        .hsw.has_vga = true,
2997                        .hsw.has_fuses = true,
2998                },
2999        },
3000        {
3001                .name = "DDI A IO power well",
3002                .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3003                .ops = &hsw_power_well_ops,
3004                .id = DISP_PW_ID_NONE,
3005                {
3006                        .hsw.regs = &hsw_power_well_regs,
3007                        .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3008                },
3009        },
3010        {
3011                .name = "DDI B IO power well",
3012                .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3013                .ops = &hsw_power_well_ops,
3014                .id = DISP_PW_ID_NONE,
3015                {
3016                        .hsw.regs = &hsw_power_well_regs,
3017                        .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3018                },
3019        },
3020        {
3021                .name = "DDI C IO power well",
3022                .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3023                .ops = &hsw_power_well_ops,
3024                .id = DISP_PW_ID_NONE,
3025                {
3026                        .hsw.regs = &hsw_power_well_regs,
3027                        .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3028                },
3029        },
3030        {
3031                .name = "DDI D IO power well",
3032                .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3033                .ops = &hsw_power_well_ops,
3034                .id = DISP_PW_ID_NONE,
3035                {
3036                        .hsw.regs = &hsw_power_well_regs,
3037                        .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3038                },
3039        },
3040        {
3041                .name = "DDI F IO power well",
3042                .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3043                .ops = &hsw_power_well_ops,
3044                .id = DISP_PW_ID_NONE,
3045                {
3046                        .hsw.regs = &hsw_power_well_regs,
3047                        .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3048                },
3049        },
3050        {
3051                .name = "AUX F",
3052                .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3053                .ops = &hsw_power_well_ops,
3054                .id = DISP_PW_ID_NONE,
3055                {
3056                        .hsw.regs = &hsw_power_well_regs,
3057                        .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3058                },
3059        },
3060};
3061
3062static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3063        .sync_hw = hsw_power_well_sync_hw,
3064        .enable = icl_combo_phy_aux_power_well_enable,
3065        .disable = icl_combo_phy_aux_power_well_disable,
3066        .is_enabled = hsw_power_well_enabled,
3067};
3068
3069static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3070        .sync_hw = hsw_power_well_sync_hw,
3071        .enable = icl_tc_phy_aux_power_well_enable,
3072        .disable = hsw_power_well_disable,
3073        .is_enabled = hsw_power_well_enabled,
3074};
3075
3076static const struct i915_power_well_regs icl_aux_power_well_regs = {
3077        .bios   = ICL_PWR_WELL_CTL_AUX1,
3078        .driver = ICL_PWR_WELL_CTL_AUX2,
3079        .debug  = ICL_PWR_WELL_CTL_AUX4,
3080};
3081
3082static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3083        .bios   = ICL_PWR_WELL_CTL_DDI1,
3084        .driver = ICL_PWR_WELL_CTL_DDI2,
3085        .debug  = ICL_PWR_WELL_CTL_DDI4,
3086};
3087
3088static const struct i915_power_well_desc icl_power_wells[] = {
3089        {
3090                .name = "always-on",
3091                .always_on = true,
3092                .domains = POWER_DOMAIN_MASK,
3093                .ops = &i9xx_always_on_power_well_ops,
3094                .id = DISP_PW_ID_NONE,
3095        },
3096        {
3097                .name = "power well 1",
3098                /* Handled by the DMC firmware */
3099                .always_on = true,
3100                .domains = 0,
3101                .ops = &hsw_power_well_ops,
3102                .id = SKL_DISP_PW_1,
3103                {
3104                        .hsw.regs = &hsw_power_well_regs,
3105                        .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3106                        .hsw.has_fuses = true,
3107                },
3108        },
3109        {
3110                .name = "DC off",
3111                .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3112                .ops = &gen9_dc_off_power_well_ops,
3113                .id = DISP_PW_ID_NONE,
3114        },
3115        {
3116                .name = "power well 2",
3117                .domains = ICL_PW_2_POWER_DOMAINS,
3118                .ops = &hsw_power_well_ops,
3119                .id = SKL_DISP_PW_2,
3120                {
3121                        .hsw.regs = &hsw_power_well_regs,
3122                        .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3123                        .hsw.has_fuses = true,
3124                },
3125        },
3126        {
3127                .name = "power well 3",
3128                .domains = ICL_PW_3_POWER_DOMAINS,
3129                .ops = &hsw_power_well_ops,
3130                .id = DISP_PW_ID_NONE,
3131                {
3132                        .hsw.regs = &hsw_power_well_regs,
3133                        .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3134                        .hsw.irq_pipe_mask = BIT(PIPE_B),
3135                        .hsw.has_vga = true,
3136                        .hsw.has_fuses = true,
3137                },
3138        },
3139        {
3140                .name = "DDI A IO",
3141                .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3142                .ops = &hsw_power_well_ops,
3143                .id = DISP_PW_ID_NONE,
3144                {
3145                        .hsw.regs = &icl_ddi_power_well_regs,
3146                        .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3147                },
3148        },
3149        {
3150                .name = "DDI B IO",
3151                .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3152                .ops = &hsw_power_well_ops,
3153                .id = DISP_PW_ID_NONE,
3154                {
3155                        .hsw.regs = &icl_ddi_power_well_regs,
3156                        .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3157                },
3158        },
3159        {
3160                .name = "DDI C IO",
3161                .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3162                .ops = &hsw_power_well_ops,
3163                .id = DISP_PW_ID_NONE,
3164                {
3165                        .hsw.regs = &icl_ddi_power_well_regs,
3166                        .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3167                },
3168        },
3169        {
3170                .name = "DDI D IO",
3171                .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3172                .ops = &hsw_power_well_ops,
3173                .id = DISP_PW_ID_NONE,
3174                {
3175                        .hsw.regs = &icl_ddi_power_well_regs,
3176                        .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3177                },
3178        },
3179        {
3180                .name = "DDI E IO",
3181                .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3182                .ops = &hsw_power_well_ops,
3183                .id = DISP_PW_ID_NONE,
3184                {
3185                        .hsw.regs = &icl_ddi_power_well_regs,
3186                        .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3187                },
3188        },
3189        {
3190                .name = "DDI F IO",
3191                .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3192                .ops = &hsw_power_well_ops,
3193                .id = DISP_PW_ID_NONE,
3194                {
3195                        .hsw.regs = &icl_ddi_power_well_regs,
3196                        .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3197                },
3198        },
3199        {
3200                .name = "AUX A",
3201                .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3202                .ops = &icl_combo_phy_aux_power_well_ops,
3203                .id = DISP_PW_ID_NONE,
3204                {
3205                        .hsw.regs = &icl_aux_power_well_regs,
3206                        .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3207                },
3208        },
3209        {
3210                .name = "AUX B",
3211                .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3212                .ops = &icl_combo_phy_aux_power_well_ops,
3213                .id = DISP_PW_ID_NONE,
3214                {
3215                        .hsw.regs = &icl_aux_power_well_regs,
3216                        .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3217                },
3218        },
3219        {
3220                .name = "AUX C",
3221                .domains = ICL_AUX_C_IO_POWER_DOMAINS,
3222                .ops = &icl_tc_phy_aux_power_well_ops,
3223                .id = DISP_PW_ID_NONE,
3224                {
3225                        .hsw.regs = &icl_aux_power_well_regs,
3226                        .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3227                        .hsw.is_tc_tbt = false,
3228                },
3229        },
3230        {
3231                .name = "AUX D",
3232                .domains = ICL_AUX_D_IO_POWER_DOMAINS,
3233                .ops = &icl_tc_phy_aux_power_well_ops,
3234                .id = DISP_PW_ID_NONE,
3235                {
3236                        .hsw.regs = &icl_aux_power_well_regs,
3237                        .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3238                        .hsw.is_tc_tbt = false,
3239                },
3240        },
3241        {
3242                .name = "AUX E",
3243                .domains = ICL_AUX_E_IO_POWER_DOMAINS,
3244                .ops = &icl_tc_phy_aux_power_well_ops,
3245                .id = DISP_PW_ID_NONE,
3246                {
3247                        .hsw.regs = &icl_aux_power_well_regs,
3248                        .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3249                        .hsw.is_tc_tbt = false,
3250                },
3251        },
3252        {
3253                .name = "AUX F",
3254                .domains = ICL_AUX_F_IO_POWER_DOMAINS,
3255                .ops = &icl_tc_phy_aux_power_well_ops,
3256                .id = DISP_PW_ID_NONE,
3257                {
3258                        .hsw.regs = &icl_aux_power_well_regs,
3259                        .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3260                        .hsw.is_tc_tbt = false,
3261                },
3262        },
3263        {
3264                .name = "AUX TBT1",
3265                .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
3266                .ops = &icl_tc_phy_aux_power_well_ops,
3267                .id = DISP_PW_ID_NONE,
3268                {
3269                        .hsw.regs = &icl_aux_power_well_regs,
3270                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3271                        .hsw.is_tc_tbt = true,
3272                },
3273        },
3274        {
3275                .name = "AUX TBT2",
3276                .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
3277                .ops = &icl_tc_phy_aux_power_well_ops,
3278                .id = DISP_PW_ID_NONE,
3279                {
3280                        .hsw.regs = &icl_aux_power_well_regs,
3281                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3282                        .hsw.is_tc_tbt = true,
3283                },
3284        },
3285        {
3286                .name = "AUX TBT3",
3287                .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
3288                .ops = &icl_tc_phy_aux_power_well_ops,
3289                .id = DISP_PW_ID_NONE,
3290                {
3291                        .hsw.regs = &icl_aux_power_well_regs,
3292                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3293                        .hsw.is_tc_tbt = true,
3294                },
3295        },
3296        {
3297                .name = "AUX TBT4",
3298                .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
3299                .ops = &icl_tc_phy_aux_power_well_ops,
3300                .id = DISP_PW_ID_NONE,
3301                {
3302                        .hsw.regs = &icl_aux_power_well_regs,
3303                        .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3304                        .hsw.is_tc_tbt = true,
3305                },
3306        },
3307        {
3308                .name = "power well 4",
3309                .domains = ICL_PW_4_POWER_DOMAINS,
3310                .ops = &hsw_power_well_ops,
3311                .id = DISP_PW_ID_NONE,
3312                {
3313                        .hsw.regs = &hsw_power_well_regs,
3314                        .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3315                        .hsw.has_fuses = true,
3316                        .hsw.irq_pipe_mask = BIT(PIPE_C),
3317                },
3318        },
3319};
3320
3321static int
3322sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
3323                                   int disable_power_well)
3324{
3325        if (disable_power_well >= 0)
3326                return !!disable_power_well;
3327
3328        return 1;
3329}
3330
3331static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3332                               int enable_dc)
3333{
3334        u32 mask;
3335        int requested_dc;
3336        int max_dc;
3337
3338        if (INTEL_GEN(dev_priv) >= 11) {
3339                max_dc = 2;
3340                /*
3341                 * DC9 has a separate HW flow from the rest of the DC states,
3342                 * not depending on the DMC firmware. It's needed by system
3343                 * suspend/resume, so allow it unconditionally.
3344                 */
3345                mask = DC_STATE_EN_DC9;
3346        } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3347                max_dc = 2;
3348                mask = 0;
3349        } else if (IS_GEN9_LP(dev_priv)) {
3350                max_dc = 1;
3351                mask = DC_STATE_EN_DC9;
3352        } else {
3353                max_dc = 0;
3354                mask = 0;
3355        }
3356
3357        if (!i915_modparams.disable_power_well)
3358                max_dc = 0;
3359
3360        if (enable_dc >= 0 && enable_dc <= max_dc) {
3361                requested_dc = enable_dc;
3362        } else if (enable_dc == -1) {
3363                requested_dc = max_dc;
3364        } else if (enable_dc > max_dc && enable_dc <= 2) {
3365                DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3366                              enable_dc, max_dc);
3367                requested_dc = max_dc;
3368        } else {
3369                DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3370                requested_dc = max_dc;
3371        }
3372
3373        if (requested_dc > 1)
3374                mask |= DC_STATE_EN_UPTO_DC6;
3375        if (requested_dc > 0)
3376                mask |= DC_STATE_EN_UPTO_DC5;
3377
3378        DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3379
3380        return mask;
3381}
3382
3383static int
3384__set_power_wells(struct i915_power_domains *power_domains,
3385                  const struct i915_power_well_desc *power_well_descs,
3386                  int power_well_count)
3387{
3388        u64 power_well_ids = 0;
3389        int i;
3390
3391        power_domains->power_well_count = power_well_count;
3392        power_domains->power_wells =
3393                                kcalloc(power_well_count,
3394                                        sizeof(*power_domains->power_wells),
3395                                        GFP_KERNEL);
3396        if (!power_domains->power_wells)
3397                return -ENOMEM;
3398
3399        for (i = 0; i < power_well_count; i++) {
3400                enum i915_power_well_id id = power_well_descs[i].id;
3401
3402                power_domains->power_wells[i].desc = &power_well_descs[i];
3403
3404                if (id == DISP_PW_ID_NONE)
3405                        continue;
3406
3407                WARN_ON(id >= sizeof(power_well_ids) * 8);
3408                WARN_ON(power_well_ids & BIT_ULL(id));
3409                power_well_ids |= BIT_ULL(id);
3410        }
3411
3412        return 0;
3413}
3414
3415#define set_power_wells(power_domains, __power_well_descs) \
3416        __set_power_wells(power_domains, __power_well_descs, \
3417                          ARRAY_SIZE(__power_well_descs))
3418
3419/**
3420 * intel_power_domains_init - initializes the power domain structures
3421 * @dev_priv: i915 device instance
3422 *
3423 * Initializes the power domain structures for @dev_priv depending upon the
3424 * supported platform.
3425 */
3426int intel_power_domains_init(struct drm_i915_private *dev_priv)
3427{
3428        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3429        int err;
3430
3431        i915_modparams.disable_power_well =
3432                sanitize_disable_power_well_option(dev_priv,
3433                                                   i915_modparams.disable_power_well);
3434        dev_priv->csr.allowed_dc_mask =
3435                get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
3436
3437        BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
3438
3439        mutex_init(&power_domains->lock);
3440
3441        /*
3442         * The enabling order will be from lower to higher indexed wells,
3443         * the disabling order is reversed.
3444         */
3445        if (IS_ICELAKE(dev_priv)) {
3446                err = set_power_wells(power_domains, icl_power_wells);
3447        } else if (IS_CANNONLAKE(dev_priv)) {
3448                err = set_power_wells(power_domains, cnl_power_wells);
3449
3450                /*
3451                 * DDI and Aux IO are getting enabled for all ports
3452                 * regardless the presence or use. So, in order to avoid
3453                 * timeouts, lets remove them from the list
3454                 * for the SKUs without port F.
3455                 */
3456                if (!IS_CNL_WITH_PORT_F(dev_priv))
3457                        power_domains->power_well_count -= 2;
3458        } else if (IS_GEMINILAKE(dev_priv)) {
3459                err = set_power_wells(power_domains, glk_power_wells);
3460        } else if (IS_BROXTON(dev_priv)) {
3461                err = set_power_wells(power_domains, bxt_power_wells);
3462        } else if (IS_GEN9_BC(dev_priv)) {
3463                err = set_power_wells(power_domains, skl_power_wells);
3464        } else if (IS_CHERRYVIEW(dev_priv)) {
3465                err = set_power_wells(power_domains, chv_power_wells);
3466        } else if (IS_BROADWELL(dev_priv)) {
3467                err = set_power_wells(power_domains, bdw_power_wells);
3468        } else if (IS_HASWELL(dev_priv)) {
3469                err = set_power_wells(power_domains, hsw_power_wells);
3470        } else if (IS_VALLEYVIEW(dev_priv)) {
3471                err = set_power_wells(power_domains, vlv_power_wells);
3472        } else if (IS_I830(dev_priv)) {
3473                err = set_power_wells(power_domains, i830_power_wells);
3474        } else {
3475                err = set_power_wells(power_domains, i9xx_always_on_power_well);
3476        }
3477
3478        return err;
3479}
3480
3481/**
3482 * intel_power_domains_cleanup - clean up power domains resources
3483 * @dev_priv: i915 device instance
3484 *
3485 * Release any resources acquired by intel_power_domains_init()
3486 */
3487void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3488{
3489        kfree(dev_priv->power_domains.power_wells);
3490}
3491
3492static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
3493{
3494        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3495        struct i915_power_well *power_well;
3496
3497        mutex_lock(&power_domains->lock);
3498        for_each_power_well(dev_priv, power_well) {
3499                power_well->desc->ops->sync_hw(dev_priv, power_well);
3500                power_well->hw_enabled =
3501                        power_well->desc->ops->is_enabled(dev_priv, power_well);
3502        }
3503        mutex_unlock(&power_domains->lock);
3504}
3505
3506static inline
3507bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3508                          i915_reg_t reg, bool enable)
3509{
3510        u32 val, status;
3511
3512        val = I915_READ(reg);
3513        val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3514        I915_WRITE(reg, val);
3515        POSTING_READ(reg);
3516        udelay(10);
3517
3518        status = I915_READ(reg) & DBUF_POWER_STATE;
3519        if ((enable && !status) || (!enable && status)) {
3520                DRM_ERROR("DBus power %s timeout!\n",
3521                          enable ? "enable" : "disable");
3522                return false;
3523        }
3524        return true;
3525}
3526
3527static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
3528{
3529        intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
3530}
3531
3532static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
3533{
3534        intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
3535}
3536
3537static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3538{
3539        if (INTEL_GEN(dev_priv) < 11)
3540                return 1;
3541        return 2;
3542}
3543
3544void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3545                            u8 req_slices)
3546{
3547        const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3548        bool ret;
3549
3550        if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3551                DRM_ERROR("Invalid number of dbuf slices requested\n");
3552                return;
3553        }
3554
3555        if (req_slices == hw_enabled_slices || req_slices == 0)
3556                return;
3557
3558        if (req_slices > hw_enabled_slices)
3559                ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3560        else
3561                ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3562
3563        if (ret)
3564                dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
3565}
3566
3567static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
3568{
3569        I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
3570        I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
3571        POSTING_READ(DBUF_CTL_S2);
3572
3573        udelay(10);
3574
3575        if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3576            !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3577                DRM_ERROR("DBuf power enable timeout\n");
3578        else
3579                /*
3580                 * FIXME: for now pretend that we only have 1 slice, see
3581                 * intel_enabled_dbuf_slices_num().
3582                 */
3583                dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
3584}
3585
3586static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
3587{
3588        I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
3589        I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
3590        POSTING_READ(DBUF_CTL_S2);
3591
3592        udelay(10);
3593
3594        if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3595            (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3596                DRM_ERROR("DBuf power disable timeout!\n");
3597        else
3598                /*
3599                 * FIXME: for now pretend that the first slice is always
3600                 * enabled, see intel_enabled_dbuf_slices_num().
3601                 */
3602                dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
3603}
3604
3605static void icl_mbus_init(struct drm_i915_private *dev_priv)
3606{
3607        u32 val;
3608
3609        val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3610              MBUS_ABOX_BT_CREDIT_POOL2(16) |
3611              MBUS_ABOX_B_CREDIT(1) |
3612              MBUS_ABOX_BW_CREDIT(1);
3613
3614        I915_WRITE(MBUS_ABOX_CTL, val);
3615}
3616
3617static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3618                                      bool enable)
3619{
3620        i915_reg_t reg;
3621        u32 reset_bits, val;
3622
3623        if (IS_IVYBRIDGE(dev_priv)) {
3624                reg = GEN7_MSG_CTL;
3625                reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3626        } else {
3627                reg = HSW_NDE_RSTWRN_OPT;
3628                reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3629        }
3630
3631        val = I915_READ(reg);
3632
3633        if (enable)
3634                val |= reset_bits;
3635        else
3636                val &= ~reset_bits;
3637
3638        I915_WRITE(reg, val);
3639}
3640
3641static void skl_display_core_init(struct drm_i915_private *dev_priv,
3642                                   bool resume)
3643{
3644        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3645        struct i915_power_well *well;
3646
3647        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3648
3649        /* enable PCH reset handshake */
3650        intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3651
3652        /* enable PG1 and Misc I/O */
3653        mutex_lock(&power_domains->lock);
3654
3655        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3656        intel_power_well_enable(dev_priv, well);
3657
3658        well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
3659        intel_power_well_enable(dev_priv, well);
3660
3661        mutex_unlock(&power_domains->lock);
3662
3663        skl_init_cdclk(dev_priv);
3664
3665        gen9_dbuf_enable(dev_priv);
3666
3667        if (resume && dev_priv->csr.dmc_payload)
3668                intel_csr_load_program(dev_priv);
3669}
3670
3671static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
3672{
3673        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3674        struct i915_power_well *well;
3675
3676        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3677
3678        gen9_dbuf_disable(dev_priv);
3679
3680        skl_uninit_cdclk(dev_priv);
3681
3682        /* The spec doesn't call for removing the reset handshake flag */
3683        /* disable PG1 and Misc I/O */
3684
3685        mutex_lock(&power_domains->lock);
3686
3687        /*
3688         * BSpec says to keep the MISC IO power well enabled here, only
3689         * remove our request for power well 1.
3690         * Note that even though the driver's request is removed power well 1
3691         * may stay enabled after this due to DMC's own request on it.
3692         */
3693        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3694        intel_power_well_disable(dev_priv, well);
3695
3696        mutex_unlock(&power_domains->lock);
3697
3698        usleep_range(10, 30);           /* 10 us delay per Bspec */
3699}
3700
3701void bxt_display_core_init(struct drm_i915_private *dev_priv,
3702                           bool resume)
3703{
3704        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3705        struct i915_power_well *well;
3706
3707        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3708
3709        /*
3710         * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
3711         * or else the reset will hang because there is no PCH to respond.
3712         * Move the handshake programming to initialization sequence.
3713         * Previously was left up to BIOS.
3714         */
3715        intel_pch_reset_handshake(dev_priv, false);
3716
3717        /* Enable PG1 */
3718        mutex_lock(&power_domains->lock);
3719
3720        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3721        intel_power_well_enable(dev_priv, well);
3722
3723        mutex_unlock(&power_domains->lock);
3724
3725        bxt_init_cdclk(dev_priv);
3726
3727        gen9_dbuf_enable(dev_priv);
3728
3729        if (resume && dev_priv->csr.dmc_payload)
3730                intel_csr_load_program(dev_priv);
3731}
3732
3733void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
3734{
3735        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3736        struct i915_power_well *well;
3737
3738        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3739
3740        gen9_dbuf_disable(dev_priv);
3741
3742        bxt_uninit_cdclk(dev_priv);
3743
3744        /* The spec doesn't call for removing the reset handshake flag */
3745
3746        /*
3747         * Disable PW1 (PG1).
3748         * Note that even though the driver's request is removed power well 1
3749         * may stay enabled after this due to DMC's own request on it.
3750         */
3751        mutex_lock(&power_domains->lock);
3752
3753        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3754        intel_power_well_disable(dev_priv, well);
3755
3756        mutex_unlock(&power_domains->lock);
3757
3758        usleep_range(10, 30);           /* 10 us delay per Bspec */
3759}
3760
3761static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
3762{
3763        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3764        struct i915_power_well *well;
3765
3766        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3767
3768        /* 1. Enable PCH Reset Handshake */
3769        intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3770
3771        /* 2-3. */
3772        cnl_combo_phys_init(dev_priv);
3773
3774        /*
3775         * 4. Enable Power Well 1 (PG1).
3776         *    The AUX IO power wells will be enabled on demand.
3777         */
3778        mutex_lock(&power_domains->lock);
3779        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3780        intel_power_well_enable(dev_priv, well);
3781        mutex_unlock(&power_domains->lock);
3782
3783        /* 5. Enable CD clock */
3784        cnl_init_cdclk(dev_priv);
3785
3786        /* 6. Enable DBUF */
3787        gen9_dbuf_enable(dev_priv);
3788
3789        if (resume && dev_priv->csr.dmc_payload)
3790                intel_csr_load_program(dev_priv);
3791}
3792
3793static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
3794{
3795        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3796        struct i915_power_well *well;
3797
3798        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3799
3800        /* 1. Disable all display engine functions -> aready done */
3801
3802        /* 2. Disable DBUF */
3803        gen9_dbuf_disable(dev_priv);
3804
3805        /* 3. Disable CD clock */
3806        cnl_uninit_cdclk(dev_priv);
3807
3808        /*
3809         * 4. Disable Power Well 1 (PG1).
3810         *    The AUX IO power wells are toggled on demand, so they are already
3811         *    disabled at this point.
3812         */
3813        mutex_lock(&power_domains->lock);
3814        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3815        intel_power_well_disable(dev_priv, well);
3816        mutex_unlock(&power_domains->lock);
3817
3818        usleep_range(10, 30);           /* 10 us delay per Bspec */
3819
3820        /* 5. */
3821        cnl_combo_phys_uninit(dev_priv);
3822}
3823
3824void icl_display_core_init(struct drm_i915_private *dev_priv,
3825                           bool resume)
3826{
3827        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3828        struct i915_power_well *well;
3829
3830        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3831
3832        /* 1. Enable PCH reset handshake. */
3833        intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3834
3835        /* 2-3. */
3836        icl_combo_phys_init(dev_priv);
3837
3838        /*
3839         * 4. Enable Power Well 1 (PG1).
3840         *    The AUX IO power wells will be enabled on demand.
3841         */
3842        mutex_lock(&power_domains->lock);
3843        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3844        intel_power_well_enable(dev_priv, well);
3845        mutex_unlock(&power_domains->lock);
3846
3847        /* 5. Enable CDCLK. */
3848        icl_init_cdclk(dev_priv);
3849
3850        /* 6. Enable DBUF. */
3851        icl_dbuf_enable(dev_priv);
3852
3853        /* 7. Setup MBUS. */
3854        icl_mbus_init(dev_priv);
3855
3856        if (resume && dev_priv->csr.dmc_payload)
3857                intel_csr_load_program(dev_priv);
3858}
3859
3860void icl_display_core_uninit(struct drm_i915_private *dev_priv)
3861{
3862        struct i915_power_domains *power_domains = &dev_priv->power_domains;
3863        struct i915_power_well *well;
3864
3865        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3866
3867        /* 1. Disable all display engine functions -> aready done */
3868
3869        /* 2. Disable DBUF */
3870        icl_dbuf_disable(dev_priv);
3871
3872        /* 3. Disable CD clock */
3873        icl_uninit_cdclk(dev_priv);
3874
3875        /*
3876         * 4. Disable Power Well 1 (PG1).
3877         *    The AUX IO power wells are toggled on demand, so they are already
3878         *    disabled at this point.
3879         */
3880        mutex_lock(&power_domains->lock);
3881        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3882        intel_power_well_disable(dev_priv, well);
3883        mutex_unlock(&power_domains->lock);
3884
3885        /* 5. */
3886        icl_combo_phys_uninit(dev_priv);
3887}
3888
3889static void chv_phy_control_init(struct drm_i915_private *dev_priv)
3890{
3891        struct i915_power_well *cmn_bc =
3892                lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
3893        struct i915_power_well *cmn_d =
3894                lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
3895
3896        /*
3897         * DISPLAY_PHY_CONTROL can get corrupted if read. As a
3898         * workaround never ever read DISPLAY_PHY_CONTROL, and
3899         * instead maintain a shadow copy ourselves. Use the actual
3900         * power well state and lane status to reconstruct the
3901         * expected initial value.
3902         */
3903        dev_priv->chv_phy_control =
3904                PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
3905                PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
3906                PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
3907                PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
3908                PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
3909
3910        /*
3911         * If all lanes are disabled we leave the override disabled
3912         * with all power down bits cleared to match the state we
3913         * would use after disabling the port. Otherwise enable the
3914         * override and set the lane powerdown bits accding to the
3915         * current lane status.
3916         */
3917        if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
3918                u32 status = I915_READ(DPLL(PIPE_A));
3919                unsigned int mask;
3920
3921                mask = status & DPLL_PORTB_READY_MASK;
3922                if (mask == 0xf)
3923                        mask = 0x0;
3924                else
3925                        dev_priv->chv_phy_control |=
3926                                PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
3927
3928                dev_priv->chv_phy_control |=
3929                        PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
3930
3931                mask = (status & DPLL_PORTC_READY_MASK) >> 4;
3932                if (mask == 0xf)
3933                        mask = 0x0;
3934                else
3935                        dev_priv->chv_phy_control |=
3936                                PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
3937
3938                dev_priv->chv_phy_control |=
3939                        PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
3940
3941                dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
3942
3943                dev_priv->chv_phy_assert[DPIO_PHY0] = false;
3944        } else {
3945                dev_priv->chv_phy_assert[DPIO_PHY0] = true;
3946        }
3947
3948        if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
3949                u32 status = I915_READ(DPIO_PHY_STATUS);
3950                unsigned int mask;
3951
3952                mask = status & DPLL_PORTD_READY_MASK;
3953
3954                if (mask == 0xf)
3955                        mask = 0x0;
3956                else
3957                        dev_priv->chv_phy_control |=
3958                                PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
3959
3960                dev_priv->chv_phy_control |=
3961                        PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
3962
3963                dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
3964
3965                dev_priv->chv_phy_assert[DPIO_PHY1] = false;
3966        } else {
3967                dev_priv->chv_phy_assert[DPIO_PHY1] = true;
3968        }
3969
3970        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
3971
3972        DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
3973                      dev_priv->chv_phy_control);
3974}
3975
3976static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
3977{
3978        struct i915_power_well *cmn =
3979                lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
3980        struct i915_power_well *disp2d =
3981                lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
3982
3983        /* If the display might be already active skip this */
3984        if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
3985            disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
3986            I915_READ(DPIO_CTL) & DPIO_CMNRST)
3987                return;
3988
3989        DRM_DEBUG_KMS("toggling display PHY side reset\n");
3990
3991        /* cmnlane needs DPLL registers */
3992        disp2d->desc->ops->enable(dev_priv, disp2d);
3993
3994        /*
3995         * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
3996         * Need to assert and de-assert PHY SB reset by gating the
3997         * common lane power, then un-gating it.
3998         * Simply ungating isn't enough to reset the PHY enough to get
3999         * ports and lanes running.
4000         */
4001        cmn->desc->ops->disable(dev_priv, cmn);
4002}
4003
4004static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
4005
4006/**
4007 * intel_power_domains_init_hw - initialize hardware power domain state
4008 * @i915: i915 device instance
4009 * @resume: Called from resume code paths or not
4010 *
4011 * This function initializes the hardware power domain state and enables all
4012 * power wells belonging to the INIT power domain. Power wells in other
4013 * domains (and not in the INIT domain) are referenced or disabled by
4014 * intel_modeset_readout_hw_state(). After that the reference count of each
4015 * power well must match its HW enabled state, see
4016 * intel_power_domains_verify_state().
4017 *
4018 * It will return with power domains disabled (to be enabled later by
4019 * intel_power_domains_enable()) and must be paired with
4020 * intel_power_domains_fini_hw().
4021 */
4022void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
4023{
4024        struct i915_power_domains *power_domains = &i915->power_domains;
4025
4026        power_domains->initializing = true;
4027
4028        if (IS_ICELAKE(i915)) {
4029                icl_display_core_init(i915, resume);
4030        } else if (IS_CANNONLAKE(i915)) {
4031                cnl_display_core_init(i915, resume);
4032        } else if (IS_GEN9_BC(i915)) {
4033                skl_display_core_init(i915, resume);
4034        } else if (IS_GEN9_LP(i915)) {
4035                bxt_display_core_init(i915, resume);
4036        } else if (IS_CHERRYVIEW(i915)) {
4037                mutex_lock(&power_domains->lock);
4038                chv_phy_control_init(i915);
4039                mutex_unlock(&power_domains->lock);
4040        } else if (IS_VALLEYVIEW(i915)) {
4041                mutex_lock(&power_domains->lock);
4042                vlv_cmnlane_wa(i915);
4043                mutex_unlock(&power_domains->lock);
4044        } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
4045                intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4046        }
4047
4048        /*
4049         * Keep all power wells enabled for any dependent HW access during
4050         * initialization and to make sure we keep BIOS enabled display HW
4051         * resources powered until display HW readout is complete. We drop
4052         * this reference in intel_power_domains_enable().
4053         */
4054        power_domains->wakeref =
4055                intel_display_power_get(i915, POWER_DOMAIN_INIT);
4056
4057        /* Disable power support if the user asked so. */
4058        if (!i915_modparams.disable_power_well)
4059                intel_display_power_get(i915, POWER_DOMAIN_INIT);
4060        intel_power_domains_sync_hw(i915);
4061
4062        power_domains->initializing = false;
4063}
4064
4065/**
4066 * intel_power_domains_fini_hw - deinitialize hw power domain state
4067 * @i915: i915 device instance
4068 *
4069 * De-initializes the display power domain HW state. It also ensures that the
4070 * device stays powered up so that the driver can be reloaded.
4071 *
4072 * It must be called with power domains already disabled (after a call to
4073 * intel_power_domains_disable()) and must be paired with
4074 * intel_power_domains_init_hw().
4075 */
4076void intel_power_domains_fini_hw(struct drm_i915_private *i915)
4077{
4078        intel_wakeref_t wakeref __maybe_unused =
4079                fetch_and_zero(&i915->power_domains.wakeref);
4080
4081        /* Remove the refcount we took to keep power well support disabled. */
4082        if (!i915_modparams.disable_power_well)
4083                intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4084
4085        intel_power_domains_verify_state(i915);
4086
4087        /* Keep the power well enabled, but cancel its rpm wakeref. */
4088        intel_runtime_pm_put(i915, wakeref);
4089}
4090
4091/**
4092 * intel_power_domains_enable - enable toggling of display power wells
4093 * @i915: i915 device instance
4094 *
4095 * Enable the ondemand enabling/disabling of the display power wells. Note that
4096 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4097 * only at specific points of the display modeset sequence, thus they are not
4098 * affected by the intel_power_domains_enable()/disable() calls. The purpose
4099 * of these function is to keep the rest of power wells enabled until the end
4100 * of display HW readout (which will acquire the power references reflecting
4101 * the current HW state).
4102 */
4103void intel_power_domains_enable(struct drm_i915_private *i915)
4104{
4105        intel_wakeref_t wakeref __maybe_unused =
4106                fetch_and_zero(&i915->power_domains.wakeref);
4107
4108        intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4109        intel_power_domains_verify_state(i915);
4110}
4111
4112/**
4113 * intel_power_domains_disable - disable toggling of display power wells
4114 * @i915: i915 device instance
4115 *
4116 * Disable the ondemand enabling/disabling of the display power wells. See
4117 * intel_power_domains_enable() for which power wells this call controls.
4118 */
4119void intel_power_domains_disable(struct drm_i915_private *i915)
4120{
4121        struct i915_power_domains *power_domains = &i915->power_domains;
4122
4123        WARN_ON(power_domains->wakeref);
4124        power_domains->wakeref =
4125                intel_display_power_get(i915, POWER_DOMAIN_INIT);
4126
4127        intel_power_domains_verify_state(i915);
4128}
4129
4130/**
4131 * intel_power_domains_suspend - suspend power domain state
4132 * @i915: i915 device instance
4133 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
4134 *
4135 * This function prepares the hardware power domain state before entering
4136 * system suspend.
4137 *
4138 * It must be called with power domains already disabled (after a call to
4139 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
4140 */
4141void intel_power_domains_suspend(struct drm_i915_private *i915,
4142                                 enum i915_drm_suspend_mode suspend_mode)
4143{
4144        struct i915_power_domains *power_domains = &i915->power_domains;
4145        intel_wakeref_t wakeref __maybe_unused =
4146                fetch_and_zero(&power_domains->wakeref);
4147
4148        intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4149
4150        /*
4151         * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
4152         * support don't manually deinit the power domains. This also means the
4153         * CSR/DMC firmware will stay active, it will power down any HW
4154         * resources as required and also enable deeper system power states
4155         * that would be blocked if the firmware was inactive.
4156         */
4157        if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
4158            suspend_mode == I915_DRM_SUSPEND_IDLE &&
4159            i915->csr.dmc_payload) {
4160                intel_power_domains_verify_state(i915);
4161                return;
4162        }
4163
4164        /*
4165         * Even if power well support was disabled we still want to disable
4166         * power wells if power domains must be deinitialized for suspend.
4167         */
4168        if (!i915_modparams.disable_power_well) {
4169                intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4170                intel_power_domains_verify_state(i915);
4171        }
4172
4173        if (IS_ICELAKE(i915))
4174                icl_display_core_uninit(i915);
4175        else if (IS_CANNONLAKE(i915))
4176                cnl_display_core_uninit(i915);
4177        else if (IS_GEN9_BC(i915))
4178                skl_display_core_uninit(i915);
4179        else if (IS_GEN9_LP(i915))
4180                bxt_display_core_uninit(i915);
4181
4182        power_domains->display_core_suspended = true;
4183}
4184
4185/**
4186 * intel_power_domains_resume - resume power domain state
4187 * @i915: i915 device instance
4188 *
4189 * This function resume the hardware power domain state during system resume.
4190 *
4191 * It will return with power domain support disabled (to be enabled later by
4192 * intel_power_domains_enable()) and must be paired with
4193 * intel_power_domains_suspend().
4194 */
4195void intel_power_domains_resume(struct drm_i915_private *i915)
4196{
4197        struct i915_power_domains *power_domains = &i915->power_domains;
4198
4199        if (power_domains->display_core_suspended) {
4200                intel_power_domains_init_hw(i915, true);
4201                power_domains->display_core_suspended = false;
4202        } else {
4203                WARN_ON(power_domains->wakeref);
4204                power_domains->wakeref =
4205                        intel_display_power_get(i915, POWER_DOMAIN_INIT);
4206        }
4207
4208        intel_power_domains_verify_state(i915);
4209}
4210
4211#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4212
4213static void intel_power_domains_dump_info(struct drm_i915_private *i915)
4214{
4215        struct i915_power_domains *power_domains = &i915->power_domains;
4216        struct i915_power_well *power_well;
4217
4218        for_each_power_well(i915, power_well) {
4219                enum intel_display_power_domain domain;
4220
4221                DRM_DEBUG_DRIVER("%-25s %d\n",
4222                                 power_well->desc->name, power_well->count);
4223
4224                for_each_power_domain(domain, power_well->desc->domains)
4225                        DRM_DEBUG_DRIVER("  %-23s %d\n",
4226                                         intel_display_power_domain_str(domain),
4227                                         power_domains->domain_use_count[domain]);
4228        }
4229}
4230
4231/**
4232 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
4233 * @i915: i915 device instance
4234 *
4235 * Verify if the reference count of each power well matches its HW enabled
4236 * state and the total refcount of the domains it belongs to. This must be
4237 * called after modeset HW state sanitization, which is responsible for
4238 * acquiring reference counts for any power wells in use and disabling the
4239 * ones left on by BIOS but not required by any active output.
4240 */
4241static void intel_power_domains_verify_state(struct drm_i915_private *i915)
4242{
4243        struct i915_power_domains *power_domains = &i915->power_domains;
4244        struct i915_power_well *power_well;
4245        bool dump_domain_info;
4246
4247        mutex_lock(&power_domains->lock);
4248
4249        dump_domain_info = false;
4250        for_each_power_well(i915, power_well) {
4251                enum intel_display_power_domain domain;
4252                int domains_count;
4253                bool enabled;
4254
4255                enabled = power_well->desc->ops->is_enabled(i915, power_well);
4256                if ((power_well->count || power_well->desc->always_on) !=
4257                    enabled)
4258                        DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
4259                                  power_well->desc->name,
4260                                  power_well->count, enabled);
4261
4262                domains_count = 0;
4263                for_each_power_domain(domain, power_well->desc->domains)
4264                        domains_count += power_domains->domain_use_count[domain];
4265
4266                if (power_well->count != domains_count) {
4267                        DRM_ERROR("power well %s refcount/domain refcount mismatch "
4268                                  "(refcount %d/domains refcount %d)\n",
4269                                  power_well->desc->name, power_well->count,
4270                                  domains_count);
4271                        dump_domain_info = true;
4272                }
4273        }
4274
4275        if (dump_domain_info) {
4276                static bool dumped;
4277
4278                if (!dumped) {
4279                        intel_power_domains_dump_info(i915);
4280                        dumped = true;
4281                }
4282        }
4283
4284        mutex_unlock(&power_domains->lock);
4285}
4286
4287#else
4288
4289static void intel_power_domains_verify_state(struct drm_i915_private *i915)
4290{
4291}
4292
4293#endif
4294
4295/**
4296 * intel_runtime_pm_get - grab a runtime pm reference
4297 * @i915: i915 device instance
4298 *
4299 * This function grabs a device-level runtime pm reference (mostly used for GEM
4300 * code to ensure the GTT or GT is on) and ensures that it is powered up.
4301 *
4302 * Any runtime pm reference obtained by this function must have a symmetric
4303 * call to intel_runtime_pm_put() to release the reference again.
4304 *
4305 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
4306 */
4307intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
4308{
4309        struct pci_dev *pdev = i915->drm.pdev;
4310        struct device *kdev = &pdev->dev;
4311        int ret;
4312
4313        ret = pm_runtime_get_sync(kdev);
4314        WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4315
4316        return track_intel_runtime_pm_wakeref(i915);
4317}
4318
4319/**
4320 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
4321 * @i915: i915 device instance
4322 *
4323 * This function grabs a device-level runtime pm reference if the device is
4324 * already in use and ensures that it is powered up. It is illegal to try
4325 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
4326 *
4327 * Any runtime pm reference obtained by this function must have a symmetric
4328 * call to intel_runtime_pm_put() to release the reference again.
4329 *
4330 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
4331 * as True if the wakeref was acquired, or False otherwise.
4332 */
4333intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
4334{
4335        if (IS_ENABLED(CONFIG_PM)) {
4336                struct pci_dev *pdev = i915->drm.pdev;
4337                struct device *kdev = &pdev->dev;
4338
4339                /*
4340                 * In cases runtime PM is disabled by the RPM core and we get
4341                 * an -EINVAL return value we are not supposed to call this
4342                 * function, since the power state is undefined. This applies
4343                 * atm to the late/early system suspend/resume handlers.
4344                 */
4345                if (pm_runtime_get_if_in_use(kdev) <= 0)
4346                        return 0;
4347        }
4348
4349        return track_intel_runtime_pm_wakeref(i915);
4350}
4351
4352/**
4353 * intel_runtime_pm_get_noresume - grab a runtime pm reference
4354 * @i915: i915 device instance
4355 *
4356 * This function grabs a device-level runtime pm reference (mostly used for GEM
4357 * code to ensure the GTT or GT is on).
4358 *
4359 * It will _not_ power up the device but instead only check that it's powered
4360 * on.  Therefore it is only valid to call this functions from contexts where
4361 * the device is known to be powered up and where trying to power it up would
4362 * result in hilarity and deadlocks. That pretty much means only the system
4363 * suspend/resume code where this is used to grab runtime pm references for
4364 * delayed setup down in work items.
4365 *
4366 * Any runtime pm reference obtained by this function must have a symmetric
4367 * call to intel_runtime_pm_put() to release the reference again.
4368 *
4369 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
4370 */
4371intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
4372{
4373        struct pci_dev *pdev = i915->drm.pdev;
4374        struct device *kdev = &pdev->dev;
4375
4376        assert_rpm_wakelock_held(i915);
4377        pm_runtime_get_noresume(kdev);
4378
4379        return track_intel_runtime_pm_wakeref(i915);
4380}
4381
4382/**
4383 * intel_runtime_pm_put - release a runtime pm reference
4384 * @i915: i915 device instance
4385 *
4386 * This function drops the device-level runtime pm reference obtained by
4387 * intel_runtime_pm_get() and might power down the corresponding
4388 * hardware block right away if this is the last reference.
4389 */
4390void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
4391{
4392        struct pci_dev *pdev = i915->drm.pdev;
4393        struct device *kdev = &pdev->dev;
4394
4395        untrack_intel_runtime_pm_wakeref(i915);
4396
4397        pm_runtime_mark_last_busy(kdev);
4398        pm_runtime_put_autosuspend(kdev);
4399}
4400
4401#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4402void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
4403{
4404        cancel_intel_runtime_pm_wakeref(i915, wref);
4405        intel_runtime_pm_put_unchecked(i915);
4406}
4407#endif
4408
4409/**
4410 * intel_runtime_pm_enable - enable runtime pm
4411 * @i915: i915 device instance
4412 *
4413 * This function enables runtime pm at the end of the driver load sequence.
4414 *
4415 * Note that this function does currently not enable runtime pm for the
4416 * subordinate display power domains. That is done by
4417 * intel_power_domains_enable().
4418 */
4419void intel_runtime_pm_enable(struct drm_i915_private *i915)
4420{
4421        struct pci_dev *pdev = i915->drm.pdev;
4422        struct device *kdev = &pdev->dev;
4423
4424        /*
4425         * Disable the system suspend direct complete optimization, which can
4426         * leave the device suspended skipping the driver's suspend handlers
4427         * if the device was already runtime suspended. This is needed due to
4428         * the difference in our runtime and system suspend sequence and
4429         * becaue the HDA driver may require us to enable the audio power
4430         * domain during system suspend.
4431         */
4432        dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
4433
4434        pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
4435        pm_runtime_mark_last_busy(kdev);
4436
4437        /*
4438         * Take a permanent reference to disable the RPM functionality and drop
4439         * it only when unloading the driver. Use the low level get/put helpers,
4440         * so the driver's own RPM reference tracking asserts also work on
4441         * platforms without RPM support.
4442         */
4443        if (!HAS_RUNTIME_PM(i915)) {
4444                int ret;
4445
4446                pm_runtime_dont_use_autosuspend(kdev);
4447                ret = pm_runtime_get_sync(kdev);
4448                WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4449        } else {
4450                pm_runtime_use_autosuspend(kdev);
4451        }
4452
4453        /*
4454         * The core calls the driver load handler with an RPM reference held.
4455         * We drop that here and will reacquire it during unloading in
4456         * intel_power_domains_fini().
4457         */
4458        pm_runtime_put_autosuspend(kdev);
4459}
4460
4461void intel_runtime_pm_disable(struct drm_i915_private *i915)
4462{
4463        struct pci_dev *pdev = i915->drm.pdev;
4464        struct device *kdev = &pdev->dev;
4465
4466        /* Transfer rpm ownership back to core */
4467        WARN(pm_runtime_get_sync(kdev) < 0,
4468             "Failed to pass rpm ownership back to core\n");
4469
4470        pm_runtime_dont_use_autosuspend(kdev);
4471
4472        if (!HAS_RUNTIME_PM(i915))
4473                pm_runtime_put(kdev);
4474}
4475
4476void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
4477{
4478        struct i915_runtime_pm *rpm = &i915->runtime_pm;
4479        int count;
4480
4481        count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
4482        WARN(count,
4483             "i915->runtime_pm.wakeref_count=%d on cleanup\n",
4484             count);
4485
4486        untrack_intel_runtime_pm_wakeref(i915);
4487}
4488
4489void intel_runtime_pm_init_early(struct drm_i915_private *i915)
4490{
4491        init_intel_runtime_pm_wakeref(i915);
4492}
4493