linux/drivers/gpu/drm/i915/intel_uncore.c
   1/*
   2 * Copyright © 2013 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 */
  23
  24#include "i915_drv.h"
  25#include "intel_drv.h"
  26#include "i915_vgpu.h"
  27
  28#include <asm/iosf_mbi.h>
  29#include <linux/pm_runtime.h>
  30
  31#define FORCEWAKE_ACK_TIMEOUT_MS 50
  32#define GT_FIFO_TIMEOUT_MS       10
  33
  34#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
  35
  36static const char * const forcewake_domain_names[] = {
  37        "render",
  38        "blitter",
  39        "media",
  40        "vdbox0",
  41        "vdbox1",
  42        "vdbox2",
  43        "vdbox3",
  44        "vebox0",
  45        "vebox1",
  46};
  47
  48const char *
  49intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
  50{
  51        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
  52
  53        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
  54                return forcewake_domain_names[id];
  55
  56        WARN_ON(id);
  57
  58        return "unknown";
  59}
  60
  61static inline void
  62fw_domain_reset(struct drm_i915_private *i915,
  63                const struct intel_uncore_forcewake_domain *d)
  64{
  65        __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
  66}
  67
  68static inline void
  69fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
  70{
  71        d->wake_count++;
  72        hrtimer_start_range_ns(&d->timer,
  73                               NSEC_PER_MSEC,
  74                               NSEC_PER_MSEC,
  75                               HRTIMER_MODE_REL);
  76}
  77
  78static inline int
  79__wait_for_ack(const struct drm_i915_private *i915,
  80               const struct intel_uncore_forcewake_domain *d,
  81               const u32 ack,
  82               const u32 value)
  83{
  84        return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
  85                               FORCEWAKE_ACK_TIMEOUT_MS);
  86}
  87
  88static inline int
  89wait_ack_clear(const struct drm_i915_private *i915,
  90               const struct intel_uncore_forcewake_domain *d,
  91               const u32 ack)
  92{
  93        return __wait_for_ack(i915, d, ack, 0);
  94}
  95
  96static inline int
  97wait_ack_set(const struct drm_i915_private *i915,
  98             const struct intel_uncore_forcewake_domain *d,
  99             const u32 ack)
 100{
 101        return __wait_for_ack(i915, d, ack, ack);
 102}
 103
 104static inline void
 105fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
 106                         const struct intel_uncore_forcewake_domain *d)
 107{
 108        if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
 109                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
 110                          intel_uncore_forcewake_domain_to_str(d->id));
 111}
 112
 113enum ack_type {
 114        ACK_CLEAR = 0,
 115        ACK_SET
 116};
 117
 118static int
 119fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
 120                                 const struct intel_uncore_forcewake_domain *d,
 121                                 const enum ack_type type)
 122{
 123        const u32 ack_bit = FORCEWAKE_KERNEL;
 124        const u32 value = type == ACK_SET ? ack_bit : 0;
 125        unsigned int pass;
 126        bool ack_detected;
 127
 128        /*
  129         * There is a possibility of the driver's wake request colliding
  130         * with the hardware's own wake requests, which can cause the
  131         * hardware to not deliver the driver's ack message.
 132         *
 133         * Use a fallback bit toggle to kick the gpu state machine
 134         * in the hope that the original ack will be delivered along with
 135         * the fallback ack.
 136         *
 137         * This workaround is described in HSDES #1604254524
 138         */
 139
 140        pass = 1;
 141        do {
 142                wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
 143
 144                __raw_i915_write32(i915, d->reg_set,
 145                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
 146                /* Give gt some time to relax before the polling frenzy */
 147                udelay(10 * pass);
 148                wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
 149
 150                ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
 151
 152                __raw_i915_write32(i915, d->reg_set,
 153                                   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
 154        } while (!ack_detected && pass++ < 10);
 155
 156        DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
 157                         intel_uncore_forcewake_domain_to_str(d->id),
 158                         type == ACK_SET ? "set" : "clear",
 159                         __raw_i915_read32(i915, d->reg_ack),
 160                         pass);
 161
 162        return ack_detected ? 0 : -ETIMEDOUT;
 163}
 164
 165static inline void
 166fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
 167                                  const struct intel_uncore_forcewake_domain *d)
 168{
 169        if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
 170                return;
 171
 172        if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
 173                fw_domain_wait_ack_clear(i915, d);
 174}
 175
 176static inline void
 177fw_domain_get(struct drm_i915_private *i915,
 178              const struct intel_uncore_forcewake_domain *d)
 179{
 180        __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
 181}
 182
 183static inline void
 184fw_domain_wait_ack_set(const struct drm_i915_private *i915,
 185                       const struct intel_uncore_forcewake_domain *d)
 186{
 187        if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
 188                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
 189                          intel_uncore_forcewake_domain_to_str(d->id));
 190}
 191
 192static inline void
 193fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
 194                                const struct intel_uncore_forcewake_domain *d)
 195{
 196        if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
 197                return;
 198
 199        if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
 200                fw_domain_wait_ack_set(i915, d);
 201}
 202
 203static inline void
 204fw_domain_put(const struct drm_i915_private *i915,
 205              const struct intel_uncore_forcewake_domain *d)
 206{
 207        __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
 208}
 209
 210static void
 211fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
 212{
 213        struct intel_uncore_forcewake_domain *d;
 214        unsigned int tmp;
 215
 216        GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
 217
 218        for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
 219                fw_domain_wait_ack_clear(i915, d);
 220                fw_domain_get(i915, d);
 221        }
 222
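        /*
         * Wait for the acks only after every wake request has been issued
         * above, so that the domains can power up in parallel.
         */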
 223        for_each_fw_domain_masked(d, fw_domains, i915, tmp)
 224                fw_domain_wait_ack_set(i915, d);
 225
 226        i915->uncore.fw_domains_active |= fw_domains;
 227}
 228
 229static void
 230fw_domains_get_with_fallback(struct drm_i915_private *i915,
 231                             enum forcewake_domains fw_domains)
 232{
 233        struct intel_uncore_forcewake_domain *d;
 234        unsigned int tmp;
 235
 236        GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
 237
 238        for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
 239                fw_domain_wait_ack_clear_fallback(i915, d);
 240                fw_domain_get(i915, d);
 241        }
 242
 243        for_each_fw_domain_masked(d, fw_domains, i915, tmp)
 244                fw_domain_wait_ack_set_fallback(i915, d);
 245
 246        i915->uncore.fw_domains_active |= fw_domains;
 247}
 248
 249static void
 250fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
 251{
 252        struct intel_uncore_forcewake_domain *d;
 253        unsigned int tmp;
 254
 255        GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
 256
 257        for_each_fw_domain_masked(d, fw_domains, i915, tmp)
 258                fw_domain_put(i915, d);
 259
 260        i915->uncore.fw_domains_active &= ~fw_domains;
 261}
 262
 263static void
 264fw_domains_reset(struct drm_i915_private *i915,
 265                 enum forcewake_domains fw_domains)
 266{
 267        struct intel_uncore_forcewake_domain *d;
 268        unsigned int tmp;
 269
 270        if (!fw_domains)
 271                return;
 272
 273        GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
 274
 275        for_each_fw_domain_masked(d, fw_domains, i915, tmp)
 276                fw_domain_reset(i915, d);
 277}
 278
 279static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 280{
 281        /* w/a for a sporadic read returning 0 by waiting for the GT
 282         * thread to wake up.
 283         */
 284        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
 285                                GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
 286                DRM_ERROR("GT thread status wait timed out\n");
 287}
 288
 289static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
 290                                              enum forcewake_domains fw_domains)
 291{
 292        fw_domains_get(dev_priv, fw_domains);
 293
 294        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
 295        __gen6_gt_wait_for_thread_c0(dev_priv);
 296}
 297
 298static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
 299{
 300        u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
 301
 302        return count & GT_FIFO_FREE_ENTRIES_MASK;
 303}
 304
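/*
 * On gen6/gen7, writes to registers below 0x40000 (see NEEDS_FORCE_WAKE and
 * gen6_write##x below) go through a small hardware FIFO. This helper keeps
 * the number of free entries above the reserved threshold, re-reading
 * GTFIFOCTL once the cached count runs out, before another write is posted.
 */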
 305static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 306{
 307        u32 n;
 308
  309        /* On VLV, the FIFO is shared by both SW and HW.
  310         * So, we need to read FREE_ENTRIES every time. */
 311        if (IS_VALLEYVIEW(dev_priv))
 312                n = fifo_free_entries(dev_priv);
 313        else
 314                n = dev_priv->uncore.fifo_count;
 315
 316        if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
 317                if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
 318                                    GT_FIFO_NUM_RESERVED_ENTRIES,
 319                                    GT_FIFO_TIMEOUT_MS)) {
 320                        DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
 321                        return;
 322                }
 323        }
 324
 325        dev_priv->uncore.fifo_count = n - 1;
 326}
 327
 328static enum hrtimer_restart
 329intel_uncore_fw_release_timer(struct hrtimer *timer)
 330{
 331        struct intel_uncore_forcewake_domain *domain =
 332               container_of(timer, struct intel_uncore_forcewake_domain, timer);
 333        struct drm_i915_private *dev_priv =
 334                container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
 335        unsigned long irqflags;
 336
 337        assert_rpm_device_not_suspended(dev_priv);
 338
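        /*
         * If the domain was used again while this timer was pending, keep
         * the forcewake reference and let the timer fire once more instead
         * of releasing the domain now.
         */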
 339        if (xchg(&domain->active, false))
 340                return HRTIMER_RESTART;
 341
 342        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 343        if (WARN_ON(domain->wake_count == 0))
 344                domain->wake_count++;
 345
 346        if (--domain->wake_count == 0)
 347                dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
 348
 349        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 350
 351        return HRTIMER_NORESTART;
 352}
 353
  354/* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
 355static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
 356                                         bool restore)
 357{
 358        unsigned long irqflags;
 359        struct intel_uncore_forcewake_domain *domain;
 360        int retry_count = 100;
 361        enum forcewake_domains fw, active_domains;
 362
 363        iosf_mbi_assert_punit_acquired();
 364
  365        /* Hold uncore.lock across reset to prevent any register access
  366         * with forcewake not set correctly. Wait until all pending
  367         * timers have run before taking the lock.
  368         */
 369        while (1) {
 370                unsigned int tmp;
 371
 372                active_domains = 0;
 373
 374                for_each_fw_domain(domain, dev_priv, tmp) {
 375                        smp_store_mb(domain->active, false);
 376                        if (hrtimer_cancel(&domain->timer) == 0)
 377                                continue;
 378
 379                        intel_uncore_fw_release_timer(&domain->timer);
 380                }
 381
 382                spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 383
 384                for_each_fw_domain(domain, dev_priv, tmp) {
 385                        if (hrtimer_active(&domain->timer))
 386                                active_domains |= domain->mask;
 387                }
 388
 389                if (active_domains == 0)
 390                        break;
 391
 392                if (--retry_count == 0) {
 393                        DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
 394                        break;
 395                }
 396
 397                spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 398                cond_resched();
 399        }
 400
 401        WARN_ON(active_domains);
 402
 403        fw = dev_priv->uncore.fw_domains_active;
 404        if (fw)
 405                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
 406
 407        fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
 408
 409        if (restore) { /* If reset with a user forcewake, try to restore */
 410                if (fw)
 411                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
 412
 413                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
 414                        dev_priv->uncore.fifo_count =
 415                                fifo_free_entries(dev_priv);
 416        }
 417
 418        if (!restore)
 419                assert_forcewakes_inactive(dev_priv);
 420
 421        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 422}
 423
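/*
 * Worked example with illustrative capability bits: a part reporting
 * 2 banks, ways index 1 (8 ways) and sets index 2 (2 sets) comes out as
 * 2 * 8 * 2 * 1MB = 32MB of eDRAM.
 */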
 424static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
 425{
 426        const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
 427        const unsigned int sets[4] = { 1, 1, 2, 2 };
 428        const u32 cap = dev_priv->edram_cap;
 429
 430        return EDRAM_NUM_BANKS(cap) *
 431                ways[EDRAM_WAYS_IDX(cap)] *
 432                sets[EDRAM_SETS_IDX(cap)] *
 433                1024 * 1024;
 434}
 435
 436u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
 437{
 438        if (!HAS_EDRAM(dev_priv))
 439                return 0;
 440
  441        /* The capability bits needed for the size calculation
  442         * are not present before gen9, so always return 128MB.
  443         */
 444        if (INTEL_GEN(dev_priv) < 9)
 445                return 128 * 1024 * 1024;
 446
 447        return gen9_edram_size(dev_priv);
 448}
 449
 450static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
 451{
 452        if (IS_HASWELL(dev_priv) ||
 453            IS_BROADWELL(dev_priv) ||
 454            INTEL_GEN(dev_priv) >= 9) {
 455                dev_priv->edram_cap = __raw_i915_read32(dev_priv,
 456                                                        HSW_EDRAM_CAP);
 457
 458                /* NB: We can't write IDICR yet because we do not have gt funcs
 459                 * set up */
 460        } else {
 461                dev_priv->edram_cap = 0;
 462        }
 463
 464        if (HAS_EDRAM(dev_priv))
 465                DRM_INFO("Found %lluMB of eDRAM\n",
 466                         intel_uncore_edram_size(dev_priv) / (1024 * 1024));
 467}
 468
 469static bool
 470fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 471{
 472        u32 dbg;
 473
 474        dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
 475        if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
 476                return false;
 477
 478        __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 479
 480        return true;
 481}
 482
 483static bool
 484vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 485{
 486        u32 cer;
 487
 488        cer = __raw_i915_read32(dev_priv, CLAIM_ER);
 489        if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
 490                return false;
 491
 492        __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
 493
 494        return true;
 495}
 496
 497static bool
 498gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
 499{
 500        u32 fifodbg;
 501
 502        fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
 503
 504        if (unlikely(fifodbg)) {
  505                DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
 506                __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
 507        }
 508
 509        return fifodbg;
 510}
 511
 512static bool
 513check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 514{
 515        bool ret = false;
 516
 517        if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
 518                ret |= fpga_check_for_unclaimed_mmio(dev_priv);
 519
 520        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 521                ret |= vlv_check_for_unclaimed_mmio(dev_priv);
 522
 523        if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
 524                ret |= gen6_check_for_fifo_debug(dev_priv);
 525
 526        return ret;
 527}
 528
 529static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
 530                                          bool restore_forcewake)
 531{
 532        /* clear out unclaimed reg detection bit */
 533        if (check_for_unclaimed_mmio(dev_priv))
 534                DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 535
 536        /* WaDisableShadowRegForCpd:chv */
 537        if (IS_CHERRYVIEW(dev_priv)) {
 538                __raw_i915_write32(dev_priv, GTFIFOCTL,
 539                                   __raw_i915_read32(dev_priv, GTFIFOCTL) |
 540                                   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
 541                                   GT_FIFO_CTL_RC6_POLICY_STALL);
 542        }
 543
 544        iosf_mbi_punit_acquire();
 545        intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
 546        iosf_mbi_punit_release();
 547}
 548
 549void intel_uncore_suspend(struct drm_i915_private *dev_priv)
 550{
 551        iosf_mbi_punit_acquire();
 552        iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
 553                &dev_priv->uncore.pmic_bus_access_nb);
 554        intel_uncore_forcewake_reset(dev_priv, false);
 555        iosf_mbi_punit_release();
 556}
 557
 558void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
 559{
 560        __intel_uncore_early_sanitize(dev_priv, true);
 561        iosf_mbi_register_pmic_bus_access_notifier(
 562                &dev_priv->uncore.pmic_bus_access_nb);
 563        i915_check_and_clear_faults(dev_priv);
 564}
 565
 566void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
 567{
 568        iosf_mbi_register_pmic_bus_access_notifier(
 569                &dev_priv->uncore.pmic_bus_access_nb);
 570}
 571
 572void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
 573{
 574        /* BIOS often leaves RC6 enabled, but disable it for hw init */
 575        intel_sanitize_gt_powersave(dev_priv);
 576}
 577
 578static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 579                                         enum forcewake_domains fw_domains)
 580{
 581        struct intel_uncore_forcewake_domain *domain;
 582        unsigned int tmp;
 583
 584        fw_domains &= dev_priv->uncore.fw_domains;
 585
 586        for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
 587                if (domain->wake_count++) {
 588                        fw_domains &= ~domain->mask;
 589                        domain->active = true;
 590                }
 591        }
 592
 593        if (fw_domains)
 594                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
 595}
 596
 597/**
 598 * intel_uncore_forcewake_get - grab forcewake domain references
 599 * @dev_priv: i915 device instance
 600 * @fw_domains: forcewake domains to get reference on
 601 *
  602 * This function can be used to get GT's forcewake domain references.
  603 * Normal register access will handle the forcewake domains automatically.
  604 * However, if some sequence requires the GT to not power down a particular
  605 * forcewake domain, this function should be called at the beginning of the
  606 * sequence, and the reference should subsequently be dropped by a symmetric
  607 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
  608 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 609 */
 610void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 611                                enum forcewake_domains fw_domains)
 612{
 613        unsigned long irqflags;
 614
 615        if (!dev_priv->uncore.funcs.force_wake_get)
 616                return;
 617
 618        assert_rpm_wakelock_held(dev_priv);
 619
 620        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 621        __intel_uncore_forcewake_get(dev_priv, fw_domains);
 622        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 623}
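
/*
 * Typical (illustrative) usage, bracketing a run of raw accesses that must
 * not see the GT power down part-way through:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... I915_READ_FW() / I915_WRITE_FW() sequence ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */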
 624
 625/**
 626 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 627 * @dev_priv: i915 device instance
 628 *
 629 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 630 * the GT powerwell and in the process disable our debugging for the
 631 * duration of userspace's bypass.
 632 */
 633void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
 634{
 635        spin_lock_irq(&dev_priv->uncore.lock);
 636        if (!dev_priv->uncore.user_forcewake.count++) {
 637                intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 638
 639                /* Save and disable mmio debugging for the user bypass */
 640                dev_priv->uncore.user_forcewake.saved_mmio_check =
 641                        dev_priv->uncore.unclaimed_mmio_check;
 642                dev_priv->uncore.user_forcewake.saved_mmio_debug =
 643                        i915_modparams.mmio_debug;
 644
 645                dev_priv->uncore.unclaimed_mmio_check = 0;
 646                i915_modparams.mmio_debug = 0;
 647        }
 648        spin_unlock_irq(&dev_priv->uncore.lock);
 649}
 650
 651/**
 652 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 653 * @dev_priv: i915 device instance
 654 *
 655 * This function complements intel_uncore_forcewake_user_get() and releases
 656 * the GT powerwell taken on behalf of the userspace bypass.
 657 */
 658void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
 659{
 660        spin_lock_irq(&dev_priv->uncore.lock);
 661        if (!--dev_priv->uncore.user_forcewake.count) {
 662                if (intel_uncore_unclaimed_mmio(dev_priv))
 663                        dev_info(dev_priv->drm.dev,
 664                                 "Invalid mmio detected during user access\n");
 665
 666                dev_priv->uncore.unclaimed_mmio_check =
 667                        dev_priv->uncore.user_forcewake.saved_mmio_check;
 668                i915_modparams.mmio_debug =
 669                        dev_priv->uncore.user_forcewake.saved_mmio_debug;
 670
 671                intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 672        }
 673        spin_unlock_irq(&dev_priv->uncore.lock);
 674}
 675
 676/**
 677 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 678 * @dev_priv: i915 device instance
 679 * @fw_domains: forcewake domains to get reference on
 680 *
 681 * See intel_uncore_forcewake_get(). This variant places the onus
 682 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 683 */
 684void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
 685                                        enum forcewake_domains fw_domains)
 686{
 687        lockdep_assert_held(&dev_priv->uncore.lock);
 688
 689        if (!dev_priv->uncore.funcs.force_wake_get)
 690                return;
 691
 692        __intel_uncore_forcewake_get(dev_priv, fw_domains);
 693}
 694
 695static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 696                                         enum forcewake_domains fw_domains)
 697{
 698        struct intel_uncore_forcewake_domain *domain;
 699        unsigned int tmp;
 700
 701        fw_domains &= dev_priv->uncore.fw_domains;
 702
 703        for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
 704                if (WARN_ON(domain->wake_count == 0))
 705                        continue;
 706
 707                if (--domain->wake_count) {
 708                        domain->active = true;
 709                        continue;
 710                }
 711
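                /*
                 * Last reference: defer the hardware release to the hrtimer
                 * so that back-to-back accesses do not thrash the forcewake
                 * ack handshake.
                 */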
 712                fw_domain_arm_timer(domain);
 713        }
 714}
 715
 716/**
 717 * intel_uncore_forcewake_put - release a forcewake domain reference
 718 * @dev_priv: i915 device instance
 719 * @fw_domains: forcewake domains to put references
 720 *
  721 * This function drops the device-level forcewake references for the
  722 * specified domains obtained by intel_uncore_forcewake_get().
 723 */
 724void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 725                                enum forcewake_domains fw_domains)
 726{
 727        unsigned long irqflags;
 728
 729        if (!dev_priv->uncore.funcs.force_wake_put)
 730                return;
 731
 732        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 733        __intel_uncore_forcewake_put(dev_priv, fw_domains);
 734        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 735}
 736
 737/**
  738 * intel_uncore_forcewake_put__locked - release forcewake domain references
  739 * @dev_priv: i915 device instance
  740 * @fw_domains: forcewake domains to drop references on
 741 *
 742 * See intel_uncore_forcewake_put(). This variant places the onus
 743 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 744 */
 745void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 746                                        enum forcewake_domains fw_domains)
 747{
 748        lockdep_assert_held(&dev_priv->uncore.lock);
 749
 750        if (!dev_priv->uncore.funcs.force_wake_put)
 751                return;
 752
 753        __intel_uncore_forcewake_put(dev_priv, fw_domains);
 754}
 755
 756void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 757{
 758        if (!dev_priv->uncore.funcs.force_wake_get)
 759                return;
 760
 761        WARN(dev_priv->uncore.fw_domains_active,
 762             "Expected all fw_domains to be inactive, but %08x are still on\n",
 763             dev_priv->uncore.fw_domains_active);
 764}
 765
 766void assert_forcewakes_active(struct drm_i915_private *dev_priv,
 767                              enum forcewake_domains fw_domains)
 768{
 769        if (!dev_priv->uncore.funcs.force_wake_get)
 770                return;
 771
 772        assert_rpm_wakelock_held(dev_priv);
 773
 774        fw_domains &= dev_priv->uncore.fw_domains;
 775        WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
 776             "Expected %08x fw_domains to be active, but %08x are off\n",
 777             fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
 778}
 779
 780/* We give fast paths for the really cool registers */
 781#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
 782
 783#define GEN11_NEEDS_FORCE_WAKE(reg) \
 784        ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
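/*
 * Gen11 adds a second MMIO window at 0x1c0000-0x1dbfff for the standalone
 * media engines (vdbox/vebox); see __gen11_fw_ranges below.
 */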
 785
 786#define __gen6_reg_read_fw_domains(offset) \
 787({ \
 788        enum forcewake_domains __fwd; \
 789        if (NEEDS_FORCE_WAKE(offset)) \
 790                __fwd = FORCEWAKE_RENDER; \
 791        else \
 792                __fwd = 0; \
 793        __fwd; \
 794})
 795
 796static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
 797{
 798        if (offset < entry->start)
 799                return -1;
 800        else if (offset > entry->end)
 801                return 1;
 802        else
 803                return 0;
 804}
 805
 806/* Copied and "macroized" from lib/bsearch.c */
 807#define BSEARCH(key, base, num, cmp) ({                                 \
 808        unsigned int start__ = 0, end__ = (num);                        \
 809        typeof(base) result__ = NULL;                                   \
 810        while (start__ < end__) {                                       \
 811                unsigned int mid__ = start__ + (end__ - start__) / 2;   \
 812                int ret__ = (cmp)((key), (base) + mid__);               \
 813                if (ret__ < 0) {                                        \
 814                        end__ = mid__;                                  \
 815                } else if (ret__ > 0) {                                 \
 816                        start__ = mid__ + 1;                            \
 817                } else {                                                \
 818                        result__ = (base) + mid__;                      \
 819                        break;                                          \
 820                }                                                       \
 821        }                                                               \
 822        result__;                                                       \
 823})
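
/*
 * BSEARCH() is used below by find_fw_domain() with fw_range_cmp() and by
 * the is_genX_shadowed() helpers with mmio_reg_cmp(); the tables they
 * search must therefore be kept sorted.
 */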
 824
 825static enum forcewake_domains
 826find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
 827{
 828        const struct intel_forcewake_range *entry;
 829
 830        entry = BSEARCH(offset,
 831                        dev_priv->uncore.fw_domains_table,
 832                        dev_priv->uncore.fw_domains_table_entries,
 833                        fw_range_cmp);
 834
 835        if (!entry)
 836                return 0;
 837
 838        /*
 839         * The list of FW domains depends on the SKU in gen11+ so we
 840         * can't determine it statically. We use FORCEWAKE_ALL and
 841         * translate it here to the list of available domains.
 842         */
 843        if (entry->domains == FORCEWAKE_ALL)
 844                return dev_priv->uncore.fw_domains;
 845
 846        WARN(entry->domains & ~dev_priv->uncore.fw_domains,
 847             "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
 848             entry->domains & ~dev_priv->uncore.fw_domains, offset);
 849
 850        return entry->domains;
 851}
 852
 853#define GEN_FW_RANGE(s, e, d) \
 854        { .start = (s), .end = (e), .domains = (d) }
 855
 856#define HAS_FWTABLE(dev_priv) \
 857        (INTEL_GEN(dev_priv) >= 9 || \
 858         IS_CHERRYVIEW(dev_priv) || \
 859         IS_VALLEYVIEW(dev_priv))
 860
 861/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
 862static const struct intel_forcewake_range __vlv_fw_ranges[] = {
 863        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
 864        GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
 865        GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
 866        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
 867        GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
 868        GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
 869        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
 870};
 871
 872#define __fwtable_reg_read_fw_domains(offset) \
 873({ \
 874        enum forcewake_domains __fwd = 0; \
 875        if (NEEDS_FORCE_WAKE((offset))) \
 876                __fwd = find_fw_domain(dev_priv, offset); \
 877        __fwd; \
 878})
 879
 880#define __gen11_fwtable_reg_read_fw_domains(offset) \
 881({ \
 882        enum forcewake_domains __fwd = 0; \
 883        if (GEN11_NEEDS_FORCE_WAKE((offset))) \
 884                __fwd = find_fw_domain(dev_priv, offset); \
 885        __fwd; \
 886})
 887
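/*
 * Writes to "shadowed" registers are tracked by the hardware across power
 * transitions, so the __gen*_reg_write_fw_domains() macros below skip the
 * forcewake dance for them.
 */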
 888/* *Must* be sorted by offset! See intel_shadow_table_check(). */
 889static const i915_reg_t gen8_shadowed_regs[] = {
 890        RING_TAIL(RENDER_RING_BASE),    /* 0x2000 (base) */
 891        GEN6_RPNSWREQ,                  /* 0xA008 */
 892        GEN6_RC_VIDEO_FREQ,             /* 0xA00C */
 893        RING_TAIL(GEN6_BSD_RING_BASE),  /* 0x12000 (base) */
 894        RING_TAIL(VEBOX_RING_BASE),     /* 0x1a000 (base) */
 895        RING_TAIL(BLT_RING_BASE),       /* 0x22000 (base) */
 896        /* TODO: Other registers are not yet used */
 897};
 898
 899static const i915_reg_t gen11_shadowed_regs[] = {
 900        RING_TAIL(RENDER_RING_BASE),            /* 0x2000 (base) */
 901        GEN6_RPNSWREQ,                          /* 0xA008 */
 902        GEN6_RC_VIDEO_FREQ,                     /* 0xA00C */
 903        RING_TAIL(BLT_RING_BASE),               /* 0x22000 (base) */
 904        RING_TAIL(GEN11_BSD_RING_BASE),         /* 0x1C0000 (base) */
 905        RING_TAIL(GEN11_BSD2_RING_BASE),        /* 0x1C4000 (base) */
 906        RING_TAIL(GEN11_VEBOX_RING_BASE),       /* 0x1C8000 (base) */
 907        RING_TAIL(GEN11_BSD3_RING_BASE),        /* 0x1D0000 (base) */
 908        RING_TAIL(GEN11_BSD4_RING_BASE),        /* 0x1D4000 (base) */
 909        RING_TAIL(GEN11_VEBOX2_RING_BASE),      /* 0x1D8000 (base) */
 910        /* TODO: Other registers are not yet used */
 911};
 912
 913static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
 914{
 915        u32 offset = i915_mmio_reg_offset(*reg);
 916
 917        if (key < offset)
 918                return -1;
 919        else if (key > offset)
 920                return 1;
 921        else
 922                return 0;
 923}
 924
 925#define __is_genX_shadowed(x) \
 926static bool is_gen##x##_shadowed(u32 offset) \
 927{ \
 928        const i915_reg_t *regs = gen##x##_shadowed_regs; \
 929        return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
 930                       mmio_reg_cmp); \
 931}
 932
 933__is_genX_shadowed(8)
 934__is_genX_shadowed(11)
 935
 936#define __gen8_reg_write_fw_domains(offset) \
 937({ \
 938        enum forcewake_domains __fwd; \
 939        if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
 940                __fwd = FORCEWAKE_RENDER; \
 941        else \
 942                __fwd = 0; \
 943        __fwd; \
 944})
 945
 946/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
 947static const struct intel_forcewake_range __chv_fw_ranges[] = {
 948        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
 949        GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
 950        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
 951        GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
 952        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
 953        GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
 954        GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
 955        GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
 956        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
 957        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
 958        GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
 959        GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
 960        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
 961        GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
 962        GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
 963        GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
 964};
 965
 966#define __fwtable_reg_write_fw_domains(offset) \
 967({ \
 968        enum forcewake_domains __fwd = 0; \
 969        if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
 970                __fwd = find_fw_domain(dev_priv, offset); \
 971        __fwd; \
 972})
 973
 974#define __gen11_fwtable_reg_write_fw_domains(offset) \
 975({ \
 976        enum forcewake_domains __fwd = 0; \
 977        if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
 978                __fwd = find_fw_domain(dev_priv, offset); \
 979        __fwd; \
 980})
 981
 982/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
 983static const struct intel_forcewake_range __gen9_fw_ranges[] = {
 984        GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
 985        GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
 986        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
 987        GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
 988        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
 989        GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
 990        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
 991        GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
 992        GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
 993        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
 994        GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
 995        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
 996        GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
 997        GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
 998        GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
 999        GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1000        GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
1001        GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1002        GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
1003        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1004        GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
1005        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1006        GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
1007        GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1008        GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
1009        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1010        GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
1011        GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
1012        GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
1013        GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1014        GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
1015        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1016};
1017
1018/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1019static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1020        GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
1021        GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1022        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1023        GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
1024        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1025        GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
1026        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1027        GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
1028        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1029        GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
1030        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1031        GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
1032        GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1033        GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
1034        GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
1035        GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
1036        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1037        GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
1038        GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1039        GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
1040        GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1041        GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
1042        GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1043        GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1044        GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
1045        GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
1046        GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
1047        GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1048        GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
1049        GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
1050};
1051
1052static void
1053ilk_dummy_write(struct drm_i915_private *dev_priv)
1054{
1055        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1056         * the chip from rc6 before touching it for real. MI_MODE is masked,
1057         * hence harmless to write 0 into. */
1058        __raw_i915_write32(dev_priv, MI_MODE, 0);
1059}
1060
1061static void
1062__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
1063                      const i915_reg_t reg,
1064                      const bool read,
1065                      const bool before)
1066{
1067        if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
1068                 "Unclaimed %s register 0x%x\n",
1069                 read ? "read from" : "write to",
1070                 i915_mmio_reg_offset(reg)))
1071                /* Only report the first N failures */
1072                i915_modparams.mmio_debug--;
1073}
1074
1075static inline void
1076unclaimed_reg_debug(struct drm_i915_private *dev_priv,
1077                    const i915_reg_t reg,
1078                    const bool read,
1079                    const bool before)
1080{
1081        if (likely(!i915_modparams.mmio_debug))
1082                return;
1083
1084        __unclaimed_reg_debug(dev_priv, reg, read, before);
1085}
1086
1087#define GEN2_READ_HEADER(x) \
1088        u##x val = 0; \
1089        assert_rpm_wakelock_held(dev_priv);
1090
1091#define GEN2_READ_FOOTER \
1092        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1093        return val
1094
1095#define __gen2_read(x) \
1096static u##x \
1097gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
1098        GEN2_READ_HEADER(x); \
1099        val = __raw_i915_read##x(dev_priv, reg); \
1100        GEN2_READ_FOOTER; \
1101}
1102
1103#define __gen5_read(x) \
1104static u##x \
1105gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
1106        GEN2_READ_HEADER(x); \
1107        ilk_dummy_write(dev_priv); \
1108        val = __raw_i915_read##x(dev_priv, reg); \
1109        GEN2_READ_FOOTER; \
1110}
1111
1112__gen5_read(8)
1113__gen5_read(16)
1114__gen5_read(32)
1115__gen5_read(64)
1116__gen2_read(8)
1117__gen2_read(16)
1118__gen2_read(32)
1119__gen2_read(64)
1120
1121#undef __gen5_read
1122#undef __gen2_read
1123
1124#undef GEN2_READ_FOOTER
1125#undef GEN2_READ_HEADER
1126
1127#define GEN6_READ_HEADER(x) \
1128        u32 offset = i915_mmio_reg_offset(reg); \
1129        unsigned long irqflags; \
1130        u##x val = 0; \
1131        assert_rpm_wakelock_held(dev_priv); \
1132        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
1133        unclaimed_reg_debug(dev_priv, reg, true, true)
1134
1135#define GEN6_READ_FOOTER \
1136        unclaimed_reg_debug(dev_priv, reg, true, false); \
1137        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
1138        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1139        return val
1140
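/*
 * Out-of-line slow path: this only runs when a needed domain is not already
 * awake, which keeps the inline __force_wake_auto() check cheap for the
 * common case.
 */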
1141static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
1142                                        enum forcewake_domains fw_domains)
1143{
1144        struct intel_uncore_forcewake_domain *domain;
1145        unsigned int tmp;
1146
1147        GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1148
1149        for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
1150                fw_domain_arm_timer(domain);
1151
1152        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
1153}
1154
1155static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
1156                                     enum forcewake_domains fw_domains)
1157{
1158        if (WARN_ON(!fw_domains))
1159                return;
1160
1161        /* Turn on all requested but inactive supported forcewake domains. */
1162        fw_domains &= dev_priv->uncore.fw_domains;
1163        fw_domains &= ~dev_priv->uncore.fw_domains_active;
1164
1165        if (fw_domains)
1166                ___force_wake_auto(dev_priv, fw_domains);
1167}
1168
1169#define __gen_read(func, x) \
1170static u##x \
1171func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
1172        enum forcewake_domains fw_engine; \
1173        GEN6_READ_HEADER(x); \
1174        fw_engine = __##func##_reg_read_fw_domains(offset); \
1175        if (fw_engine) \
1176                __force_wake_auto(dev_priv, fw_engine); \
1177        val = __raw_i915_read##x(dev_priv, reg); \
1178        GEN6_READ_FOOTER; \
1179}
1180#define __gen6_read(x) __gen_read(gen6, x)
1181#define __fwtable_read(x) __gen_read(fwtable, x)
1182#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)
1183
1184__gen11_fwtable_read(8)
1185__gen11_fwtable_read(16)
1186__gen11_fwtable_read(32)
1187__gen11_fwtable_read(64)
1188__fwtable_read(8)
1189__fwtable_read(16)
1190__fwtable_read(32)
1191__fwtable_read(64)
1192__gen6_read(8)
1193__gen6_read(16)
1194__gen6_read(32)
1195__gen6_read(64)
1196
1197#undef __gen11_fwtable_read
1198#undef __fwtable_read
1199#undef __gen6_read
1200#undef GEN6_READ_FOOTER
1201#undef GEN6_READ_HEADER
1202
1203#define GEN2_WRITE_HEADER \
1204        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1205        assert_rpm_wakelock_held(dev_priv); \
1206
1207#define GEN2_WRITE_FOOTER
1208
1209#define __gen2_write(x) \
1210static void \
1211gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1212        GEN2_WRITE_HEADER; \
1213        __raw_i915_write##x(dev_priv, reg, val); \
1214        GEN2_WRITE_FOOTER; \
1215}
1216
1217#define __gen5_write(x) \
1218static void \
1219gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1220        GEN2_WRITE_HEADER; \
1221        ilk_dummy_write(dev_priv); \
1222        __raw_i915_write##x(dev_priv, reg, val); \
1223        GEN2_WRITE_FOOTER; \
1224}
1225
1226__gen5_write(8)
1227__gen5_write(16)
1228__gen5_write(32)
1229__gen2_write(8)
1230__gen2_write(16)
1231__gen2_write(32)
1232
1233#undef __gen5_write
1234#undef __gen2_write
1235
1236#undef GEN2_WRITE_FOOTER
1237#undef GEN2_WRITE_HEADER
1238
1239#define GEN6_WRITE_HEADER \
1240        u32 offset = i915_mmio_reg_offset(reg); \
1241        unsigned long irqflags; \
1242        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1243        assert_rpm_wakelock_held(dev_priv); \
1244        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
1245        unclaimed_reg_debug(dev_priv, reg, false, true)
1246
1247#define GEN6_WRITE_FOOTER \
1248        unclaimed_reg_debug(dev_priv, reg, false, false); \
1249        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1250
1251#define __gen6_write(x) \
1252static void \
1253gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1254        GEN6_WRITE_HEADER; \
1255        if (NEEDS_FORCE_WAKE(offset)) \
1256                __gen6_gt_wait_for_fifo(dev_priv); \
1257        __raw_i915_write##x(dev_priv, reg, val); \
1258        GEN6_WRITE_FOOTER; \
1259}
1260
1261#define __gen_write(func, x) \
1262static void \
1263func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1264        enum forcewake_domains fw_engine; \
1265        GEN6_WRITE_HEADER; \
1266        fw_engine = __##func##_reg_write_fw_domains(offset); \
1267        if (fw_engine) \
1268                __force_wake_auto(dev_priv, fw_engine); \
1269        __raw_i915_write##x(dev_priv, reg, val); \
1270        GEN6_WRITE_FOOTER; \
1271}
1272#define __gen8_write(x) __gen_write(gen8, x)
1273#define __fwtable_write(x) __gen_write(fwtable, x)
1274#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)
1275
1276__gen11_fwtable_write(8)
1277__gen11_fwtable_write(16)
1278__gen11_fwtable_write(32)
1279__fwtable_write(8)
1280__fwtable_write(16)
1281__fwtable_write(32)
1282__gen8_write(8)
1283__gen8_write(16)
1284__gen8_write(32)
1285__gen6_write(8)
1286__gen6_write(16)
1287__gen6_write(32)
1288
1289#undef __gen11_fwtable_write
1290#undef __fwtable_write
1291#undef __gen8_write
1292#undef __gen6_write
1293#undef GEN6_WRITE_FOOTER
1294#undef GEN6_WRITE_HEADER
1295
1296#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
1297do { \
1298        (i915)->uncore.funcs.mmio_writeb = x##_write8; \
1299        (i915)->uncore.funcs.mmio_writew = x##_write16; \
1300        (i915)->uncore.funcs.mmio_writel = x##_write32; \
1301} while (0)
1302
1303#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
1304do { \
1305        (i915)->uncore.funcs.mmio_readb = x##_read8; \
1306        (i915)->uncore.funcs.mmio_readw = x##_read16; \
1307        (i915)->uncore.funcs.mmio_readl = x##_read32; \
1308        (i915)->uncore.funcs.mmio_readq = x##_read64; \
1309} while (0)
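
/*
 * Used when wiring up the per-platform mmio accessors during uncore init,
 * e.g. (illustrative):
 *
 *	ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
 *	ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
 */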
1310
1311
1312static void fw_domain_init(struct drm_i915_private *dev_priv,
1313                           enum forcewake_domain_id domain_id,
1314                           i915_reg_t reg_set,
1315                           i915_reg_t reg_ack)
1316{
1317        struct intel_uncore_forcewake_domain *d;
1318
1319        if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1320                return;
1321
1322        d = &dev_priv->uncore.fw_domain[domain_id];
1323
1324        WARN_ON(d->wake_count);
1325
1326        WARN_ON(!i915_mmio_reg_valid(reg_set));
1327        WARN_ON(!i915_mmio_reg_valid(reg_ack));
1328
1329        d->wake_count = 0;
1330        d->reg_set = reg_set;
1331        d->reg_ack = reg_ack;
1332
1333        d->id = domain_id;
1334
1335        BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1336        BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1337        BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1338        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
1339        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
1340        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
1341        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
1342        BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
1343        BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
1344
1345
1346        d->mask = BIT(domain_id);
1347
1348        hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1349        d->timer.function = intel_uncore_fw_release_timer;
1350
1351        dev_priv->uncore.fw_domains |= BIT(domain_id);
1352
1353        fw_domain_reset(dev_priv, d);
1354}
1355
1356static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1357{
1358        if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
1359                return;
1360
1361        if (IS_GEN6(dev_priv)) {
1362                dev_priv->uncore.fw_reset = 0;
1363                dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
1364                dev_priv->uncore.fw_clear = 0;
1365        } else {
1366                /* WaRsClearFWBitsAtReset:bdw,skl */
1367                dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
1368                dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1369                dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1370        }
1371
1372        if (INTEL_GEN(dev_priv) >= 11) {
1373                int i;
1374
1375                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1376                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1377                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1378                               FORCEWAKE_RENDER_GEN9,
1379                               FORCEWAKE_ACK_RENDER_GEN9);
1380                fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1381                               FORCEWAKE_BLITTER_GEN9,
1382                               FORCEWAKE_ACK_BLITTER_GEN9);
1383                for (i = 0; i < I915_MAX_VCS; i++) {
1384                        if (!HAS_ENGINE(dev_priv, _VCS(i)))
1385                                continue;
1386
1387                        fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
1388                                       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
1389                                       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
1390                }
1391                for (i = 0; i < I915_MAX_VECS; i++) {
1392                        if (!HAS_ENGINE(dev_priv, _VECS(i)))
1393                                continue;
1394
1395                        fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
1396                                       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
1397                                       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
1398                }
1399        } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
1400                dev_priv->uncore.funcs.force_wake_get =
1401                        fw_domains_get_with_fallback;
1402                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1403                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1404                               FORCEWAKE_RENDER_GEN9,
1405                               FORCEWAKE_ACK_RENDER_GEN9);
1406                fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1407                               FORCEWAKE_BLITTER_GEN9,
1408                               FORCEWAKE_ACK_BLITTER_GEN9);
1409                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1410                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1411        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1412                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1413                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1414                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1415                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1416                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1417                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1418        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1419                dev_priv->uncore.funcs.force_wake_get =
1420                        fw_domains_get_with_thread_status;
1421                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1422                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1423                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1424        } else if (IS_IVYBRIDGE(dev_priv)) {
1425                u32 ecobus;
1426
1427                /* IVB configs may use multi-threaded forcewake */
1428
1429                /* A small trick here - if the BIOS hasn't configured
1430                 * MT forcewake, and the device is in RC6, then
1431                 * fw_domains_get_with_thread_status() will not wake the
1432                 * device and the ECOBUS read will return zero, which is
1433                 * (correctly) interpreted by the test below as MT
1434                 * forcewake being disabled.
1435                 */
1436                dev_priv->uncore.funcs.force_wake_get =
1437                        fw_domains_get_with_thread_status;
1438                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1439
1440                /* We need to init first for ECOBUS access and then
1441                 * determine later if we want to reinit, in case MT access is
1442                 * not working. At this stage we don't know which flavour this
1443                 * IVB is, so it is better to also reset the gen6 fw registers
1444                 * before the ECOBUS check.
1445                 */
1446
1447                __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1448                __raw_posting_read(dev_priv, ECOBUS);
1449
1450                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1451                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1452
1453                spin_lock_irq(&dev_priv->uncore.lock);
1454                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
1455                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1456                fw_domains_put(dev_priv, FORCEWAKE_RENDER);
1457                spin_unlock_irq(&dev_priv->uncore.lock);
1458
1459                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1460                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1461                        DRM_INFO("when using vblank-synced partial screen updates.\n");
1462                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1463                                       FORCEWAKE, FORCEWAKE_ACK);
1464                }
1465        } else if (IS_GEN6(dev_priv)) {
1466                dev_priv->uncore.funcs.force_wake_get =
1467                        fw_domains_get_with_thread_status;
1468                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1469                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1470                               FORCEWAKE, FORCEWAKE_ACK);
1471        }
1472
1473        /* All future platforms are expected to require complex power gating */
1474        WARN_ON(dev_priv->uncore.fw_domains == 0);
1475}
1476
1477#define ASSIGN_FW_DOMAINS_TABLE(d) \
1478{ \
1479        dev_priv->uncore.fw_domains_table = \
1480                        (struct intel_forcewake_range *)(d); \
1481        dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
1482}
1483
1484static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1485                                         unsigned long action, void *data)
1486{
1487        struct drm_i915_private *dev_priv = container_of(nb,
1488                        struct drm_i915_private, uncore.pmic_bus_access_nb);
1489
1490        switch (action) {
1491        case MBI_PMIC_BUS_ACCESS_BEGIN:
1492                /*
1493                 * Forcewake all now to make sure that we don't need to do a
1494                 * forcewake later, which on systems where this notifier gets
1495                 * called requires the punit to access the shared pmic i2c
1496                 * bus. That bus will be busy after this notification, leading
1497                 * to "render: timed out waiting for forcewake ack request."
1498                 * errors.
1499                 *
1500                 * The notifier is unregistered during intel_runtime_suspend(),
1501                 * so it's ok to access the HW here without holding an RPM
1502                 * wake reference -> disable wakeref asserts for the duration
1503                 * of the access.
1504                 */
1505                disable_rpm_wakeref_asserts(dev_priv);
1506                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1507                enable_rpm_wakeref_asserts(dev_priv);
1508                break;
1509        case MBI_PMIC_BUS_ACCESS_END:
1510                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1511                break;
1512        }
1513
1514        return NOTIFY_OK;
1515}
1516
1517void intel_uncore_init(struct drm_i915_private *dev_priv)
1518{
1519        i915_check_vgpu(dev_priv);
1520
1521        intel_uncore_edram_detect(dev_priv);
1522        intel_uncore_fw_domains_init(dev_priv);
1523        __intel_uncore_early_sanitize(dev_priv, false);
1524
1525        dev_priv->uncore.unclaimed_mmio_check = 1;
1526        dev_priv->uncore.pmic_bus_access_nb.notifier_call =
1527                i915_pmic_bus_access_notifier;
1528
1529        if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
1530                ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
1531                ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
1532        } else if (IS_GEN5(dev_priv)) {
1533                ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
1534                ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
1535        } else if (IS_GEN(dev_priv, 6, 7)) {
1536                ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
1537
1538                if (IS_VALLEYVIEW(dev_priv)) {
1539                        ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
1540                        ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1541                } else {
1542                        ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
1543                }
1544        } else if (IS_GEN8(dev_priv)) {
1545                if (IS_CHERRYVIEW(dev_priv)) {
1546                        ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
1547                        ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1548                        ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1549
1550                } else {
1551                        ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
1552                        ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
1553                }
1554        } else if (IS_GEN(dev_priv, 9, 10)) {
1555                ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
1556                ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1557                ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1558        } else {
1559                ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
1560                ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
1561                ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
1562        }
1563
1564        iosf_mbi_register_pmic_bus_access_notifier(
1565                &dev_priv->uncore.pmic_bus_access_nb);
1566}
1567
1568void intel_uncore_fini(struct drm_i915_private *dev_priv)
1569{
1570        /* Paranoia: make sure we have disabled everything before we exit. */
1571        intel_uncore_sanitize(dev_priv);
1572
1573        iosf_mbi_punit_acquire();
1574        iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
1575                &dev_priv->uncore.pmic_bus_access_nb);
1576        intel_uncore_forcewake_reset(dev_priv, false);
1577        iosf_mbi_punit_release();
1578}
1579
1580static const struct reg_whitelist {
1581        i915_reg_t offset_ldw;
1582        i915_reg_t offset_udw;
1583        u16 gen_mask;
1584        u8 size;
1585} reg_read_whitelist[] = { {
1586        .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1587        .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1588        .gen_mask = INTEL_GEN_MASK(4, 11),
1589        .size = 8
1590} };
1591
1592int i915_reg_read_ioctl(struct drm_device *dev,
1593                        void *data, struct drm_file *file)
1594{
1595        struct drm_i915_private *dev_priv = to_i915(dev);
1596        struct drm_i915_reg_read *reg = data;
1597        struct reg_whitelist const *entry;
1598        unsigned int flags;
1599        int remain;
1600        int ret = 0;
1601
1602        entry = reg_read_whitelist;
1603        remain = ARRAY_SIZE(reg_read_whitelist);
1604        while (remain) {
1605                u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);
1606
1607                GEM_BUG_ON(!is_power_of_2(entry->size));
1608                GEM_BUG_ON(entry->size > 8);
1609                GEM_BUG_ON(entry_offset & (entry->size - 1));
1610
1611                if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
1612                    entry_offset == (reg->offset & -entry->size))
1613                        break;
1614                entry++;
1615                remain--;
1616        }
1617
1618        if (!remain)
1619                return -EINVAL;
1620
1621        flags = reg->offset & (entry->size - 1);
1622
1623        intel_runtime_pm_get(dev_priv);
1624        if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
1625                reg->val = I915_READ64_2x32(entry->offset_ldw,
1626                                            entry->offset_udw);
1627        else if (entry->size == 8 && flags == 0)
1628                reg->val = I915_READ64(entry->offset_ldw);
1629        else if (entry->size == 4 && flags == 0)
1630                reg->val = I915_READ(entry->offset_ldw);
1631        else if (entry->size == 2 && flags == 0)
1632                reg->val = I915_READ16(entry->offset_ldw);
1633        else if (entry->size == 1 && flags == 0)
1634                reg->val = I915_READ8(entry->offset_ldw);
1635        else
1636                ret = -EINVAL;
1637        intel_runtime_pm_put(dev_priv);
1638
1639        return ret;
1640}
1641
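/*
 * Illustrative sketch (not part of this file): how userspace might drive the
 * whitelisted read above through DRM_IOCTL_I915_REG_READ, with flag bits such
 * as I915_REG_READ_8B_WA carried in the low bits of the offset. The 0x2358
 * offset for RING_TIMESTAMP(RENDER_RING_BASE), the example_* name and the
 * already-open drm_fd are assumptions, not taken from this file. Returns 0 on
 * success, -1 with errno set otherwise.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_read_render_timestamp(int drm_fd, unsigned long long *ts)
{
        struct drm_i915_reg_read req = {
                /* 8-byte read using the 2x32 workaround path */
                .offset = 0x2358 | I915_REG_READ_8B_WA,
        };
        int err = ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &req);

        if (!err)
                *ts = req.val;
        return err;
}
#endif
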
1642static void gen3_stop_engine(struct intel_engine_cs *engine)
1643{
1644        struct drm_i915_private *dev_priv = engine->i915;
1645        const u32 base = engine->mmio_base;
1646        const i915_reg_t mode = RING_MI_MODE(base);
1647
1648        I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
1649        if (intel_wait_for_register_fw(dev_priv,
1650                                       mode,
1651                                       MODE_IDLE,
1652                                       MODE_IDLE,
1653                                       500))
1654                DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
1655                                 engine->name);
1656
1657        I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
1658        POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
1659
1660        I915_WRITE_FW(RING_HEAD(base), 0);
1661        I915_WRITE_FW(RING_TAIL(base), 0);
1662        POSTING_READ_FW(RING_TAIL(base));
1663
1664        /* The ring must be empty before it is disabled */
1665        I915_WRITE_FW(RING_CTL(base), 0);
1666
1667        /* Check acts as a post */
1668        if (I915_READ_FW(RING_HEAD(base)) != 0)
1669                DRM_DEBUG_DRIVER("%s: ring head not parked\n",
1670                                 engine->name);
1671}
1672
1673static void i915_stop_engines(struct drm_i915_private *dev_priv,
1674                              unsigned engine_mask)
1675{
1676        struct intel_engine_cs *engine;
1677        enum intel_engine_id id;
1678
1679        if (INTEL_GEN(dev_priv) < 3)
1680                return;
1681
1682        for_each_engine_masked(engine, dev_priv, engine_mask, id)
1683                gen3_stop_engine(engine);
1684}
1685
1686static bool i915_in_reset(struct pci_dev *pdev)
1687{
1688        u8 gdrst;
1689
1690        pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1691        return gdrst & GRDOM_RESET_STATUS;
1692}
1693
1694static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1695{
1696        struct pci_dev *pdev = dev_priv->drm.pdev;
1697        int err;
1698
1699        /* Assert reset for at least 20 usec, and wait for acknowledgement. */
1700        pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1701        usleep_range(50, 200);
1702        err = wait_for(i915_in_reset(pdev), 500);
1703
1704        /* Clear the reset request. */
1705        pci_write_config_byte(pdev, I915_GDRST, 0);
1706        usleep_range(50, 200);
1707        if (!err)
1708                err = wait_for(!i915_in_reset(pdev), 500);
1709
1710        return err;
1711}
1712
1713static bool g4x_reset_complete(struct pci_dev *pdev)
1714{
1715        u8 gdrst;
1716
1717        pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1718        return (gdrst & GRDOM_RESET_ENABLE) == 0;
1719}
1720
1721static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1722{
1723        struct pci_dev *pdev = dev_priv->drm.pdev;
1724
1725        pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1726        return wait_for(g4x_reset_complete(pdev), 500);
1727}
1728
1729static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1730{
1731        struct pci_dev *pdev = dev_priv->drm.pdev;
1732        int ret;
1733
1734        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1735        I915_WRITE(VDECCLK_GATE_D,
1736                   I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1737        POSTING_READ(VDECCLK_GATE_D);
1738
1739        pci_write_config_byte(pdev, I915_GDRST,
1740                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1741        ret = wait_for(g4x_reset_complete(pdev), 500);
1742        if (ret) {
1743                DRM_DEBUG_DRIVER("Wait for media reset failed\n");
1744                goto out;
1745        }
1746
1747        pci_write_config_byte(pdev, I915_GDRST,
1748                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
1749        ret = wait_for(g4x_reset_complete(pdev), 500);
1750        if (ret) {
1751                DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1752                goto out;
1753        }
1754
1755out:
1756        pci_write_config_byte(pdev, I915_GDRST, 0);
1757
1758        I915_WRITE(VDECCLK_GATE_D,
1759                   I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1760        POSTING_READ(VDECCLK_GATE_D);
1761
1762        return ret;
1763}
1764
1765static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1766                             unsigned engine_mask)
1767{
1768        int ret;
1769
1770        I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1771        ret = intel_wait_for_register(dev_priv,
1772                                      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1773                                      500);
1774        if (ret) {
1775                DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1776                goto out;
1777        }
1778
1779        I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1780        ret = intel_wait_for_register(dev_priv,
1781                                      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1782                                      500);
1783        if (ret) {
1784                DRM_DEBUG_DRIVER("Wait for media reset failed\n");
1785                goto out;
1786        }
1787
1788out:
1789        I915_WRITE(ILK_GDSR, 0);
1790        POSTING_READ(ILK_GDSR);
1791        return ret;
1792}
1793
1794/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1795static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1796                                u32 hw_domain_mask)
1797{
1798        int err;
1799
1800        /* GEN6_GDRST is not in the gt power well, no need to check
1801         * for fifo space for the write or forcewake the chip for
1802         * the read
1803         */
1804        __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
1805
1806        /* Wait for the device to ack the reset requests */
1807        err = intel_wait_for_register_fw(dev_priv,
1808                                          GEN6_GDRST, hw_domain_mask, 0,
1809                                          500);
1810        if (err)
1811                DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
1812                                 hw_domain_mask);
1813
1814        return err;
1815}
1816
1817/**
1818 * gen6_reset_engines - reset individual engines
1819 * @dev_priv: i915 device
1820 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1821 *
1822 * This function will reset the individual engines that are set in engine_mask.
1823 * If you provide ALL_ENGINES as the mask, a full global domain reset will be issued.
1824 *
1825 * Note: It is the responsibility of the caller to handle the difference between
1826 * requesting a full domain reset and a reset of all available individual engines.
1827 *
1828 * Returns 0 on success, nonzero on error.
1829 */
1830static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1831                              unsigned engine_mask)
1832{
1833        struct intel_engine_cs *engine;
1834        const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1835                [RCS] = GEN6_GRDOM_RENDER,
1836                [BCS] = GEN6_GRDOM_BLT,
1837                [VCS] = GEN6_GRDOM_MEDIA,
1838                [VCS2] = GEN8_GRDOM_MEDIA2,
1839                [VECS] = GEN6_GRDOM_VECS,
1840        };
1841        u32 hw_mask;
1842
1843        if (engine_mask == ALL_ENGINES) {
1844                hw_mask = GEN6_GRDOM_FULL;
1845        } else {
1846                unsigned int tmp;
1847
1848                hw_mask = 0;
1849                for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1850                        hw_mask |= hw_engine_mask[engine->id];
1851        }
1852
1853        return gen6_hw_domain_reset(dev_priv, hw_mask);
1854}
1855
1856/**
1857 * __intel_wait_for_register_fw - wait until register matches expected state
1858 * @dev_priv: the i915 device
1859 * @reg: the register to read
1860 * @mask: mask to apply to register value
1861 * @value: expected value
1862 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
1863 * @slow_timeout_ms: slow timeout in millisecond
1864 * @out_value: optional placeholder to hold the register value
1865 *
1866 * This routine waits until the target register @reg contains the expected
1867 * @value after applying the @mask, i.e. it waits until ::
1868 *
1869 *     (I915_READ_FW(reg) & mask) == value
1870 *
1871 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
1872 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
1873 * must not be larger than 20,000 microseconds.
1874 *
1875 * Note that this routine assumes the caller holds forcewake asserted; it is
1876 * not suitable for very long waits. See intel_wait_for_register() if you
1877 * wish to wait without holding forcewake for the duration (i.e. you expect
1878 * the wait to be slow).
1879 *
1880 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1881 */
1882int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
1883                                 i915_reg_t reg,
1884                                 u32 mask,
1885                                 u32 value,
1886                                 unsigned int fast_timeout_us,
1887                                 unsigned int slow_timeout_ms,
1888                                 u32 *out_value)
1889{
1890        u32 uninitialized_var(reg_value);
1891#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
1892        int ret;
1893
1894        /* Catch any overuse of this function */
1895        might_sleep_if(slow_timeout_ms);
1896        GEM_BUG_ON(fast_timeout_us > 20000);
1897
1898        ret = -ETIMEDOUT;
1899        if (fast_timeout_us && fast_timeout_us <= 20000)
1900                ret = _wait_for_atomic(done, fast_timeout_us, 0);
1901        if (ret && slow_timeout_ms)
1902                ret = wait_for(done, slow_timeout_ms);
1903
1904        if (out_value)
1905                *out_value = reg_value;
1906
1907        return ret;
1908#undef done
1909}
1910
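/*
 * Illustrative sketch (not part of this file): the intended calling pattern
 * for the _fw variant is to hold forcewake around a short, bounded wait and
 * to keep the slow timeout at zero in atomic context. The helper below and
 * its example_* name are hypothetical; the caller is assumed to hold a
 * runtime pm wakeref.
 */
static int __maybe_unused
example_wait_for_reg_fw(struct drm_i915_private *dev_priv,
                        i915_reg_t reg, u32 mask, u32 value)
{
        int err;

        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

        /* Tight wait: spin for up to 500us, no sleeping fallback. */
        err = __intel_wait_for_register_fw(dev_priv, reg, mask, value,
                                           500, 0, NULL);

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

        return err;
}
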
1911/**
1912 * __intel_wait_for_register - wait until register matches expected state
1913 * @dev_priv: the i915 device
1914 * @reg: the register to read
1915 * @mask: mask to apply to register value
1916 * @value: expected value
1917 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
1918 * @slow_timeout_ms: slow timeout in millisecond
1919 * @out_value: optional placeholder to hold the register value
1920 *
1921 * This routine waits until the target register @reg contains the expected
1922 * @value after applying the @mask, i.e. it waits until ::
1923 *
1924 *     (I915_READ(reg) & mask) == value
1925 *
1926 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
1927 *
1928 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1929 */
1930int __intel_wait_for_register(struct drm_i915_private *dev_priv,
1931                            i915_reg_t reg,
1932                            u32 mask,
1933                            u32 value,
1934                            unsigned int fast_timeout_us,
1935                            unsigned int slow_timeout_ms,
1936                            u32 *out_value)
1937{
1938        unsigned fw =
1939                intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
1940        u32 reg_value;
1941        int ret;
1942
1943        might_sleep();
1944
1945        spin_lock_irq(&dev_priv->uncore.lock);
1946        intel_uncore_forcewake_get__locked(dev_priv, fw);
1947
1948        ret = __intel_wait_for_register_fw(dev_priv,
1949                                           reg, mask, value,
1950                                           fast_timeout_us, 0, &reg_value);
1951
1952        intel_uncore_forcewake_put__locked(dev_priv, fw);
1953        spin_unlock_irq(&dev_priv->uncore.lock);
1954
1955        if (ret)
1956                ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
1957                                 (reg_value & mask) == value,
1958                                 slow_timeout_ms * 1000, 10, 1000);
1959
1960        if (out_value)
1961                *out_value = reg_value;
1962
1963        return ret;
1964}
1965
1966static int gen8_reset_engine_start(struct intel_engine_cs *engine)
1967{
1968        struct drm_i915_private *dev_priv = engine->i915;
1969        int ret;
1970
1971        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1972                      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1973
1974        ret = intel_wait_for_register_fw(dev_priv,
1975                                         RING_RESET_CTL(engine->mmio_base),
1976                                         RESET_CTL_READY_TO_RESET,
1977                                         RESET_CTL_READY_TO_RESET,
1978                                         700);
1979        if (ret)
1980                DRM_ERROR("%s: reset request timeout\n", engine->name);
1981
1982        return ret;
1983}
1984
1985static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
1986{
1987        struct drm_i915_private *dev_priv = engine->i915;
1988
1989        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1990                      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1991}
1992
1993static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1994                              unsigned engine_mask)
1995{
1996        struct intel_engine_cs *engine;
1997        unsigned int tmp;
1998
1999        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
2000                if (gen8_reset_engine_start(engine))
2001                        goto not_ready;
2002
2003        return gen6_reset_engines(dev_priv, engine_mask);
2004
2005not_ready:
2006        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
2007                gen8_reset_engine_cancel(engine);
2008
2009        return -EIO;
2010}
2011
2012typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
2013
2014static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
2015{
2016        if (!i915_modparams.reset)
2017                return NULL;
2018
2019        if (INTEL_GEN(dev_priv) >= 8)
2020                return gen8_reset_engines;
2021        else if (INTEL_GEN(dev_priv) >= 6)
2022                return gen6_reset_engines;
2023        else if (IS_GEN5(dev_priv))
2024                return ironlake_do_reset;
2025        else if (IS_G4X(dev_priv))
2026                return g4x_do_reset;
2027        else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
2028                return g33_do_reset;
2029        else if (INTEL_GEN(dev_priv) >= 3)
2030                return i915_do_reset;
2031        else
2032                return NULL;
2033}
2034
2035int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
2036{
2037        reset_func reset = intel_get_gpu_reset(dev_priv);
2038        int retry;
2039        int ret;
2040
2041        might_sleep();
2042
2043        /* If the power well sleeps during the reset, the reset
2044         * request may be dropped and never complete (causing -EIO).
2045         */
2046        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2047        for (retry = 0; retry < 3; retry++) {
2048
2049                /* We stop engines, otherwise we might get a failed reset and
2050                 * a dead gpu (on elk). Also even a modern gpu such as kbl can
2051                 * suffer a system hang if a batchbuffer is progressing when
2052                 * the reset is issued, regardless of the READY_TO_RESET ack.
2053                 * Thus assume it is best to stop engines on all gens
2054                 * where we have a gpu reset.
2055                 *
2056                 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
2057                 *
2058                 * FIXME: Wa for more modern gens needs to be validated
2059                 */
2060                i915_stop_engines(dev_priv, engine_mask);
2061
2062                ret = -ENODEV;
2063                if (reset)
2064                        ret = reset(dev_priv, engine_mask);
2065                if (ret != -ETIMEDOUT)
2066                        break;
2067
2068                cond_resched();
2069        }
2070        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2071
2072        return ret;
2073}
2074
2075bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
2076{
2077        return intel_get_gpu_reset(dev_priv) != NULL;
2078}
2079
2080bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
2081{
2082        return (dev_priv->info.has_reset_engine &&
2083                i915_modparams.reset >= 2);
2084}
2085
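/*
 * Illustrative sketch (not part of this file): callers would typically gate
 * a reset attempt on the capability checks above, preferring a per-engine
 * reset when supported and falling back to a full reset otherwise. The
 * example_* name and the choice of the render engine are hypothetical.
 */
static int __maybe_unused
example_reset_render(struct drm_i915_private *dev_priv)
{
        if (intel_has_reset_engine(dev_priv))
                return intel_gpu_reset(dev_priv, BIT(RCS));

        if (intel_has_gpu_reset(dev_priv))
                return intel_gpu_reset(dev_priv, ALL_ENGINES);

        return -ENODEV;
}
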
2086int intel_reset_guc(struct drm_i915_private *dev_priv)
2087{
2088        int ret;
2089
2090        GEM_BUG_ON(!HAS_GUC(dev_priv));
2091
2092        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2093        ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
2094        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2095
2096        return ret;
2097}
2098
2099bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
2100{
2101        return check_for_unclaimed_mmio(dev_priv);
2102}
2103
2104bool
2105intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
2106{
2107        if (unlikely(i915_modparams.mmio_debug ||
2108                     dev_priv->uncore.unclaimed_mmio_check <= 0))
2109                return false;
2110
2111        if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
2112                DRM_DEBUG("Unclaimed register detected, "
2113                          "enabling oneshot unclaimed register reporting. "
2114                          "Please use i915.mmio_debug=N for more information.\n");
2115                i915_modparams.mmio_debug++;
2116                dev_priv->uncore.unclaimed_mmio_check--;
2117                return true;
2118        }
2119
2120        return false;
2121}
2122
2123static enum forcewake_domains
2124intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
2125                                i915_reg_t reg)
2126{
2127        u32 offset = i915_mmio_reg_offset(reg);
2128        enum forcewake_domains fw_domains;
2129
2130        if (INTEL_GEN(dev_priv) >= 11) {
2131                fw_domains = __gen11_fwtable_reg_read_fw_domains(offset);
2132        } else if (HAS_FWTABLE(dev_priv)) {
2133                fw_domains = __fwtable_reg_read_fw_domains(offset);
2134        } else if (INTEL_GEN(dev_priv) >= 6) {
2135                fw_domains = __gen6_reg_read_fw_domains(offset);
2136        } else {
2137                WARN_ON(!IS_GEN(dev_priv, 2, 5));
2138                fw_domains = 0;
2139        }
2140
2141        WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
2142
2143        return fw_domains;
2144}
2145
2146static enum forcewake_domains
2147intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
2148                                 i915_reg_t reg)
2149{
2150        u32 offset = i915_mmio_reg_offset(reg);
2151        enum forcewake_domains fw_domains;
2152
2153        if (INTEL_GEN(dev_priv) >= 11) {
2154                fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
2155        } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
2156                fw_domains = __fwtable_reg_write_fw_domains(offset);
2157        } else if (IS_GEN8(dev_priv)) {
2158                fw_domains = __gen8_reg_write_fw_domains(offset);
2159        } else if (IS_GEN(dev_priv, 6, 7)) {
2160                fw_domains = FORCEWAKE_RENDER;
2161        } else {
2162                WARN_ON(!IS_GEN(dev_priv, 2, 5));
2163                fw_domains = 0;
2164        }
2165
2166        WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
2167
2168        return fw_domains;
2169}
2170
2171/**
2172 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2173 *                                  a register
2174 * @dev_priv: pointer to struct drm_i915_private
2175 * @reg: register in question
2176 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2177 *
2178 * Returns the set of forcewake domains that must be taken (with, for example,
2179 * intel_uncore_forcewake_get()) for the specified register to be accessible in
2180 * the specified mode (read, write or read/write) with the raw mmio accessors.
2181 *
2182 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
2183 * callers to do FIFO management on their own or risk losing writes.
2184 */
2185enum forcewake_domains
2186intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
2187                               i915_reg_t reg, unsigned int op)
2188{
2189        enum forcewake_domains fw_domains = 0;
2190
2191        WARN_ON(!op);
2192
2193        if (intel_vgpu_active(dev_priv))
2194                return 0;
2195
2196        if (op & FW_REG_READ)
2197                fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
2198
2199        if (op & FW_REG_WRITE)
2200                fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
2201
2202        return fw_domains;
2203}
2204
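/*
 * Illustrative sketch (not part of this file): pairing
 * intel_uncore_forcewake_for_reg() with the __locked forcewake helpers and
 * the raw I915_READ_FW() accessor, mirroring the pattern used by
 * __intel_wait_for_register() above. The example_* name is hypothetical.
 */
static u32 __maybe_unused
example_raw_read(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
        enum forcewake_domains fw =
                intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
        u32 val;

        spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, fw);

        val = I915_READ_FW(reg);

        intel_uncore_forcewake_put__locked(dev_priv, fw);
        spin_unlock_irq(&dev_priv->uncore.lock);

        return val;
}
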
2205#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2206#include "selftests/mock_uncore.c"
2207#include "selftests/intel_uncore.c"
2208#endif
2209