linux/drivers/gpu/drm/i915/intel_uncore.c
   1/*
   2 * Copyright © 2013 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 */
  23
  24#include "i915_drv.h"
  25#include "intel_drv.h"
  26#include "i915_vgpu.h"
  27
  28#include <linux/pm_runtime.h>
  29
  30#define FORCEWAKE_ACK_TIMEOUT_MS 50
  31
  32#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
  33
  34static const char * const forcewake_domain_names[] = {
  35        "render",
  36        "blitter",
  37        "media",
  38};
  39
  40const char *
  41intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
  42{
  43        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
  44
  45        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
  46                return forcewake_domain_names[id];
  47
  48        WARN_ON(id);
  49
  50        return "unknown";
  51}
  52
  53static inline void
  54fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
  55{
  56        WARN_ON(!i915_mmio_reg_valid(d->reg_set));
  57        __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
  58}
  59
  60static inline void
  61fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
  62{
  63        d->wake_count++;
  64        hrtimer_start_range_ns(&d->timer,
  65                               ktime_set(0, NSEC_PER_MSEC),
  66                               NSEC_PER_MSEC,
  67                               HRTIMER_MODE_REL);
  68}
  69
  70static inline void
  71fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
  72{
  73        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
  74                             FORCEWAKE_KERNEL) == 0,
  75                            FORCEWAKE_ACK_TIMEOUT_MS))
  76                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
  77                          intel_uncore_forcewake_domain_to_str(d->id));
  78}
  79
  80static inline void
  81fw_domain_get(const struct intel_uncore_forcewake_domain *d)
  82{
  83        __raw_i915_write32(d->i915, d->reg_set, d->val_set);
  84}
  85
  86static inline void
  87fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
  88{
  89        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
  90                             FORCEWAKE_KERNEL),
  91                            FORCEWAKE_ACK_TIMEOUT_MS))
  92                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
  93                          intel_uncore_forcewake_domain_to_str(d->id));
  94}
  95
  96static inline void
  97fw_domain_put(const struct intel_uncore_forcewake_domain *d)
  98{
  99        __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
 100}
 101
 102static inline void
 103fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
 104{
  105        /* something from the same cacheline, but not from the set register */
 106        if (i915_mmio_reg_valid(d->reg_post))
 107                __raw_posting_read(d->i915, d->reg_post);
 108}
 109
 110static void
 111fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 112{
 113        struct intel_uncore_forcewake_domain *d;
 114
 115        for_each_fw_domain_masked(d, fw_domains, dev_priv) {
 116                fw_domain_wait_ack_clear(d);
 117                fw_domain_get(d);
 118        }
 119
 120        for_each_fw_domain_masked(d, fw_domains, dev_priv)
 121                fw_domain_wait_ack(d);
 122}
 123
 124static void
 125fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 126{
 127        struct intel_uncore_forcewake_domain *d;
 128
 129        for_each_fw_domain_masked(d, fw_domains, dev_priv) {
 130                fw_domain_put(d);
 131                fw_domain_posting_read(d);
 132        }
 133}
 134
 135static void
 136fw_domains_posting_read(struct drm_i915_private *dev_priv)
 137{
 138        struct intel_uncore_forcewake_domain *d;
 139
  140        /* No need to do this for all domains, just the first one found */
 141        for_each_fw_domain(d, dev_priv) {
 142                fw_domain_posting_read(d);
 143                break;
 144        }
 145}
 146
 147static void
 148fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 149{
 150        struct intel_uncore_forcewake_domain *d;
 151
 152        if (dev_priv->uncore.fw_domains == 0)
 153                return;
 154
 155        for_each_fw_domain_masked(d, fw_domains, dev_priv)
 156                fw_domain_reset(d);
 157
 158        fw_domains_posting_read(dev_priv);
 159}
 160
 161static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 162{
 163        /* w/a for a sporadic read returning 0 by waiting for the GT
 164         * thread to wake up.
 165         */
 166        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
 167                                GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
 168                DRM_ERROR("GT thread status wait timed out\n");
 169}
 170
 171static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
 172                                              enum forcewake_domains fw_domains)
 173{
 174        fw_domains_get(dev_priv, fw_domains);
 175
 176        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
 177        __gen6_gt_wait_for_thread_c0(dev_priv);
 178}
 179
 180static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
 181{
 182        u32 gtfifodbg;
 183
 184        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
 185        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
 186                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
 187}
 188
 189static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
 190                                     enum forcewake_domains fw_domains)
 191{
 192        fw_domains_put(dev_priv, fw_domains);
 193        gen6_gt_check_fifodbg(dev_priv);
 194}
 195
 196static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
 197{
 198        u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
 199
 200        return count & GT_FIFO_FREE_ENTRIES_MASK;
 201}
 202
 203static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 204{
 205        int ret = 0;
 206
 207        /* On VLV, FIFO will be shared by both SW and HW.
  208         * So, we need to read the FREE_ENTRIES every time */
 209        if (IS_VALLEYVIEW(dev_priv))
 210                dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
 211
 212        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
 213                int loop = 500;
 214                u32 fifo = fifo_free_entries(dev_priv);
 215
 216                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
 217                        udelay(10);
 218                        fifo = fifo_free_entries(dev_priv);
 219                }
 220                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
 221                        ++ret;
 222                dev_priv->uncore.fifo_count = fifo;
 223        }
 224        dev_priv->uncore.fifo_count--;
 225
 226        return ret;
 227}
 228
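/*
 * Note on the auto-release scheme below: fw_domain_arm_timer() takes an
 * extra wake_count reference and arms a ~1ms hrtimer; when the timer
 * fires, intel_uncore_fw_release_timer() drops that reference and only
 * releases the hardware forcewake once wake_count reaches zero. This way
 * back-to-back register accesses reuse the same wakeref instead of
 * toggling forcewake for every single access.
 */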
 229static enum hrtimer_restart
 230intel_uncore_fw_release_timer(struct hrtimer *timer)
 231{
 232        struct intel_uncore_forcewake_domain *domain =
 233               container_of(timer, struct intel_uncore_forcewake_domain, timer);
 234        unsigned long irqflags;
 235
 236        assert_rpm_device_not_suspended(domain->i915);
 237
 238        spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
 239        if (WARN_ON(domain->wake_count == 0))
 240                domain->wake_count++;
 241
 242        if (--domain->wake_count == 0)
 243                domain->i915->uncore.funcs.force_wake_put(domain->i915,
 244                                                          1 << domain->id);
 245
 246        spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
 247
 248        return HRTIMER_NORESTART;
 249}
 250
 251void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
 252{
 253        struct drm_i915_private *dev_priv = dev->dev_private;
 254        unsigned long irqflags;
 255        struct intel_uncore_forcewake_domain *domain;
 256        int retry_count = 100;
 257        enum forcewake_domains fw = 0, active_domains;
 258
 259        /* Hold uncore.lock across reset to prevent any register access
 260         * with forcewake not set correctly. Wait until all pending
 261         * timers are run before holding.
 262         */
 263        while (1) {
 264                active_domains = 0;
 265
 266                for_each_fw_domain(domain, dev_priv) {
 267                        if (hrtimer_cancel(&domain->timer) == 0)
 268                                continue;
 269
 270                        intel_uncore_fw_release_timer(&domain->timer);
 271                }
 272
 273                spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 274
 275                for_each_fw_domain(domain, dev_priv) {
 276                        if (hrtimer_active(&domain->timer))
 277                                active_domains |= domain->mask;
 278                }
 279
 280                if (active_domains == 0)
 281                        break;
 282
 283                if (--retry_count == 0) {
 284                        DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
 285                        break;
 286                }
 287
 288                spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 289                cond_resched();
 290        }
 291
 292        WARN_ON(active_domains);
 293
 294        for_each_fw_domain(domain, dev_priv)
 295                if (domain->wake_count)
 296                        fw |= domain->mask;
 297
 298        if (fw)
 299                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
 300
 301        fw_domains_reset(dev_priv, FORCEWAKE_ALL);
 302
 303        if (restore) { /* If reset with a user forcewake, try to restore */
 304                if (fw)
 305                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
 306
 307                if (IS_GEN6(dev) || IS_GEN7(dev))
 308                        dev_priv->uncore.fifo_count =
 309                                fifo_free_entries(dev_priv);
 310        }
 311
 312        if (!restore)
 313                assert_forcewakes_inactive(dev_priv);
 314
 315        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 316}
 317
 318static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
 319{
 320        const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
 321        const unsigned int sets[4] = { 1, 1, 2, 2 };
 322        const u32 cap = dev_priv->edram_cap;
 323
 324        return EDRAM_NUM_BANKS(cap) *
 325                ways[EDRAM_WAYS_IDX(cap)] *
 326                sets[EDRAM_SETS_IDX(cap)] *
 327                1024 * 1024;
 328}
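/*
 * Worked example (illustrative values, not an observed cap register): a cap
 * decoding to 2 banks, ways index 1 (8 ways) and sets index 2 (2 sets)
 * yields 2 * 8 * 2 * 1024 * 1024 bytes, i.e. 32MB of eDRAM.
 */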
 329
 330u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
 331{
 332        if (!HAS_EDRAM(dev_priv))
 333                return 0;
 334
 335        /* The needed capability bits for size calculation
  336         * are not there with pre-gen9, so return 128MB always.
 337         */
 338        if (INTEL_GEN(dev_priv) < 9)
 339                return 128 * 1024 * 1024;
 340
 341        return gen9_edram_size(dev_priv);
 342}
 343
 344static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
 345{
 346        if (IS_HASWELL(dev_priv) ||
 347            IS_BROADWELL(dev_priv) ||
 348            INTEL_GEN(dev_priv) >= 9) {
 349                dev_priv->edram_cap = __raw_i915_read32(dev_priv,
 350                                                        HSW_EDRAM_CAP);
 351
 352                /* NB: We can't write IDICR yet because we do not have gt funcs
 353                 * set up */
 354        } else {
 355                dev_priv->edram_cap = 0;
 356        }
 357
 358        if (HAS_EDRAM(dev_priv))
 359                DRM_INFO("Found %lluMB of eDRAM\n",
 360                         intel_uncore_edram_size(dev_priv) / (1024 * 1024));
 361}
 362
 363static bool
 364fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 365{
 366        u32 dbg;
 367
 368        dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
 369        if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
 370                return false;
 371
 372        __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 373
 374        return true;
 375}
 376
 377static bool
 378vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 379{
 380        u32 cer;
 381
 382        cer = __raw_i915_read32(dev_priv, CLAIM_ER);
 383        if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
 384                return false;
 385
 386        __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
 387
 388        return true;
 389}
 390
 391static bool
 392check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 393{
 394        if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
 395                return fpga_check_for_unclaimed_mmio(dev_priv);
 396
 397        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 398                return vlv_check_for_unclaimed_mmio(dev_priv);
 399
 400        return false;
 401}
 402
 403static void __intel_uncore_early_sanitize(struct drm_device *dev,
 404                                          bool restore_forcewake)
 405{
 406        struct drm_i915_private *dev_priv = dev->dev_private;
 407
 408        /* clear out unclaimed reg detection bit */
 409        if (check_for_unclaimed_mmio(dev_priv))
 410                DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 411
 412        /* clear out old GT FIFO errors */
 413        if (IS_GEN6(dev) || IS_GEN7(dev))
 414                __raw_i915_write32(dev_priv, GTFIFODBG,
 415                                   __raw_i915_read32(dev_priv, GTFIFODBG));
 416
 417        /* WaDisableShadowRegForCpd:chv */
 418        if (IS_CHERRYVIEW(dev)) {
 419                __raw_i915_write32(dev_priv, GTFIFOCTL,
 420                                   __raw_i915_read32(dev_priv, GTFIFOCTL) |
 421                                   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
 422                                   GT_FIFO_CTL_RC6_POLICY_STALL);
 423        }
 424
 425        intel_uncore_forcewake_reset(dev, restore_forcewake);
 426}
 427
 428void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
 429{
 430        __intel_uncore_early_sanitize(dev, restore_forcewake);
 431        i915_check_and_clear_faults(dev);
 432}
 433
 434void intel_uncore_sanitize(struct drm_device *dev)
 435{
 436        i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
 437
 438        /* BIOS often leaves RC6 enabled, but disable it for hw init */
 439        intel_disable_gt_powersave(dev);
 440}
 441
 442static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 443                                         enum forcewake_domains fw_domains)
 444{
 445        struct intel_uncore_forcewake_domain *domain;
 446
 447        if (!dev_priv->uncore.funcs.force_wake_get)
 448                return;
 449
 450        fw_domains &= dev_priv->uncore.fw_domains;
 451
 452        for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
 453                if (domain->wake_count++)
 454                        fw_domains &= ~domain->mask;
 455        }
 456
 457        if (fw_domains)
 458                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
 459}
 460
 461/**
 462 * intel_uncore_forcewake_get - grab forcewake domain references
 463 * @dev_priv: i915 device instance
  464 * @fw_domains: forcewake domains to get references on
  465 *
  466 * This function can be used to get GT's forcewake domain references.
  467 * Normal register access will handle the forcewake domains automatically.
  468 * However, if some sequence requires the GT to not power down a particular
  469 * forcewake domain, this function should be called at the beginning of the
  470 * sequence. The reference should subsequently be dropped by a symmetric
  471 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
  472 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 473 */
 474void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 475                                enum forcewake_domains fw_domains)
 476{
 477        unsigned long irqflags;
 478
 479        if (!dev_priv->uncore.funcs.force_wake_get)
 480                return;
 481
 482        assert_rpm_wakelock_held(dev_priv);
 483
 484        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 485        __intel_uncore_forcewake_get(dev_priv, fw_domains);
 486        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 487}
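/*
 * Example usage (illustrative sketch only; SOME_RENDER_REG is a placeholder,
 * not a real register definition). Holding the domain explicitly keeps the
 * GT awake across the whole sequence, so the per-access forcewake handling
 * inside I915_READ()/I915_WRITE() does not bounce the hardware:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	I915_WRITE(SOME_RENDER_REG, 0);
 *	(void) I915_READ(SOME_RENDER_REG);
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */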
 488
 489/**
 490 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 491 * @dev_priv: i915 device instance
  492 * @fw_domains: forcewake domains to get references on
 493 *
 494 * See intel_uncore_forcewake_get(). This variant places the onus
 495 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 496 */
 497void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
 498                                        enum forcewake_domains fw_domains)
 499{
 500        assert_spin_locked(&dev_priv->uncore.lock);
 501
 502        if (!dev_priv->uncore.funcs.force_wake_get)
 503                return;
 504
 505        __intel_uncore_forcewake_get(dev_priv, fw_domains);
 506}
 507
 508static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 509                                         enum forcewake_domains fw_domains)
 510{
 511        struct intel_uncore_forcewake_domain *domain;
 512
 513        if (!dev_priv->uncore.funcs.force_wake_put)
 514                return;
 515
 516        fw_domains &= dev_priv->uncore.fw_domains;
 517
 518        for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
 519                if (WARN_ON(domain->wake_count == 0))
 520                        continue;
 521
 522                if (--domain->wake_count)
 523                        continue;
 524
 525                fw_domain_arm_timer(domain);
 526        }
 527}
 528
 529/**
 530 * intel_uncore_forcewake_put - release a forcewake domain reference
 531 * @dev_priv: i915 device instance
  532 * @fw_domains: forcewake domains to put references on
  533 *
  534 * This function drops the device-level forcewakes for the specified
  535 * domains obtained by intel_uncore_forcewake_get().
 536 */
 537void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 538                                enum forcewake_domains fw_domains)
 539{
 540        unsigned long irqflags;
 541
 542        if (!dev_priv->uncore.funcs.force_wake_put)
 543                return;
 544
 545        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 546        __intel_uncore_forcewake_put(dev_priv, fw_domains);
 547        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 548}
 549
 550/**
  551 * intel_uncore_forcewake_put__locked - release forcewake domain references
  552 * @dev_priv: i915 device instance
  553 * @fw_domains: forcewake domains to put references on
 554 *
 555 * See intel_uncore_forcewake_put(). This variant places the onus
 556 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 557 */
 558void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 559                                        enum forcewake_domains fw_domains)
 560{
 561        assert_spin_locked(&dev_priv->uncore.lock);
 562
 563        if (!dev_priv->uncore.funcs.force_wake_put)
 564                return;
 565
 566        __intel_uncore_forcewake_put(dev_priv, fw_domains);
 567}
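/*
 * Locked-variant sketch (illustrative only): when the caller already holds
 * dev_priv->uncore.lock, e.g. in an irq-off path, the __locked variants are
 * used together with the raw accessors so the lock is not taken recursively:
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	... __raw_i915_read32()/__raw_i915_write32() accesses ...
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */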
 568
 569void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 570{
 571        struct intel_uncore_forcewake_domain *domain;
 572
 573        if (!dev_priv->uncore.funcs.force_wake_get)
 574                return;
 575
 576        for_each_fw_domain(domain, dev_priv)
 577                WARN_ON(domain->wake_count);
 578}
 579
 580/* We give fast paths for the really cool registers */
 581#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
 582
 583#define __gen6_reg_read_fw_domains(offset) \
 584({ \
 585        enum forcewake_domains __fwd; \
 586        if (NEEDS_FORCE_WAKE(offset)) \
 587                __fwd = FORCEWAKE_RENDER; \
 588        else \
 589                __fwd = 0; \
 590        __fwd; \
 591})
 592
 593#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
 594
 595#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
 596        (REG_RANGE((reg), 0x2000, 0x4000) || \
 597         REG_RANGE((reg), 0x5000, 0x8000) || \
 598         REG_RANGE((reg), 0xB000, 0x12000) || \
 599         REG_RANGE((reg), 0x2E000, 0x30000))
 600
 601#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
 602        (REG_RANGE((reg), 0x12000, 0x14000) || \
 603         REG_RANGE((reg), 0x22000, 0x24000) || \
 604         REG_RANGE((reg), 0x30000, 0x40000))
 605
 606#define __vlv_reg_read_fw_domains(offset) \
 607({ \
 608        enum forcewake_domains __fwd = 0; \
 609        if (!NEEDS_FORCE_WAKE(offset)) \
 610                __fwd = 0; \
 611        else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
 612                __fwd = FORCEWAKE_RENDER; \
 613        else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
 614                __fwd = FORCEWAKE_MEDIA; \
 615        __fwd; \
 616})
 617
 618static const i915_reg_t gen8_shadowed_regs[] = {
 619        GEN6_RPNSWREQ,
 620        GEN6_RC_VIDEO_FREQ,
 621        RING_TAIL(RENDER_RING_BASE),
 622        RING_TAIL(GEN6_BSD_RING_BASE),
 623        RING_TAIL(VEBOX_RING_BASE),
 624        RING_TAIL(BLT_RING_BASE),
 625        /* TODO: Other registers are not yet used */
 626};
 627
 628static bool is_gen8_shadowed(u32 offset)
 629{
 630        int i;
 631        for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
 632                if (offset == gen8_shadowed_regs[i].reg)
 633                        return true;
 634
 635        return false;
 636}
 637
 638#define __gen8_reg_write_fw_domains(offset) \
 639({ \
 640        enum forcewake_domains __fwd; \
 641        if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
 642                __fwd = FORCEWAKE_RENDER; \
 643        else \
 644                __fwd = 0; \
 645        __fwd; \
 646})
 647
 648#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
 649        (REG_RANGE((reg), 0x2000, 0x4000) || \
 650         REG_RANGE((reg), 0x5200, 0x8000) || \
 651         REG_RANGE((reg), 0x8300, 0x8500) || \
 652         REG_RANGE((reg), 0xB000, 0xB480) || \
 653         REG_RANGE((reg), 0xE000, 0xE800))
 654
 655#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
 656        (REG_RANGE((reg), 0x8800, 0x8900) || \
 657         REG_RANGE((reg), 0xD000, 0xD800) || \
 658         REG_RANGE((reg), 0x12000, 0x14000) || \
 659         REG_RANGE((reg), 0x1A000, 0x1C000) || \
 660         REG_RANGE((reg), 0x1E800, 0x1EA00) || \
 661         REG_RANGE((reg), 0x30000, 0x38000))
 662
 663#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
 664        (REG_RANGE((reg), 0x4000, 0x5000) || \
 665         REG_RANGE((reg), 0x8000, 0x8300) || \
 666         REG_RANGE((reg), 0x8500, 0x8600) || \
 667         REG_RANGE((reg), 0x9000, 0xB000) || \
 668         REG_RANGE((reg), 0xF000, 0x10000))
 669
 670#define __chv_reg_read_fw_domains(offset) \
 671({ \
 672        enum forcewake_domains __fwd = 0; \
 673        if (!NEEDS_FORCE_WAKE(offset)) \
 674                __fwd = 0; \
 675        else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
 676                __fwd = FORCEWAKE_RENDER; \
 677        else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
 678                __fwd = FORCEWAKE_MEDIA; \
 679        else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
 680                __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
 681        __fwd; \
 682})
 683
 684#define __chv_reg_write_fw_domains(offset) \
 685({ \
 686        enum forcewake_domains __fwd = 0; \
 687        if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
 688                __fwd = 0; \
 689        else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
 690                __fwd = FORCEWAKE_RENDER; \
 691        else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
 692                __fwd = FORCEWAKE_MEDIA; \
 693        else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
 694                __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
 695        __fwd; \
 696})
 697
 698#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
 699        REG_RANGE((reg), 0xB00,  0x2000)
 700
 701#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
 702        (REG_RANGE((reg), 0x2000, 0x2700) || \
 703         REG_RANGE((reg), 0x3000, 0x4000) || \
 704         REG_RANGE((reg), 0x5200, 0x8000) || \
 705         REG_RANGE((reg), 0x8140, 0x8160) || \
 706         REG_RANGE((reg), 0x8300, 0x8500) || \
 707         REG_RANGE((reg), 0x8C00, 0x8D00) || \
 708         REG_RANGE((reg), 0xB000, 0xB480) || \
 709         REG_RANGE((reg), 0xE000, 0xE900) || \
 710         REG_RANGE((reg), 0x24400, 0x24800))
 711
 712#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
 713        (REG_RANGE((reg), 0x8130, 0x8140) || \
 714         REG_RANGE((reg), 0x8800, 0x8A00) || \
 715         REG_RANGE((reg), 0xD000, 0xD800) || \
 716         REG_RANGE((reg), 0x12000, 0x14000) || \
 717         REG_RANGE((reg), 0x1A000, 0x1EA00) || \
 718         REG_RANGE((reg), 0x30000, 0x40000))
 719
 720#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
 721        REG_RANGE((reg), 0x9400, 0x9800)
 722
 723#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
 724        ((reg) < 0x40000 && \
 725         !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
 726         !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
 727         !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
 728         !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
 729
 730#define SKL_NEEDS_FORCE_WAKE(reg) \
 731        ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
 732
 733#define __gen9_reg_read_fw_domains(offset) \
 734({ \
 735        enum forcewake_domains __fwd; \
 736        if (!SKL_NEEDS_FORCE_WAKE(offset)) \
 737                __fwd = 0; \
 738        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
 739                __fwd = FORCEWAKE_RENDER; \
 740        else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
 741                __fwd = FORCEWAKE_MEDIA; \
 742        else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
 743                __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
 744        else \
 745                __fwd = FORCEWAKE_BLITTER; \
 746        __fwd; \
 747})
 748
 749static const i915_reg_t gen9_shadowed_regs[] = {
 750        RING_TAIL(RENDER_RING_BASE),
 751        RING_TAIL(GEN6_BSD_RING_BASE),
 752        RING_TAIL(VEBOX_RING_BASE),
 753        RING_TAIL(BLT_RING_BASE),
 754        GEN6_RPNSWREQ,
 755        GEN6_RC_VIDEO_FREQ,
 756        /* TODO: Other registers are not yet used */
 757};
 758
 759static bool is_gen9_shadowed(u32 offset)
 760{
 761        int i;
 762        for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
 763                if (offset == gen9_shadowed_regs[i].reg)
 764                        return true;
 765
 766        return false;
 767}
 768
 769#define __gen9_reg_write_fw_domains(offset) \
 770({ \
 771        enum forcewake_domains __fwd; \
 772        if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
 773                __fwd = 0; \
 774        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
 775                __fwd = FORCEWAKE_RENDER; \
 776        else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
 777                __fwd = FORCEWAKE_MEDIA; \
 778        else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
 779                __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
 780        else \
 781                __fwd = FORCEWAKE_BLITTER; \
 782        __fwd; \
 783})
 784
 785static void
 786ilk_dummy_write(struct drm_i915_private *dev_priv)
 787{
 788        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
 789         * the chip from rc6 before touching it for real. MI_MODE is masked,
 790         * hence harmless to write 0 into. */
 791        __raw_i915_write32(dev_priv, MI_MODE, 0);
 792}
 793
 794static void
 795__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 796                      const i915_reg_t reg,
 797                      const bool read,
 798                      const bool before)
 799{
 800        if (WARN(check_for_unclaimed_mmio(dev_priv),
 801                 "Unclaimed register detected %s %s register 0x%x\n",
 802                 before ? "before" : "after",
 803                 read ? "reading" : "writing to",
 804                 i915_mmio_reg_offset(reg)))
 805                i915.mmio_debug--; /* Only report the first N failures */
 806}
 807
 808static inline void
 809unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 810                    const i915_reg_t reg,
 811                    const bool read,
 812                    const bool before)
 813{
 814        if (likely(!i915.mmio_debug))
 815                return;
 816
 817        __unclaimed_reg_debug(dev_priv, reg, read, before);
 818}
 819
 820#define GEN2_READ_HEADER(x) \
 821        u##x val = 0; \
 822        assert_rpm_wakelock_held(dev_priv);
 823
 824#define GEN2_READ_FOOTER \
 825        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
 826        return val
 827
 828#define __gen2_read(x) \
 829static u##x \
 830gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 831        GEN2_READ_HEADER(x); \
 832        val = __raw_i915_read##x(dev_priv, reg); \
 833        GEN2_READ_FOOTER; \
 834}
 835
 836#define __gen5_read(x) \
 837static u##x \
 838gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 839        GEN2_READ_HEADER(x); \
 840        ilk_dummy_write(dev_priv); \
 841        val = __raw_i915_read##x(dev_priv, reg); \
 842        GEN2_READ_FOOTER; \
 843}
 844
 845__gen5_read(8)
 846__gen5_read(16)
 847__gen5_read(32)
 848__gen5_read(64)
 849__gen2_read(8)
 850__gen2_read(16)
 851__gen2_read(32)
 852__gen2_read(64)
 853
 854#undef __gen5_read
 855#undef __gen2_read
 856
 857#undef GEN2_READ_FOOTER
 858#undef GEN2_READ_HEADER
 859
 860#define GEN6_READ_HEADER(x) \
 861        u32 offset = i915_mmio_reg_offset(reg); \
 862        unsigned long irqflags; \
 863        u##x val = 0; \
 864        assert_rpm_wakelock_held(dev_priv); \
 865        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
 866        unclaimed_reg_debug(dev_priv, reg, true, true)
 867
 868#define GEN6_READ_FOOTER \
 869        unclaimed_reg_debug(dev_priv, reg, true, false); \
 870        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 871        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
 872        return val
 873
 874static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
 875                                     enum forcewake_domains fw_domains)
 876{
 877        struct intel_uncore_forcewake_domain *domain;
 878
 879        if (WARN_ON(!fw_domains))
 880                return;
 881
  882        /* Ideally GCC would constant-fold and eliminate this loop */
 883        for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
 884                if (domain->wake_count) {
 885                        fw_domains &= ~domain->mask;
 886                        continue;
 887                }
 888
 889                fw_domain_arm_timer(domain);
 890        }
 891
 892        if (fw_domains)
 893                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
 894}
 895
 896#define __gen6_read(x) \
 897static u##x \
 898gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 899        enum forcewake_domains fw_engine; \
 900        GEN6_READ_HEADER(x); \
 901        fw_engine = __gen6_reg_read_fw_domains(offset); \
 902        if (fw_engine) \
 903                __force_wake_auto(dev_priv, fw_engine); \
 904        val = __raw_i915_read##x(dev_priv, reg); \
 905        GEN6_READ_FOOTER; \
 906}
 907
 908#define __vlv_read(x) \
 909static u##x \
 910vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 911        enum forcewake_domains fw_engine; \
 912        GEN6_READ_HEADER(x); \
 913        fw_engine = __vlv_reg_read_fw_domains(offset); \
 914        if (fw_engine) \
 915                __force_wake_auto(dev_priv, fw_engine); \
 916        val = __raw_i915_read##x(dev_priv, reg); \
 917        GEN6_READ_FOOTER; \
 918}
 919
 920#define __chv_read(x) \
 921static u##x \
 922chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 923        enum forcewake_domains fw_engine; \
 924        GEN6_READ_HEADER(x); \
 925        fw_engine = __chv_reg_read_fw_domains(offset); \
 926        if (fw_engine) \
 927                __force_wake_auto(dev_priv, fw_engine); \
 928        val = __raw_i915_read##x(dev_priv, reg); \
 929        GEN6_READ_FOOTER; \
 930}
 931
 932#define __gen9_read(x) \
 933static u##x \
 934gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 935        enum forcewake_domains fw_engine; \
 936        GEN6_READ_HEADER(x); \
 937        fw_engine = __gen9_reg_read_fw_domains(offset); \
 938        if (fw_engine) \
 939                __force_wake_auto(dev_priv, fw_engine); \
 940        val = __raw_i915_read##x(dev_priv, reg); \
 941        GEN6_READ_FOOTER; \
 942}
 943
 944__gen9_read(8)
 945__gen9_read(16)
 946__gen9_read(32)
 947__gen9_read(64)
 948__chv_read(8)
 949__chv_read(16)
 950__chv_read(32)
 951__chv_read(64)
 952__vlv_read(8)
 953__vlv_read(16)
 954__vlv_read(32)
 955__vlv_read(64)
 956__gen6_read(8)
 957__gen6_read(16)
 958__gen6_read(32)
 959__gen6_read(64)
 960
 961#undef __gen9_read
 962#undef __chv_read
 963#undef __vlv_read
 964#undef __gen6_read
 965#undef GEN6_READ_FOOTER
 966#undef GEN6_READ_HEADER
 967
 968#define VGPU_READ_HEADER(x) \
 969        unsigned long irqflags; \
 970        u##x val = 0; \
 971        assert_rpm_device_not_suspended(dev_priv); \
 972        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 973
 974#define VGPU_READ_FOOTER \
 975        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 976        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
 977        return val
 978
 979#define __vgpu_read(x) \
 980static u##x \
 981vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 982        VGPU_READ_HEADER(x); \
 983        val = __raw_i915_read##x(dev_priv, reg); \
 984        VGPU_READ_FOOTER; \
 985}
 986
 987__vgpu_read(8)
 988__vgpu_read(16)
 989__vgpu_read(32)
 990__vgpu_read(64)
 991
 992#undef __vgpu_read
 993#undef VGPU_READ_FOOTER
 994#undef VGPU_READ_HEADER
 995
 996#define GEN2_WRITE_HEADER \
 997        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
 998        assert_rpm_wakelock_held(dev_priv); \
 999
1000#define GEN2_WRITE_FOOTER
1001
1002#define __gen2_write(x) \
1003static void \
1004gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1005        GEN2_WRITE_HEADER; \
1006        __raw_i915_write##x(dev_priv, reg, val); \
1007        GEN2_WRITE_FOOTER; \
1008}
1009
1010#define __gen5_write(x) \
1011static void \
1012gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1013        GEN2_WRITE_HEADER; \
1014        ilk_dummy_write(dev_priv); \
1015        __raw_i915_write##x(dev_priv, reg, val); \
1016        GEN2_WRITE_FOOTER; \
1017}
1018
1019__gen5_write(8)
1020__gen5_write(16)
1021__gen5_write(32)
1022__gen5_write(64)
1023__gen2_write(8)
1024__gen2_write(16)
1025__gen2_write(32)
1026__gen2_write(64)
1027
1028#undef __gen5_write
1029#undef __gen2_write
1030
1031#undef GEN2_WRITE_FOOTER
1032#undef GEN2_WRITE_HEADER
1033
1034#define GEN6_WRITE_HEADER \
1035        u32 offset = i915_mmio_reg_offset(reg); \
1036        unsigned long irqflags; \
1037        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1038        assert_rpm_wakelock_held(dev_priv); \
1039        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
1040        unclaimed_reg_debug(dev_priv, reg, false, true)
1041
1042#define GEN6_WRITE_FOOTER \
1043        unclaimed_reg_debug(dev_priv, reg, false, false); \
1044        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1045
1046#define __gen6_write(x) \
1047static void \
1048gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1049        u32 __fifo_ret = 0; \
1050        GEN6_WRITE_HEADER; \
1051        if (NEEDS_FORCE_WAKE(offset)) { \
1052                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1053        } \
1054        __raw_i915_write##x(dev_priv, reg, val); \
1055        if (unlikely(__fifo_ret)) { \
1056                gen6_gt_check_fifodbg(dev_priv); \
1057        } \
1058        GEN6_WRITE_FOOTER; \
1059}
1060
1061#define __hsw_write(x) \
1062static void \
1063hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1064        u32 __fifo_ret = 0; \
1065        GEN6_WRITE_HEADER; \
1066        if (NEEDS_FORCE_WAKE(offset)) { \
1067                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1068        } \
1069        __raw_i915_write##x(dev_priv, reg, val); \
1070        if (unlikely(__fifo_ret)) { \
1071                gen6_gt_check_fifodbg(dev_priv); \
1072        } \
1073        GEN6_WRITE_FOOTER; \
1074}
1075
1076#define __gen8_write(x) \
1077static void \
1078gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1079        enum forcewake_domains fw_engine; \
1080        GEN6_WRITE_HEADER; \
1081        fw_engine = __gen8_reg_write_fw_domains(offset); \
1082        if (fw_engine) \
1083                __force_wake_auto(dev_priv, fw_engine); \
1084        __raw_i915_write##x(dev_priv, reg, val); \
1085        GEN6_WRITE_FOOTER; \
1086}
1087
1088#define __chv_write(x) \
1089static void \
1090chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1091        enum forcewake_domains fw_engine; \
1092        GEN6_WRITE_HEADER; \
1093        fw_engine = __chv_reg_write_fw_domains(offset); \
1094        if (fw_engine) \
1095                __force_wake_auto(dev_priv, fw_engine); \
1096        __raw_i915_write##x(dev_priv, reg, val); \
1097        GEN6_WRITE_FOOTER; \
1098}
1099
1100#define __gen9_write(x) \
1101static void \
1102gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
1103                bool trace) { \
1104        enum forcewake_domains fw_engine; \
1105        GEN6_WRITE_HEADER; \
1106        fw_engine = __gen9_reg_write_fw_domains(offset); \
1107        if (fw_engine) \
1108                __force_wake_auto(dev_priv, fw_engine); \
1109        __raw_i915_write##x(dev_priv, reg, val); \
1110        GEN6_WRITE_FOOTER; \
1111}
1112
1113__gen9_write(8)
1114__gen9_write(16)
1115__gen9_write(32)
1116__gen9_write(64)
1117__chv_write(8)
1118__chv_write(16)
1119__chv_write(32)
1120__chv_write(64)
1121__gen8_write(8)
1122__gen8_write(16)
1123__gen8_write(32)
1124__gen8_write(64)
1125__hsw_write(8)
1126__hsw_write(16)
1127__hsw_write(32)
1128__hsw_write(64)
1129__gen6_write(8)
1130__gen6_write(16)
1131__gen6_write(32)
1132__gen6_write(64)
1133
1134#undef __gen9_write
1135#undef __chv_write
1136#undef __gen8_write
1137#undef __hsw_write
1138#undef __gen6_write
1139#undef GEN6_WRITE_FOOTER
1140#undef GEN6_WRITE_HEADER
1141
1142#define VGPU_WRITE_HEADER \
1143        unsigned long irqflags; \
1144        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1145        assert_rpm_device_not_suspended(dev_priv); \
1146        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
1147
1148#define VGPU_WRITE_FOOTER \
1149        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1150
1151#define __vgpu_write(x) \
1152static void vgpu_write##x(struct drm_i915_private *dev_priv, \
1153                          i915_reg_t reg, u##x val, bool trace) { \
1154        VGPU_WRITE_HEADER; \
1155        __raw_i915_write##x(dev_priv, reg, val); \
1156        VGPU_WRITE_FOOTER; \
1157}
1158
1159__vgpu_write(8)
1160__vgpu_write(16)
1161__vgpu_write(32)
1162__vgpu_write(64)
1163
1164#undef __vgpu_write
1165#undef VGPU_WRITE_FOOTER
1166#undef VGPU_WRITE_HEADER
1167
1168#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1169do { \
1170        dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1171        dev_priv->uncore.funcs.mmio_writew = x##_write16; \
1172        dev_priv->uncore.funcs.mmio_writel = x##_write32; \
1173        dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
1174} while (0)
1175
1176#define ASSIGN_READ_MMIO_VFUNCS(x) \
1177do { \
1178        dev_priv->uncore.funcs.mmio_readb = x##_read8; \
1179        dev_priv->uncore.funcs.mmio_readw = x##_read16; \
1180        dev_priv->uncore.funcs.mmio_readl = x##_read32; \
1181        dev_priv->uncore.funcs.mmio_readq = x##_read64; \
1182} while (0)
1183
1184
1185static void fw_domain_init(struct drm_i915_private *dev_priv,
1186                           enum forcewake_domain_id domain_id,
1187                           i915_reg_t reg_set,
1188                           i915_reg_t reg_ack)
1189{
1190        struct intel_uncore_forcewake_domain *d;
1191
1192        if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1193                return;
1194
1195        d = &dev_priv->uncore.fw_domain[domain_id];
1196
1197        WARN_ON(d->wake_count);
1198
1199        d->wake_count = 0;
1200        d->reg_set = reg_set;
1201        d->reg_ack = reg_ack;
1202
1203        if (IS_GEN6(dev_priv)) {
1204                d->val_reset = 0;
1205                d->val_set = FORCEWAKE_KERNEL;
1206                d->val_clear = 0;
1207        } else {
1208                /* WaRsClearFWBitsAtReset:bdw,skl */
1209                d->val_reset = _MASKED_BIT_DISABLE(0xffff);
1210                d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1211                d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1212        }
1213
1214        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1215                d->reg_post = FORCEWAKE_ACK_VLV;
1216        else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
1217                d->reg_post = ECOBUS;
1218
1219        d->i915 = dev_priv;
1220        d->id = domain_id;
1221
1222        BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1223        BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1224        BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1225
1226        d->mask = 1 << domain_id;
1227
1228        hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1229        d->timer.function = intel_uncore_fw_release_timer;
1230
1231        dev_priv->uncore.fw_domains |= (1 << domain_id);
1232
1233        fw_domain_reset(d);
1234}
1235
1236static void intel_uncore_fw_domains_init(struct drm_device *dev)
1237{
1238        struct drm_i915_private *dev_priv = dev->dev_private;
1239
1240        if (INTEL_INFO(dev_priv)->gen <= 5)
1241                return;
1242
1243        if (IS_GEN9(dev)) {
1244                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1245                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1246                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1247                               FORCEWAKE_RENDER_GEN9,
1248                               FORCEWAKE_ACK_RENDER_GEN9);
1249                fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1250                               FORCEWAKE_BLITTER_GEN9,
1251                               FORCEWAKE_ACK_BLITTER_GEN9);
1252                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1253                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1254        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1255                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1256                if (!IS_CHERRYVIEW(dev))
1257                        dev_priv->uncore.funcs.force_wake_put =
1258                                fw_domains_put_with_fifo;
1259                else
1260                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1261                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1262                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1263                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1264                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1265        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1266                dev_priv->uncore.funcs.force_wake_get =
1267                        fw_domains_get_with_thread_status;
1268                if (IS_HASWELL(dev))
1269                        dev_priv->uncore.funcs.force_wake_put =
1270                                fw_domains_put_with_fifo;
1271                else
1272                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1273                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1274                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1275        } else if (IS_IVYBRIDGE(dev)) {
1276                u32 ecobus;
1277
1278                /* IVB configs may use multi-threaded forcewake */
1279
1280                /* A small trick here - if the bios hasn't configured
1281                 * MT forcewake, and if the device is in RC6, then
1282                 * force_wake_mt_get will not wake the device and the
 1283                 * ECOBUS read will return zero, which will be
1284                 * (correctly) interpreted by the test below as MT
1285                 * forcewake being disabled.
1286                 */
1287                dev_priv->uncore.funcs.force_wake_get =
1288                        fw_domains_get_with_thread_status;
1289                dev_priv->uncore.funcs.force_wake_put =
1290                        fw_domains_put_with_fifo;
1291
1292                /* We need to init first for ECOBUS access and then
 1293                 * determine later if we want to reinit, in case MT access is
 1294                 * not working. At this stage we don't know which flavour this
 1295                 * ivb is, so it is better to also reset the gen6 fw registers
1296                 * before the ecobus check.
1297                 */
1298
1299                __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1300                __raw_posting_read(dev_priv, ECOBUS);
1301
1302                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1303                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1304
1305                mutex_lock(&dev->struct_mutex);
1306                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
1307                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1308                fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
1309                mutex_unlock(&dev->struct_mutex);
1310
1311                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1312                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1313                        DRM_INFO("when using vblank-synced partial screen updates.\n");
1314                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1315                                       FORCEWAKE, FORCEWAKE_ACK);
1316                }
1317        } else if (IS_GEN6(dev)) {
1318                dev_priv->uncore.funcs.force_wake_get =
1319                        fw_domains_get_with_thread_status;
1320                dev_priv->uncore.funcs.force_wake_put =
1321                        fw_domains_put_with_fifo;
1322                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1323                               FORCEWAKE, FORCEWAKE_ACK);
1324        }
1325
1326        /* All future platforms are expected to require complex power gating */
1327        WARN_ON(dev_priv->uncore.fw_domains == 0);
1328}
1329
1330void intel_uncore_init(struct drm_device *dev)
1331{
1332        struct drm_i915_private *dev_priv = dev->dev_private;
1333
1334        i915_check_vgpu(dev);
1335
1336        intel_uncore_edram_detect(dev_priv);
1337        intel_uncore_fw_domains_init(dev);
1338        __intel_uncore_early_sanitize(dev, false);
1339
1340        dev_priv->uncore.unclaimed_mmio_check = 1;
1341
1342        switch (INTEL_INFO(dev)->gen) {
1343        default:
1344        case 9:
1345                ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1346                ASSIGN_READ_MMIO_VFUNCS(gen9);
1347                break;
1348        case 8:
1349                if (IS_CHERRYVIEW(dev)) {
1350                        ASSIGN_WRITE_MMIO_VFUNCS(chv);
1351                        ASSIGN_READ_MMIO_VFUNCS(chv);
1352
1353                } else {
1354                        ASSIGN_WRITE_MMIO_VFUNCS(gen8);
1355                        ASSIGN_READ_MMIO_VFUNCS(gen6);
1356                }
1357                break;
1358        case 7:
1359        case 6:
1360                if (IS_HASWELL(dev)) {
1361                        ASSIGN_WRITE_MMIO_VFUNCS(hsw);
1362                } else {
1363                        ASSIGN_WRITE_MMIO_VFUNCS(gen6);
1364                }
1365
1366                if (IS_VALLEYVIEW(dev)) {
1367                        ASSIGN_READ_MMIO_VFUNCS(vlv);
1368                } else {
1369                        ASSIGN_READ_MMIO_VFUNCS(gen6);
1370                }
1371                break;
1372        case 5:
1373                ASSIGN_WRITE_MMIO_VFUNCS(gen5);
1374                ASSIGN_READ_MMIO_VFUNCS(gen5);
1375                break;
1376        case 4:
1377        case 3:
1378        case 2:
1379                ASSIGN_WRITE_MMIO_VFUNCS(gen2);
1380                ASSIGN_READ_MMIO_VFUNCS(gen2);
1381                break;
1382        }
1383
1384        if (intel_vgpu_active(dev)) {
1385                ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1386                ASSIGN_READ_MMIO_VFUNCS(vgpu);
1387        }
1388
1389        i915_check_and_clear_faults(dev);
1390}
1391#undef ASSIGN_WRITE_MMIO_VFUNCS
1392#undef ASSIGN_READ_MMIO_VFUNCS
1393
1394void intel_uncore_fini(struct drm_device *dev)
1395{
1396        /* Paranoia: make sure we have disabled everything before we exit. */
1397        intel_uncore_sanitize(dev);
1398        intel_uncore_forcewake_reset(dev, false);
1399}
1400
1401#define GEN_RANGE(l, h) GENMASK(h, l)
1402
1403static const struct register_whitelist {
1404        i915_reg_t offset_ldw, offset_udw;
1405        uint32_t size;
1406        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1407        uint32_t gen_bitmask;
1408} whitelist[] = {
1409        { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1410          .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1411          .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
1412};
1413
1414int i915_reg_read_ioctl(struct drm_device *dev,
1415                        void *data, struct drm_file *file)
1416{
1417        struct drm_i915_private *dev_priv = dev->dev_private;
1418        struct drm_i915_reg_read *reg = data;
1419        struct register_whitelist const *entry = whitelist;
1420        unsigned size;
1421        i915_reg_t offset_ldw, offset_udw;
1422        int i, ret = 0;
1423
1424        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1425                if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1426                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1427                        break;
1428        }
1429
1430        if (i == ARRAY_SIZE(whitelist))
1431                return -EINVAL;
1432
1433        /* We use the low bits to encode extra flags as the register should
1434         * be naturally aligned (and those that are not so aligned merely
1435         * limit the available flags for that register).
1436         */
1437        offset_ldw = entry->offset_ldw;
1438        offset_udw = entry->offset_udw;
1439        size = entry->size;
1440        size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
1441
1442        intel_runtime_pm_get(dev_priv);
1443
1444        switch (size) {
1445        case 8 | 1:
1446                reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
1447                break;
1448        case 8:
1449                reg->val = I915_READ64(offset_ldw);
1450                break;
1451        case 4:
1452                reg->val = I915_READ(offset_ldw);
1453                break;
1454        case 2:
1455                reg->val = I915_READ16(offset_ldw);
1456                break;
1457        case 1:
1458                reg->val = I915_READ8(offset_ldw);
1459                break;
1460        default:
1461                ret = -EINVAL;
1462                goto out;
1463        }
1464
1465out:
1466        intel_runtime_pm_put(dev_priv);
1467        return ret;
1468}
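/*
 * Userspace sketch (illustrative; assumes an open DRM fd and libdrm's
 * drmIoctl(), error handling omitted): reading the whitelisted render ring
 * timestamp, i.e. RING_TIMESTAMP(RENDER_RING_BASE) at offset 0x2358. Note
 * that passing offset | 1 instead selects the 2x32 read path above.
 *
 *	struct drm_i915_reg_read req = { .offset = 0x2358 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &req) == 0)
 *		printf("timestamp: %llu\n", (unsigned long long)req.val);
 */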
1469
1470int i915_get_reset_stats_ioctl(struct drm_device *dev,
1471                               void *data, struct drm_file *file)
1472{
1473        struct drm_i915_private *dev_priv = dev->dev_private;
1474        struct drm_i915_reset_stats *args = data;
1475        struct i915_ctx_hang_stats *hs;
1476        struct intel_context *ctx;
1477        int ret;
1478
1479        if (args->flags || args->pad)
1480                return -EINVAL;
1481
1482        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1483                return -EPERM;
1484
1485        ret = mutex_lock_interruptible(&dev->struct_mutex);
1486        if (ret)
1487                return ret;
1488
1489        ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
1490        if (IS_ERR(ctx)) {
1491                mutex_unlock(&dev->struct_mutex);
1492                return PTR_ERR(ctx);
1493        }
1494        hs = &ctx->hang_stats;
1495
1496        if (capable(CAP_SYS_ADMIN))
1497                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1498        else
1499                args->reset_count = 0;
1500
1501        args->batch_active = hs->batch_active;
1502        args->batch_pending = hs->batch_pending;
1503
1504        mutex_unlock(&dev->struct_mutex);
1505
1506        return 0;
1507}
1508
1509static int i915_reset_complete(struct drm_device *dev)
1510{
1511        u8 gdrst;
1512        pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1513        return (gdrst & GRDOM_RESET_STATUS) == 0;
1514}
1515
1516static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
1517{
1518        /* assert reset for at least 20 usec */
1519        pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1520        udelay(20);
1521        pci_write_config_byte(dev->pdev, I915_GDRST, 0);
1522
1523        return wait_for(i915_reset_complete(dev), 500);
1524}
1525
1526static int g4x_reset_complete(struct drm_device *dev)
1527{
1528        u8 gdrst;
1529        pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1530        return (gdrst & GRDOM_RESET_ENABLE) == 0;
1531}
1532
1533static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
1534{
1535        pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1536        return wait_for(g4x_reset_complete(dev), 500);
1537}
1538
1539static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
1540{
1541        struct drm_i915_private *dev_priv = dev->dev_private;
1542        int ret;
1543
1544        pci_write_config_byte(dev->pdev, I915_GDRST,
1545                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
1546        ret = wait_for(g4x_reset_complete(dev), 500);
1547        if (ret)
1548                return ret;
1549
1550        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1551        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1552        POSTING_READ(VDECCLK_GATE_D);
1553
1554        pci_write_config_byte(dev->pdev, I915_GDRST,
1555                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1556        ret = wait_for(g4x_reset_complete(dev), 500);
1557        if (ret)
1558                return ret;
1559
1560        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1561        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1562        POSTING_READ(VDECCLK_GATE_D);
1563
1564        pci_write_config_byte(dev->pdev, I915_GDRST, 0);
1565
1566        return 0;
1567}
1568
1569static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
1570{
1571        struct drm_i915_private *dev_priv = dev->dev_private;
1572        int ret;
1573
1574        I915_WRITE(ILK_GDSR,
1575                   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1576        ret = wait_for((I915_READ(ILK_GDSR) &
1577                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
1578        if (ret)
1579                return ret;
1580
1581        I915_WRITE(ILK_GDSR,
1582                   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1583        ret = wait_for((I915_READ(ILK_GDSR) &
1584                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
1585        if (ret)
1586                return ret;
1587
1588        I915_WRITE(ILK_GDSR, 0);
1589
1590        return 0;
1591}
1592
1593/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1594static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1595                                u32 hw_domain_mask)
1596{
1597        int ret;
1598
1599        /* GEN6_GDRST is not in the GT power well, so there is no need to
1600         * check for FIFO space before the write or to forcewake the chip
1601         * for the read.
1602         */
1603        __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
1604
1605#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
1606        /* Spin waiting for the device to ack the reset requests */
1607        ret = wait_for(ACKED, 500);
1608#undef ACKED
1609
1610        return ret;
1611}
1612
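/*
 * Hedged illustration, not part of the driver: the GDRST protocol is "write
 * the request bits, poll until the hardware clears them again".  A full soft
 * reset of every GT domain therefore reduces to
 *
 *	err = gen6_hw_domain_reset(dev_priv, GEN6_GRDOM_FULL);
 *
 * while a GuC-only reset, as intel_guc_reset() below does, passes
 * GEN9_GRDOM_GUC instead.  A non-zero return means the 500ms ack wait
 * expired.
 */
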
1613/**
1614 * gen6_reset_engines - reset individual engines
1615 * @dev: DRM device
1616 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1617 *
1618 * This function will reset the individual engines that are set in engine_mask.
1619 * If you provide ALL_ENGINES as the mask, a full global domain reset will be issued.
1620 *
1621 * Note: It is the responsibility of the caller to handle the difference between
1622 * requesting a full domain reset and requesting a reset of every individual engine.
1623 *
1624 * Returns 0 on success, nonzero on error.
1625 */
1626static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
1627{
1628        struct drm_i915_private *dev_priv = dev->dev_private;
1629        struct intel_engine_cs *engine;
1630        const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1631                [RCS] = GEN6_GRDOM_RENDER,
1632                [BCS] = GEN6_GRDOM_BLT,
1633                [VCS] = GEN6_GRDOM_MEDIA,
1634                [VCS2] = GEN8_GRDOM_MEDIA2,
1635                [VECS] = GEN6_GRDOM_VECS,
1636        };
1637        u32 hw_mask;
1638        int ret;
1639
1640        if (engine_mask == ALL_ENGINES) {
1641                hw_mask = GEN6_GRDOM_FULL;
1642        } else {
1643                hw_mask = 0;
1644                for_each_engine_masked(engine, dev_priv, engine_mask)
1645                        hw_mask |= hw_engine_mask[engine->id];
1646        }
1647
1648        ret = gen6_hw_domain_reset(dev_priv, hw_mask);
1649
1650        intel_uncore_forcewake_reset(dev, true);
1651
1652        return ret;
1653}
1654
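/*
 * Hedged illustration, not part of the driver: how an engine mask translates
 * into GDRST domain bits.  Resetting only the render and blitter engines
 * would come out as
 *
 *	hw_mask = GEN6_GRDOM_RENDER | GEN6_GRDOM_BLT;
 *
 * whereas ALL_ENGINES collapses to the single GEN6_GRDOM_FULL request bit
 * rather than the OR of every per-engine bit.
 */
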
1655static int wait_for_register_fw(struct drm_i915_private *dev_priv,
1656                                i915_reg_t reg,
1657                                const u32 mask,
1658                                const u32 value,
1659                                const unsigned long timeout_ms)
1660{
1661        return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
1662}
1663
1664static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1665{
1666        int ret;
1667        struct drm_i915_private *dev_priv = engine->dev->dev_private;
1668
1669        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1670                      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1671
1672        ret = wait_for_register_fw(dev_priv,
1673                                   RING_RESET_CTL(engine->mmio_base),
1674                                   RESET_CTL_READY_TO_RESET,
1675                                   RESET_CTL_READY_TO_RESET,
1676                                   700);
1677        if (ret)
1678                DRM_ERROR("%s: reset request timeout\n", engine->name);
1679
1680        return ret;
1681}
1682
1683static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1684{
1685        struct drm_i915_private *dev_priv = engine->dev->dev_private;
1686
1687        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1688                      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1689}
1690
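/*
 * Hedged background note, not part of the driver: RING_RESET_CTL is a
 * "masked" register, i.e. the upper 16 bits of a write select which of the
 * lower 16 bits actually change.  The helpers used above expand roughly to
 *
 *	_MASKED_BIT_ENABLE(bit)		((bit) << 16 | (bit))
 *	_MASKED_BIT_DISABLE(bit)	((bit) << 16)
 *
 * so requesting and unrequesting a reset only ever touch
 * RESET_CTL_REQUEST_RESET and leave the remaining control bits alone.
 */
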
1691static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
1692{
1693        struct drm_i915_private *dev_priv = dev->dev_private;
1694        struct intel_engine_cs *engine;
1695
1696        for_each_engine_masked(engine, dev_priv, engine_mask)
1697                if (gen8_request_engine_reset(engine))
1698                        goto not_ready;
1699
1700        return gen6_reset_engines(dev, engine_mask);
1701
1702not_ready:
1703        for_each_engine_masked(engine, dev_priv, engine_mask)
1704                gen8_unrequest_engine_reset(engine);
1705
1706        return -EIO;
1707}
1708
1709static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
1710                                                          unsigned engine_mask)
1711{
1712        if (!i915.reset)
1713                return NULL;
1714
1715        if (INTEL_INFO(dev)->gen >= 8)
1716                return gen8_reset_engines;
1717        else if (INTEL_INFO(dev)->gen >= 6)
1718                return gen6_reset_engines;
1719        else if (IS_GEN5(dev))
1720                return ironlake_do_reset;
1721        else if (IS_G4X(dev))
1722                return g4x_do_reset;
1723        else if (IS_G33(dev))
1724                return g33_do_reset;
1725        else if (INTEL_INFO(dev)->gen >= 3)
1726                return i915_do_reset;
1727        else
1728                return NULL;
1729}
1730
1731int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
1732{
1733        struct drm_i915_private *dev_priv = to_i915(dev);
1734        int (*reset)(struct drm_device *, unsigned);
1735        int ret;
1736
1737        reset = intel_get_gpu_reset(dev);
1738        if (reset == NULL)
1739                return -ENODEV;
1740
1741        /* If the power well sleeps during the reset, the reset
1742         * request may be dropped and never complete (causing -EIO).
1743         */
1744        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1745        ret = reset(dev, engine_mask);
1746        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1747
1748        return ret;
1749}
1750
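/*
 * Hedged usage sketch, not part of the driver: a hang-recovery caller could,
 * for example, try a per-engine reset first and only fall back to nuking the
 * whole GPU if that fails:
 *
 *	if (intel_gpu_reset(dev, intel_ring_flag(engine)))
 *		ret = intel_gpu_reset(dev, ALL_ENGINES);
 *
 * -ENODEV here simply means no reset method exists for the platform (or
 * reset was disabled via the i915.reset module parameter).
 */
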
1751bool intel_has_gpu_reset(struct drm_device *dev)
1752{
1753        return intel_get_gpu_reset(dev) != NULL;
1754}
1755
1756int intel_guc_reset(struct drm_i915_private *dev_priv)
1757{
1758        int ret;
1759        unsigned long irqflags;
1760
1761        if (!i915.enable_guc_submission)
1762                return -EINVAL;
1763
1764        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1765        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1766
1767        ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
1768
1769        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1770        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1771
1772        return ret;
1773}
1774
1775bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
1776{
1777        return check_for_unclaimed_mmio(dev_priv);
1778}
1779
1780bool
1781intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1782{
1783        if (unlikely(i915.mmio_debug ||
1784                     dev_priv->uncore.unclaimed_mmio_check <= 0))
1785                return false;
1786
1787        if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1788                DRM_DEBUG("Unclaimed register detected, "
1789                          "enabling oneshot unclaimed register reporting. "
1790                          "Please use i915.mmio_debug=N for more information.\n");
1791                i915.mmio_debug++;
1792                dev_priv->uncore.unclaimed_mmio_check--;
1793                return true;
1794        }
1795
1796        return false;
1797}
1798
1799static enum forcewake_domains
1800intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1801                                i915_reg_t reg)
1802{
1803        enum forcewake_domains fw_domains;
1804
1805        if (intel_vgpu_active(dev_priv->dev))
1806                return 0;
1807
1808        switch (INTEL_INFO(dev_priv)->gen) {
1809        case 9:
1810                fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
1811                break;
1812        case 8:
1813                if (IS_CHERRYVIEW(dev_priv))
1814                        fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
1815                else
1816                        fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
1817                break;
1818        case 7:
1819        case 6:
1820                if (IS_VALLEYVIEW(dev_priv))
1821                        fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
1822                else
1823                        fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
1824                break;
1825        default:
1826                MISSING_CASE(INTEL_INFO(dev_priv)->gen);
1827        case 5: /* forcewake was introduced with gen6 */
1828        case 4:
1829        case 3:
1830        case 2:
1831                return 0;
1832        }
1833
1834        WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1835
1836        return fw_domains;
1837}
1838
1839static enum forcewake_domains
1840intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1841                                 i915_reg_t reg)
1842{
1843        enum forcewake_domains fw_domains;
1844
1845        if (intel_vgpu_active(dev_priv->dev))
1846                return 0;
1847
1848        switch (INTEL_INFO(dev_priv)->gen) {
1849        case 9:
1850                fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
1851                break;
1852        case 8:
1853                if (IS_CHERRYVIEW(dev_priv))
1854                        fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
1855                else
1856                        fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
1857                break;
1858        case 7:
1859        case 6:
1860                fw_domains = FORCEWAKE_RENDER;
1861                break;
1862        default:
1863                MISSING_CASE(INTEL_INFO(dev_priv)->gen);
1864        case 5:
1865        case 4:
1866        case 3:
1867        case 2:
1868                return 0;
1869        }
1870
1871        WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1872
1873        return fw_domains;
1874}
1875
1876/**
1877 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
1878 *                                  a register
1879 * @dev_priv: pointer to struct drm_i915_private
1880 * @reg: register in question
1881 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
1882 *
1883 * Returns the set of forcewake domains that must be held (for example via
1884 * intel_uncore_forcewake_get) for the specified register to be accessible in
1885 * the specified mode (read, write or read/write) with raw mmio accessors.
1886 *
1887 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
1888 * callers to do FIFO management on their own or risk losing writes.
1889 */
1890enum forcewake_domains
1891intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
1892                               i915_reg_t reg, unsigned int op)
1893{
1894        enum forcewake_domains fw_domains = 0;
1895
1896        WARN_ON(!op);
1897
1898        if (op & FW_REG_READ)
1899                fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
1900
1901        if (op & FW_REG_WRITE)
1902                fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
1903
1904        return fw_domains;
1905}
1906
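/*
 * Hedged usage sketch, not part of the driver: the intended pairing of
 * intel_uncore_forcewake_for_reg() with the raw (_FW) mmio accessors.
 * example_reg stands in for whatever register the caller cares about.
 *
 *	enum forcewake_domains fw;
 *	u32 val;
 *
 *	fw = intel_uncore_forcewake_for_reg(dev_priv, example_reg, FW_REG_READ);
 *
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	val = I915_READ_FW(example_reg);
 *	intel_uncore_forcewake_put(dev_priv, fw);
 *
 * Holding the returned domains explicitly lets a hot path batch several raw
 * accesses under a single forcewake reference instead of paying the wake/ack
 * handshake on every I915_READ().
 */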