linux/drivers/gpu/drm/i915/intel_uncore.c
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
        "render",
        "blitter",
        "media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
                return forcewake_domain_names[id];

        WARN_ON(id);

        return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
        WARN_ON(!i915_mmio_reg_valid(d->reg_set));
        __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
        d->wake_count++;
        hrtimer_start_range_ns(&d->timer,
                               NSEC_PER_MSEC,
                               NSEC_PER_MSEC,
                               HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
        /* something from same cacheline, but not from the set register */
        if (i915_mmio_reg_valid(d->reg_post))
                __raw_posting_read(d->i915, d->reg_post);
}

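/*
 * Wake the requested domains in two passes: first request each wakeup
 * (waiting for any in-flight release to ack-clear before setting the bit),
 * then wait for all the acks, so the hardware wakeups can proceed in
 * parallel rather than strictly one after another.
 */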
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;

        for_each_fw_domain_masked(d, fw_domains, dev_priv) {
                fw_domain_wait_ack_clear(d);
                fw_domain_get(d);
        }

        for_each_fw_domain_masked(d, fw_domains, dev_priv)
                fw_domain_wait_ack(d);

        dev_priv->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;

        for_each_fw_domain_masked(d, fw_domains, dev_priv) {
                fw_domain_put(d);
                fw_domain_posting_read(d);
        }

        dev_priv->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
        struct intel_uncore_forcewake_domain *d;

        /* No need to do for all, just do for first found */
        for_each_fw_domain(d, dev_priv) {
                fw_domain_posting_read(d);
                break;
        }
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;

        if (dev_priv->uncore.fw_domains == 0)
                return;

        for_each_fw_domain_masked(d, fw_domains, dev_priv)
                fw_domain_reset(d);

        fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
                                GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
                                              enum forcewake_domains fw_domains)
{
        fw_domains_get(dev_priv, fw_domains);

        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
{
        fw_domains_put(dev_priv, fw_domains);
        gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
        u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

        return count & GT_FIFO_FREE_ENTRIES_MASK;
}

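/*
 * Make sure the GT FIFO has a free slot before a posted write on gen6/7:
 * keep GT_FIFO_NUM_RESERVED_ENTRIES in reserve, polling every 10us for up
 * to 5ms before giving up.
 */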
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        /* On VLV, the FIFO is shared by both SW and HW, so the free
         * entry count has to be re-read every time.
         */
        if (IS_VALLEYVIEW(dev_priv))
                dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = fifo_free_entries(dev_priv);

                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = fifo_free_entries(dev_priv);
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}

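/*
 * Expiry callback for the per-domain release timer armed by
 * fw_domain_arm_timer(): drop the reference the timer was holding and
 * release the hardware forcewake once the wake count reaches zero.
 */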
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
        struct intel_uncore_forcewake_domain *domain =
               container_of(timer, struct intel_uncore_forcewake_domain, timer);
        struct drm_i915_private *dev_priv = domain->i915;
        unsigned long irqflags;

        assert_rpm_device_not_suspended(dev_priv);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (WARN_ON(domain->wake_count == 0))
                domain->wake_count++;

        if (--domain->wake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return HRTIMER_NORESTART;
}

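/*
 * Cancel all pending release timers, force-release every domain and reset
 * the hardware forcewake bits to a known state. With restore=true (e.g.
 * across a GPU reset or resume), re-acquire whatever was held beforehand.
 */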
void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
                                  bool restore)
{
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
        enum forcewake_domains fw, active_domains;

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly. Wait until all pending
         * timers are run before holding.
         */
        while (1) {
                active_domains = 0;

                for_each_fw_domain(domain, dev_priv) {
                        if (hrtimer_cancel(&domain->timer) == 0)
                                continue;

                        intel_uncore_fw_release_timer(&domain->timer);
                }

                spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

                for_each_fw_domain(domain, dev_priv) {
                        if (hrtimer_active(&domain->timer))
                                active_domains |= domain->mask;
                }

                if (active_domains == 0)
                        break;

                if (--retry_count == 0) {
                        DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
                        break;
                }

                spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
                cond_resched();
        }

        WARN_ON(active_domains);

        fw = dev_priv->uncore.fw_domains_active;
        if (fw)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

        fw_domains_reset(dev_priv, FORCEWAKE_ALL);

        if (restore) { /* If reset with a user forcewake, try to restore */
                if (fw)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
                        dev_priv->uncore.fifo_count =
                                fifo_free_entries(dev_priv);
        }

        if (!restore)
                assert_forcewakes_inactive(dev_priv);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

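/*
 * eDRAM size on gen9+ is banks * ways * sets * 1MB, decoded from the
 * capability register. For example, a cap value reporting 2 banks, the
 * 8-way index and a 2-set index yields 2 * 8 * 2 = 32MB.
 */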
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
        const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
        const unsigned int sets[4] = { 1, 1, 2, 2 };
        const u32 cap = dev_priv->edram_cap;

        return EDRAM_NUM_BANKS(cap) *
                ways[EDRAM_WAYS_IDX(cap)] *
                sets[EDRAM_SETS_IDX(cap)] *
                1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
        if (!HAS_EDRAM(dev_priv))
                return 0;

        /* The capability bits needed for the size calculation are not
         * there on pre-gen9 hardware, so always return 128MB there.
         */
        if (INTEL_GEN(dev_priv) < 9)
                return 128 * 1024 * 1024;

        return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
        if (IS_HASWELL(dev_priv) ||
            IS_BROADWELL(dev_priv) ||
            INTEL_GEN(dev_priv) >= 9) {
                dev_priv->edram_cap = __raw_i915_read32(dev_priv,
                                                        HSW_EDRAM_CAP);

                /* NB: We can't write IDICR yet because we don't have gt funcs
                 * set up */
        } else {
                dev_priv->edram_cap = 0;
        }

        if (HAS_EDRAM(dev_priv))
                DRM_INFO("Found %lluMB of eDRAM\n",
                         intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
        u32 dbg;

        dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
        if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
                return false;

        __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
        u32 cer;

        cer = __raw_i915_read32(dev_priv, CLAIM_ER);
        if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
                return false;

        __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

        return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
        if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
                return fpga_check_for_unclaimed_mmio(dev_priv);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return vlv_check_for_unclaimed_mmio(dev_priv);

        return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                                          bool restore_forcewake)
{
        struct intel_device_info *info = mkwrite_device_info(dev_priv);

        /* clear out unclaimed reg detection bit */
        if (check_for_unclaimed_mmio(dev_priv))
                DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));

        /* WaDisableShadowRegForCpd:chv */
        if (IS_CHERRYVIEW(dev_priv)) {
                __raw_i915_write32(dev_priv, GTFIFOCTL,
                                   __raw_i915_read32(dev_priv, GTFIFOCTL) |
                                   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
                                   GT_FIFO_CTL_RC6_POLICY_STALL);
        }

        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
                info->has_decoupled_mmio = false;

        intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                                 bool restore_forcewake)
{
        __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
        i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
        i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_sanitize_gt_powersave(dev_priv);
}

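/*
 * Take a reference on each requested domain; only domains whose wake
 * count was previously zero need the hardware get call.
 */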
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;

        fw_domains &= dev_priv->uncore.fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
                if (domain->wake_count++)
                        fw_domains &= ~domain->mask;
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to grab the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and the reference subsequently dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        __intel_uncore_forcewake_get(dev_priv, fw_domains);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

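/*
 * Typical usage is a get/put pair bracketing the raw accesses; the
 * register sequence in the middle is only an illustrative sketch:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... raw register accesses that must not race RC6 ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */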
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains fw_domains)
{
        assert_spin_locked(&dev_priv->uncore.lock);

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        __intel_uncore_forcewake_get(dev_priv, fw_domains);
}

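/*
 * Drop a reference on each requested domain; rather than releasing the
 * hardware immediately, arm the 1ms release timer so that back-to-back
 * accesses keep the domain awake.
 */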
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;

        fw_domains &= dev_priv->uncore.fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
                if (WARN_ON(domain->wake_count == 0))
                        continue;

                if (--domain->wake_count)
                        continue;

                fw_domain_arm_timer(domain);
        }
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        __intel_uncore_forcewake_put(dev_priv, fw_domains);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains fw_domains)
{
        assert_spin_locked(&dev_priv->uncore.lock);

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        __intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
        if (offset < entry->start)
                return -1;
        else if (offset > entry->end)
                return 1;
        else
                return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
        unsigned int start__ = 0, end__ = (num);                        \
        typeof(base) result__ = NULL;                                   \
        while (start__ < end__) {                                       \
                unsigned int mid__ = start__ + (end__ - start__) / 2;   \
                int ret__ = (cmp)((key), (base) + mid__);               \
                if (ret__ < 0) {                                        \
                        end__ = mid__;                                  \
                } else if (ret__ > 0) {                                 \
                        start__ = mid__ + 1;                            \
                } else {                                                \
                        result__ = (base) + mid__;                      \
                        break;                                          \
                }                                                       \
        }                                                               \
        result__;                                                       \
})

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
        const struct intel_forcewake_range *entry;

        entry = BSEARCH(offset,
                        dev_priv->uncore.fw_domains_table,
                        dev_priv->uncore.fw_domains_table_entries,
                        fw_range_cmp);

        if (!entry)
                return 0;

        WARN(entry->domains & ~dev_priv->uncore.fw_domains,
             "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
             entry->domains & ~dev_priv->uncore.fw_domains, offset);

        return entry->domains;
}

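/*
 * Sanity check, compiled in only with CONFIG_DRM_I915_DEBUG: the
 * forcewake table must be sorted by ascending offset and, on gen9,
 * must also cover the address space without gaps.
 */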
static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
        const struct intel_forcewake_range *ranges;
        unsigned int num_ranges;
        s32 prev;
        unsigned int i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
                return;

        ranges = dev_priv->uncore.fw_domains_table;
        if (!ranges)
                return;

        num_ranges = dev_priv->uncore.fw_domains_table_entries;

        for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
                WARN_ON_ONCE(IS_GEN9(dev_priv) &&
                             (prev + 1) != (s32)ranges->start);
                WARN_ON_ONCE(prev >= (s32)ranges->start);
                prev = ranges->start;
                WARN_ON_ONCE(prev >= (s32)ranges->end);
                prev = ranges->end;
        }
}

#define GEN_FW_RANGE(s, e, d) \
        { .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
        (IS_GEN9(dev_priv) || \
         IS_CHERRYVIEW(dev_priv) || \
         IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset))) \
                __fwd = find_fw_domain(dev_priv, offset); \
        __fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),    /* 0x2000 (base) */
        GEN6_RPNSWREQ,                  /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,             /* 0xA00C */
        RING_TAIL(GEN6_BSD_RING_BASE),  /* 0x12000 (base) */
        RING_TAIL(VEBOX_RING_BASE),     /* 0x1a000 (base) */
        RING_TAIL(BLT_RING_BASE),       /* 0x22000 (base) */
        /* TODO: Other registers are not yet used */
};

static void intel_shadow_table_check(void)
{
        const i915_reg_t *reg = gen8_shadowed_regs;
        s32 prev;
        u32 offset;
        unsigned int i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
                return;

        for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
                offset = i915_mmio_reg_offset(*reg);
                WARN_ON_ONCE(prev >= (s32)offset);
                prev = offset;
        }
}

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
        u32 offset = i915_mmio_reg_offset(*reg);

        if (key < offset)
                return -1;
        else if (key > offset)
                return 1;
        else
                return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
        const i915_reg_t *regs = gen8_shadowed_regs;

        return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
                       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
                __fwd = find_fw_domain(dev_priv, offset); \
        __fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
        GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
         * the chip from rc6 before touching it for real. MI_MODE is masked,
         * hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
                      const i915_reg_t reg,
                      const bool read,
                      const bool before)
{
        if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
                 "Unclaimed %s register 0x%x\n",
                 read ? "read from" : "write to",
                 i915_mmio_reg_offset(reg)))
                i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
                    const i915_reg_t reg,
                    const bool read,
                    const bool before)
{
        if (likely(!i915.mmio_debug))
                return;

        __unclaimed_reg_debug(dev_priv, reg, read, before);
}

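/*
 * Map a forcewake_domains mask (1..7) to a decoupled-MMIO power domain;
 * the table is indexed by mask - 1, so any combination of more than one
 * domain selects GEN9_DECOUPLED_PD_ALL.
 */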
static const enum decoupled_power_domain fw2dpd_domain[] = {
        GEN9_DECOUPLED_PD_RENDER,
        GEN9_DECOUPLED_PD_BLITTER,
        GEN9_DECOUPLED_PD_ALL,
        GEN9_DECOUPLED_PD_MEDIA,
        GEN9_DECOUPLED_PD_ALL,
        GEN9_DECOUPLED_PD_ALL,
        GEN9_DECOUPLED_PD_ALL
};

/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
                                         u32 reg,
                                         enum forcewake_domains fw_domain,
                                         enum decoupled_ops operation)
{
        enum decoupled_power_domain dp_domain;
        u32 ctrl_reg_data = 0;

        dp_domain = fw2dpd_domain[fw_domain - 1];

        ctrl_reg_data |= reg;
        ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
        ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
        ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
        __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

        if (wait_for_atomic((__raw_i915_read32(dev_priv,
                            GEN9_DECOUPLED_REG0_DW1) &
                            GEN9_DECOUPLED_DW1_GO) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Decoupled MMIO wait timed out\n");
}

static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
                             u32 reg,
                             enum forcewake_domains fw_domain)
{
        __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
                                     GEN9_DECOUPLED_OP_READ);

        return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
                            u32 reg, u32 data,
                            enum forcewake_domains fw_domain)
{
        __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

        __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
                                     GEN9_DECOUPLED_OP_WRITE);
}

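/*
 * The GENx_{READ,WRITE}_{HEADER,FOOTER} macros below expand into the
 * per-width MMIO accessors (gen2/gen5/gen6/fwtable/vgpu variants) that
 * intel_uncore_init() installs through ASSIGN_{READ,WRITE}_MMIO_VFUNCS.
 */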
#define GEN2_READ_HEADER(x) \
        u##x val = 0; \
        assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        ilk_dummy_write(dev_priv); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
        u32 offset = i915_mmio_reg_offset(reg); \
        unsigned long irqflags; \
        u##x val = 0; \
        assert_rpm_wakelock_held(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
        unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
        unclaimed_reg_debug(dev_priv, reg, true, false); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;

        for_each_fw_domain_masked(domain, fw_domains, dev_priv)
                fw_domain_arm_timer(domain);

        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
{
        if (WARN_ON(!fw_domains))
                return;

        /* Turn on all requested but inactive supported forcewake domains. */
        fw_domains &= dev_priv->uncore.fw_domains;
        fw_domains &= ~dev_priv->uncore.fw_domains_active;

        if (fw_domains)
                ___force_wake_auto(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        fw_engine = __gen6_reg_read_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        fw_engine = __fwtable_reg_read_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
                       i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        fw_engine = __fwtable_reg_read_fw_domains(offset); \
        if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
                unsigned i; \
                u32 *ptr_data = (u32 *) &val; \
                for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
                        *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
                                                                 offset, \
                                                                 fw_engine); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
        assert_rpm_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        VGPU_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
        u32 offset = i915_mmio_reg_offset(reg); \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_wakelock_held(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
        unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
        unclaimed_reg_debug(dev_priv, reg, false, false); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE(offset)) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        fw_engine = __gen8_reg_write_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        fw_engine = __fwtable_reg_write_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
                        i915_reg_t reg, u##x val, \
                        bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        fw_engine = __fwtable_reg_write_fw_domains(offset); \
        if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
                __gen9_decoupled_mmio_write(dev_priv, \
                                            offset, \
                                            val, \
                                            fw_engine); \
        else \
                __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
                          i915_reg_t reg, u##x val, bool trace) { \
        VGPU_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
        dev_priv->uncore.funcs.mmio_writew = x##_write16; \
        dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_readb = x##_read8; \
        dev_priv->uncore.funcs.mmio_readw = x##_read16; \
        dev_priv->uncore.funcs.mmio_readl = x##_read32; \
        dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

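/*
 * Set up a single forcewake domain: record its set/ack registers, pick
 * the per-generation set/clear/reset values (masked bits on gen7+ per
 * WaRsClearFWBitsAtReset), choose a posting-read register on the same
 * cacheline, and initialise the delayed-release timer.
 */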
static void fw_domain_init(struct drm_i915_private *dev_priv,
                           enum forcewake_domain_id domain_id,
                           i915_reg_t reg_set,
                           i915_reg_t reg_ack)
{
        struct intel_uncore_forcewake_domain *d;

        if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
                return;

        d = &dev_priv->uncore.fw_domain[domain_id];

        WARN_ON(d->wake_count);

        d->wake_count = 0;
        d->reg_set = reg_set;
        d->reg_ack = reg_ack;

        if (IS_GEN6(dev_priv)) {
                d->val_reset = 0;
                d->val_set = FORCEWAKE_KERNEL;
                d->val_clear = 0;
        } else {
                /* WaRsClearFWBitsAtReset:bdw,skl */
                d->val_reset = _MASKED_BIT_DISABLE(0xffff);
                d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
                d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                d->reg_post = FORCEWAKE_ACK_VLV;
        else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
                d->reg_post = ECOBUS;

        d->i915 = dev_priv;
        d->id = domain_id;

        BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
        BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
        BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

        d->mask = 1 << domain_id;

        hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        d->timer.function = intel_uncore_fw_release_timer;

        dev_priv->uncore.fw_domains |= (1 << domain_id);

        fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
        if (INTEL_INFO(dev_priv)->gen <= 5)
                return;

        if (IS_GEN9(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_RENDER_GEN9,
                               FORCEWAKE_ACK_RENDER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
                               FORCEWAKE_BLITTER_GEN9,
                               FORCEWAKE_ACK_BLITTER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                if (!IS_CHERRYVIEW(dev_priv))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                if (IS_HASWELL(dev_priv))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev_priv)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero. Which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;

                /* We need to init first for ECOBUS access and then
                 * determine later if we want to reinit, in case MT
                 * access is not working. At this stage we don't know
                 * which flavour this ivb is, so it is better to reset
                 * also the gen6 fw registers before the ecobus check.
                 */

                __raw_i915_write32(dev_priv, FORCEWAKE, 0);
                __raw_posting_read(dev_priv, ECOBUS);

                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);

                spin_lock_irq(&dev_priv->uncore.lock);
                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
                spin_unlock_irq(&dev_priv->uncore.lock);

                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE, FORCEWAKE_ACK);
                }
        } else if (IS_GEN6(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE, FORCEWAKE_ACK);
        }

        /* All future platforms are expected to require complex power gating */
        WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
        dev_priv->uncore.fw_domains_table = \
                        (struct intel_forcewake_range *)(d); \
        dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
        i915_check_vgpu(dev_priv);

        intel_uncore_edram_detect(dev_priv);
        intel_uncore_fw_domains_init(dev_priv);
        __intel_uncore_early_sanitize(dev_priv, false);

        dev_priv->uncore.unclaimed_mmio_check = 1;

        switch (INTEL_INFO(dev_priv)->gen) {
        default:
        case 9:
                ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
                ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
                ASSIGN_READ_MMIO_VFUNCS(fwtable);
                if (HAS_DECOUPLED_MMIO(dev_priv)) {
                        dev_priv->uncore.funcs.mmio_readl =
                                                gen9_decoupled_read32;
                        dev_priv->uncore.funcs.mmio_readq =
                                                gen9_decoupled_read64;
                        dev_priv->uncore.funcs.mmio_writel =
                                                gen9_decoupled_write32;
                }
                break;
        case 8:
                if (IS_CHERRYVIEW(dev_priv)) {
                        ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
                        ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
                        ASSIGN_READ_MMIO_VFUNCS(fwtable);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen8);
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 7:
        case 6:
                ASSIGN_WRITE_MMIO_VFUNCS(gen6);

                if (IS_VALLEYVIEW(dev_priv)) {
                        ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
                        ASSIGN_READ_MMIO_VFUNCS(fwtable);
                } else {
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 5:
                ASSIGN_WRITE_MMIO_VFUNCS(gen5);
                ASSIGN_READ_MMIO_VFUNCS(gen5);
                break;
        case 4:
        case 3:
        case 2:
                ASSIGN_WRITE_MMIO_VFUNCS(gen2);
                ASSIGN_READ_MMIO_VFUNCS(gen2);
                break;
        }

        intel_fw_table_check(dev_priv);
        if (INTEL_GEN(dev_priv) >= 8)
                intel_shadow_table_check();

        if (intel_vgpu_active(dev_priv)) {
                ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
                ASSIGN_READ_MMIO_VFUNCS(vgpu);
        }

        i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev_priv);
        intel_uncore_forcewake_reset(dev_priv, false);
}

1468#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
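    /*
     * Worked example (illustrative): INTEL_INFO()->gen_mask sets bit
     * (gen - 1), consistent with GEN_RANGE() above, so GEN_RANGE(4, 9) ==
     * GENMASK(8, 3) == 0x1f8 and covers gens 4 through 9, matching the
     * whitelist entry below.
     */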
1469
1470static const struct register_whitelist {
1471        i915_reg_t offset_ldw, offset_udw;
1472        uint32_t size;
1473        /* supported gens, 0x8 for gen 4, 0x18 for gens 4 and 5, etc. */
1474        uint32_t gen_bitmask;
1475} whitelist[] = {
1476        { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1477          .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1478          .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
1479};
1480
1481int i915_reg_read_ioctl(struct drm_device *dev,
1482                        void *data, struct drm_file *file)
1483{
1484        struct drm_i915_private *dev_priv = to_i915(dev);
1485        struct drm_i915_reg_read *reg = data;
1486        struct register_whitelist const *entry = whitelist;
1487        unsigned size;
1488        i915_reg_t offset_ldw, offset_udw;
1489        int i, ret = 0;
1490
1491        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1492                if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1493                    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
1494                        break;
1495        }
1496
1497        if (i == ARRAY_SIZE(whitelist))
1498                return -EINVAL;
1499
1500        /* We use the low bits to encode extra flags as the register should
1501         * be naturally aligned (and those that are not so aligned merely
1502         * limit the available flags for that register).
1503         */
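            /*
             * Illustrative example: for the 8-byte RING_TIMESTAMP entry
             * above, a reg->offset with bit 0 set yields size == (8 | 1),
             * selecting the 2x32 read in the switch below.
             */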
1504        offset_ldw = entry->offset_ldw;
1505        offset_udw = entry->offset_udw;
1506        size = entry->size;
1507        size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
1508
1509        intel_runtime_pm_get(dev_priv);
1510
1511        switch (size) {
1512        case 8 | 1:
1513                reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
1514                break;
1515        case 8:
1516                reg->val = I915_READ64(offset_ldw);
1517                break;
1518        case 4:
1519                reg->val = I915_READ(offset_ldw);
1520                break;
1521        case 2:
1522                reg->val = I915_READ16(offset_ldw);
1523                break;
1524        case 1:
1525                reg->val = I915_READ8(offset_ldw);
1526                break;
1527        default:
1528                ret = -EINVAL;
1529                goto out;
1530        }
1531
1532out:
1533        intel_runtime_pm_put(dev_priv);
1534        return ret;
1535}
1536
1537static int i915_reset_complete(struct pci_dev *pdev)
1538{
1539        u8 gdrst;
1540        pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1541        return (gdrst & GRDOM_RESET_STATUS) == 0;
1542}
1543
1544static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1545{
1546        struct pci_dev *pdev = dev_priv->drm.pdev;
1547
1548        /* assert reset for at least 20 usec */
1549        pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1550        udelay(20);
1551        pci_write_config_byte(pdev, I915_GDRST, 0);
1552
1553        return wait_for(i915_reset_complete(pdev), 500);
1554}
1555
1556static int g4x_reset_complete(struct pci_dev *pdev)
1557{
1558        u8 gdrst;
1559        pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1560        return (gdrst & GRDOM_RESET_ENABLE) == 0;
1561}
1562
1563static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1564{
1565        struct pci_dev *pdev = dev_priv->drm.pdev;
1566        pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1567        return wait_for(g4x_reset_complete(pdev), 500);
1568}
1569
1570static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1571{
1572        struct pci_dev *pdev = dev_priv->drm.pdev;
1573        int ret;
1574
1575        pci_write_config_byte(pdev, I915_GDRST,
1576                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
1577        ret = wait_for(g4x_reset_complete(pdev), 500);
1578        if (ret)
1579                return ret;
1580
1581        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1582        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1583        POSTING_READ(VDECCLK_GATE_D);
1584
1585        pci_write_config_byte(pdev, I915_GDRST,
1586                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1587        ret = wait_for(g4x_reset_complete(pdev), 500);
1588        if (ret)
1589                return ret;
1590
1591        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1592        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1593        POSTING_READ(VDECCLK_GATE_D);
1594
1595        pci_write_config_byte(pdev, I915_GDRST, 0);
1596
1597        return 0;
1598}
1599
1600static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1601                             unsigned engine_mask)
1602{
1603        int ret;
1604
1605        I915_WRITE(ILK_GDSR,
1606                   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1607        ret = intel_wait_for_register(dev_priv,
1608                                      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1609                                      500);
1610        if (ret)
1611                return ret;
1612
1613        I915_WRITE(ILK_GDSR,
1614                   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1615        ret = intel_wait_for_register(dev_priv,
1616                                      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1617                                      500);
1618        if (ret)
1619                return ret;
1620
1621        I915_WRITE(ILK_GDSR, 0);
1622
1623        return 0;
1624}
1625
1626/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1627static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1628                                u32 hw_domain_mask)
1629{
1630        /* GEN6_GDRST is not in the gt power well, so there is no need to
1631         * check for fifo space for the write, or to forcewake the chip for
1632         * the read.
1633         */
1634        __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
1635
1636        /* Spin waiting for the device to ack the reset requests */
1637        return intel_wait_for_register_fw(dev_priv,
1638                                          GEN6_GDRST, hw_domain_mask, 0,
1639                                          500);
1640}
1641
1642/**
1643 * gen6_reset_engines - reset individual engines
1644 * @dev_priv: i915 device
1645 * @engine_mask: mask of intel_engine_flag() engines or ALL_ENGINES for full reset
1646 *
1647 * This function will reset the individual engines that are set in engine_mask.
1648 * If you provide ALL_ENGINES as mask, a full global domain reset will be issued.
1649 *
1650 * Note: It is the responsibility of the caller to handle the difference between
1651 * requesting a full domain reset and a reset of all available individual engines.
1652 *
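     * A minimal sketch (illustrative; callers normally reach this through
     * intel_gpu_reset(), which holds forcewake across the call)::
     *
     *     err = gen6_reset_engines(dev_priv, ALL_ENGINES);
     *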
1653 * Returns 0 on success, nonzero on error.
1654 */
1655static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1656                              unsigned engine_mask)
1657{
1658        struct intel_engine_cs *engine;
1659        const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1660                [RCS] = GEN6_GRDOM_RENDER,
1661                [BCS] = GEN6_GRDOM_BLT,
1662                [VCS] = GEN6_GRDOM_MEDIA,
1663                [VCS2] = GEN8_GRDOM_MEDIA2,
1664                [VECS] = GEN6_GRDOM_VECS,
1665        };
1666        u32 hw_mask;
1667        int ret;
1668
1669        if (engine_mask == ALL_ENGINES) {
1670                hw_mask = GEN6_GRDOM_FULL;
1671        } else {
1672                unsigned int tmp;
1673
1674                hw_mask = 0;
1675                for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1676                        hw_mask |= hw_engine_mask[engine->id];
1677        }
1678
1679        ret = gen6_hw_domain_reset(dev_priv, hw_mask);
1680
1681        intel_uncore_forcewake_reset(dev_priv, true);
1682
1683        return ret;
1684}
1685
1686/**
1687 * intel_wait_for_register_fw - wait until register matches expected state
1688 * @dev_priv: the i915 device
1689 * @reg: the register to read
1690 * @mask: mask to apply to register value
1691 * @value: expected value
1692 * @timeout_ms: timeout in milliseconds
1693 *
1694 * This routine waits until the target register @reg contains the expected
1695 * @value after applying the @mask, i.e. it waits until ::
1696 *
1697 *     (I915_READ_FW(reg) & mask) == value
1698 *
1699 * Otherwise, the wait will time out after @timeout_ms milliseconds.
1700 *
1701 * Note that this routine assumes the caller holds forcewake asserted; it is
1702 * not suitable for very long waits. See intel_wait_for_register() if you
1703 * wish to wait without holding forcewake for the duration (i.e. you expect
1704 * the wait to be slow).
1705 *
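     * A minimal sketch (illustrative, patterned on the engine-reset request
     * path below; the caller must already hold the needed forcewake
     * domains)::
     *
     *     intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
     *     err = intel_wait_for_register_fw(dev_priv,
     *                                      RING_RESET_CTL(engine->mmio_base),
     *                                      RESET_CTL_READY_TO_RESET,
     *                                      RESET_CTL_READY_TO_RESET, 700);
     *     intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
     *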
1706 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1707 */
1708int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
1709                               i915_reg_t reg,
1710                               const u32 mask,
1711                               const u32 value,
1712                               const unsigned long timeout_ms)
1713{
1714#define done ((I915_READ_FW(reg) & mask) == value)
1715        int ret = wait_for_us(done, 2);
1716        if (ret)
1717                ret = wait_for(done, timeout_ms);
1718        return ret;
1719#undef done
1720}
1721
1722/**
1723 * intel_wait_for_register - wait until register matches expected state
1724 * @dev_priv: the i915 device
1725 * @reg: the register to read
1726 * @mask: mask to apply to register value
1727 * @value: expected value
1728 * @timeout_ms: timeout in milliseconds
1729 *
1730 * This routine waits until the target register @reg contains the expected
1731 * @value after applying the @mask, i.e. it waits until ::
1732 *
1733 *     (I915_READ(reg) & mask) == value
1734 *
1735 * Otherwise, the wait will time out after @timeout_ms milliseconds.
1736 *
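     * A minimal sketch (illustrative, mirroring the ironlake reset path
     * above)::
     *
     *     err = intel_wait_for_register(dev_priv, ILK_GDSR,
     *                                   ILK_GRDOM_RESET_ENABLE, 0, 500);
     *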
1737 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1738 */
1739int intel_wait_for_register(struct drm_i915_private *dev_priv,
1740                            i915_reg_t reg,
1741                            const u32 mask,
1742                            const u32 value,
1743                            const unsigned long timeout_ms)
1744{
1745
1746        unsigned fw =
1747                intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
1748        int ret;
1749
1750        intel_uncore_forcewake_get(dev_priv, fw);
1751        ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
1752        intel_uncore_forcewake_put(dev_priv, fw);
1753        if (ret)
1754                ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
1755                               timeout_ms);
1756
1757        return ret;
1758}
1759
1760static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1761{
1762        struct drm_i915_private *dev_priv = engine->i915;
1763        int ret;
1764
1765        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1766                      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1767
1768        ret = intel_wait_for_register_fw(dev_priv,
1769                                         RING_RESET_CTL(engine->mmio_base),
1770                                         RESET_CTL_READY_TO_RESET,
1771                                         RESET_CTL_READY_TO_RESET,
1772                                         700);
1773        if (ret)
1774                DRM_ERROR("%s: reset request timeout\n", engine->name);
1775
1776        return ret;
1777}
1778
1779static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1780{
1781        struct drm_i915_private *dev_priv = engine->i915;
1782
1783        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1784                      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1785}
1786
1787static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1788                              unsigned engine_mask)
1789{
1790        struct intel_engine_cs *engine;
1791        unsigned int tmp;
1792
1793        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1794                if (gen8_request_engine_reset(engine))
1795                        goto not_ready;
1796
1797        return gen6_reset_engines(dev_priv, engine_mask);
1798
1799not_ready:
1800        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1801                gen8_unrequest_engine_reset(engine);
1802
1803        return -EIO;
1804}
1805
1806typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1807
1808static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1809{
1810        if (!i915.reset)
1811                return NULL;
1812
1813        if (INTEL_INFO(dev_priv)->gen >= 8)
1814                return gen8_reset_engines;
1815        else if (INTEL_INFO(dev_priv)->gen >= 6)
1816                return gen6_reset_engines;
1817        else if (IS_GEN5(dev_priv))
1818                return ironlake_do_reset;
1819        else if (IS_G4X(dev_priv))
1820                return g4x_do_reset;
1821        else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
1822                return g33_do_reset;
1823        else if (INTEL_INFO(dev_priv)->gen >= 3)
1824                return i915_do_reset;
1825        else
1826                return NULL;
1827}
1828
1829int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1830{
1831        reset_func reset;
1832        int ret;
1833
1834        reset = intel_get_gpu_reset(dev_priv);
1835        if (reset == NULL)
1836                return -ENODEV;
1837
1838        /* If the power well sleeps during the reset, the reset
1839         * request may be dropped and never completes (causing -EIO).
1840         */
1841        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1842        ret = reset(dev_priv, engine_mask);
1843        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1844
1845        return ret;
1846}
1847
1848bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
1849{
1850        return intel_get_gpu_reset(dev_priv) != NULL;
1851}
1852
1853int intel_guc_reset(struct drm_i915_private *dev_priv)
1854{
1855        int ret;
1856        unsigned long irqflags;
1857
1858        if (!HAS_GUC(dev_priv))
1859                return -EINVAL;
1860
1861        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1862        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1863
1864        ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
1865
1866        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1867        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1868
1869        return ret;
1870}
1871
1872bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
1873{
1874        return check_for_unclaimed_mmio(dev_priv);
1875}
1876
1877bool
1878intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1879{
1880        if (unlikely(i915.mmio_debug ||
1881                     dev_priv->uncore.unclaimed_mmio_check <= 0))
1882                return false;
1883
1884        if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1885                DRM_DEBUG("Unclaimed register detected, "
1886                          "enabling oneshot unclaimed register reporting. "
1887                          "Please use i915.mmio_debug=N for more information.\n");
1888                i915.mmio_debug++;
1889                dev_priv->uncore.unclaimed_mmio_check--;
1890                return true;
1891        }
1892
1893        return false;
1894}
1895
1896static enum forcewake_domains
1897intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1898                                i915_reg_t reg)
1899{
1900        u32 offset = i915_mmio_reg_offset(reg);
1901        enum forcewake_domains fw_domains;
1902
1903        if (HAS_FWTABLE(dev_priv)) {
1904                fw_domains = __fwtable_reg_read_fw_domains(offset);
1905        } else if (INTEL_GEN(dev_priv) >= 6) {
1906                fw_domains = __gen6_reg_read_fw_domains(offset);
1907        } else {
1908                WARN_ON(!IS_GEN(dev_priv, 2, 5));
1909                fw_domains = 0;
1910        }
1911
1912        WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1913
1914        return fw_domains;
1915}
1916
1917static enum forcewake_domains
1918intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1919                                 i915_reg_t reg)
1920{
1921        u32 offset = i915_mmio_reg_offset(reg);
1922        enum forcewake_domains fw_domains;
1923
1924        if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
1925                fw_domains = __fwtable_reg_write_fw_domains(offset);
1926        } else if (IS_GEN8(dev_priv)) {
1927                fw_domains = __gen8_reg_write_fw_domains(offset);
1928        } else if (IS_GEN(dev_priv, 6, 7)) {
1929                fw_domains = FORCEWAKE_RENDER;
1930        } else {
1931                WARN_ON(!IS_GEN(dev_priv, 2, 5));
1932                fw_domains = 0;
1933        }
1934
1935        WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1936
1937        return fw_domains;
1938}
1939
1940/**
1941 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
1942 *                                  a register
1943 * @dev_priv: pointer to struct drm_i915_private
1944 * @reg: register in question
1945 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
1946 *
1947 * Returns the set of forcewake domains that must be held (for example via
1948 * intel_uncore_forcewake_get()) for the specified register to be accessible
1949 * in the specified mode (read, write or read/write) with raw mmio accessors.
1950 *
1951 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
1952 * callers to do FIFO management on their own or risk losing writes.
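     *
     * A minimal sketch (illustrative, following the pattern used by
     * intel_wait_for_register() in this file)::
     *
     *     fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
     *     intel_uncore_forcewake_get(dev_priv, fw);
     *     val = I915_READ_FW(reg);
     *     intel_uncore_forcewake_put(dev_priv, fw);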
1953 */
1954enum forcewake_domains
1955intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
1956                               i915_reg_t reg, unsigned int op)
1957{
1958        enum forcewake_domains fw_domains = 0;
1959
1960        WARN_ON(!op);
1961
1962        if (intel_vgpu_active(dev_priv))
1963                return 0;
1964
1965        if (op & FW_REG_READ)
1966                fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
1967
1968        if (op & FW_REG_WRITE)
1969                fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
1970
1971        return fw_domains;
1972}
1973