linux/drivers/gpu/drm/i915/intel_uncore.c
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "gt/intel_lrc_reg.h" /* for shadow reg list */

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS       10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
        spin_lock_init(&mmio_debug->lock);
        mmio_debug->unclaimed_mmio_check = 1;
}

static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
        lockdep_assert_held(&mmio_debug->lock);

        /* Save and disable mmio debugging for the user bypass */
        if (!mmio_debug->suspend_count++) {
                mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
                mmio_debug->unclaimed_mmio_check = 0;
        }
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
        lockdep_assert_held(&mmio_debug->lock);

        if (!--mmio_debug->suspend_count)
                mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}

static const char * const forcewake_domain_names[] = {
        "render",
        "blitter",
        "media",
        "vdbox0",
        "vdbox1",
        "vdbox2",
        "vdbox3",
        "vdbox4",
        "vdbox5",
        "vdbox6",
        "vdbox7",
        "vebox0",
        "vebox1",
        "vebox2",
        "vebox3",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
                return forcewake_domain_names[id];

        WARN_ON(id);

        return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
        /*
         * We don't really know if the powerwell for the forcewake domain we are
         * trying to reset here does exist at this point (engines could be fused
         * off in ICL+), so no waiting for acks
         */
        /* WaRsClearFWBitsAtReset:bdw,skl */
        fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
        GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
        d->uncore->fw_domains_timer |= d->mask;
        d->wake_count++;
        hrtimer_start_range_ns(&d->timer,
                               NSEC_PER_MSEC,
                               NSEC_PER_MSEC,
                               HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
               const u32 ack,
               const u32 value)
{
        return wait_for_atomic((fw_ack(d) & ack) == value,
                               FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
               const u32 ack)
{
        return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
             const u32 ack)
{
        return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
                add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
        }
}

enum ack_type {
        ACK_CLEAR = 0,
        ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
                                 const enum ack_type type)
{
        const u32 ack_bit = FORCEWAKE_KERNEL;
        const u32 value = type == ACK_SET ? ack_bit : 0;
        unsigned int pass;
        bool ack_detected;

        /*
         * There is a possibility of driver's wake request colliding
         * with hardware's own wake requests and that can cause
         * hardware to not deliver the driver's ack message.
         *
         * Use a fallback bit toggle to kick the gpu state machine
         * in the hope that the original ack will be delivered along with
         * the fallback ack.
         *
         * This workaround is described in HSDES #1604254524 and it's known as:
         * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
         * although the name is a bit misleading.
         */

        pass = 1;
        do {
                wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

                fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
                /* Give gt some time to relax before the polling frenzy */
                udelay(10 * pass);
                wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

                ack_detected = (fw_ack(d) & ack_bit) == value;

                fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
        } while (!ack_detected && pass++ < 10);

        DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
                         intel_uncore_forcewake_domain_to_str(d->id),
                         type == ACK_SET ? "set" : "clear",
                         fw_ack(d),
                         pass);

        return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
        if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
                return;

        if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
                fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
        fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
                add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
        }
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
        if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
                return;

        if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
                fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
        fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        unsigned int tmp;

        GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
                fw_domain_wait_ack_clear(d);
                fw_domain_get(d);
        }

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
                fw_domain_wait_ack_set(d);

        uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
                             enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        unsigned int tmp;

        GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
                fw_domain_wait_ack_clear_fallback(d);
                fw_domain_get(d);
        }

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
                fw_domain_wait_ack_set_fallback(d);

        uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        unsigned int tmp;

        GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
                fw_domain_put(d);

        uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
                 enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        unsigned int tmp;

        if (!fw_domains)
                return;

        GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

        for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
                fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
        u32 val;

        val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
        val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

        return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
        /*
         * w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        drm_WARN_ONCE(&uncore->i915->drm,
                      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
                      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
                                              enum forcewake_domains fw_domains)
{
        fw_domains_get(uncore, fw_domains);

        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
        __gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
        u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

        return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
        u32 n;

        /*
         * On VLV, the FIFO is shared by both SW and HW, so we need to
         * read FREE_ENTRIES every time.
         */
        if (IS_VALLEYVIEW(uncore->i915))
                n = fifo_free_entries(uncore);
        else
                n = uncore->fifo_count;

        if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
                if (wait_for_atomic((n = fifo_free_entries(uncore)) >
                                    GT_FIFO_NUM_RESERVED_ENTRIES,
                                    GT_FIFO_TIMEOUT_MS)) {
                        drm_dbg(&uncore->i915->drm,
                                "GT_FIFO timeout, entries: %u\n", n);
                        return;
                }
        }

        uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
        struct intel_uncore_forcewake_domain *domain =
               container_of(timer, struct intel_uncore_forcewake_domain, timer);
        struct intel_uncore *uncore = domain->uncore;
        unsigned long irqflags;

        assert_rpm_device_not_suspended(uncore->rpm);

        if (xchg(&domain->active, false))
                return HRTIMER_RESTART;

        spin_lock_irqsave(&uncore->lock, irqflags);

        uncore->fw_domains_timer &= ~domain->mask;

        GEM_BUG_ON(!domain->wake_count);
        if (--domain->wake_count == 0)
                uncore->funcs.force_wake_put(uncore, domain->mask);

        spin_unlock_irqrestore(&uncore->lock, irqflags);

        return HRTIMER_NORESTART;
}

/* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
        enum forcewake_domains fw, active_domains;

        iosf_mbi_assert_punit_acquired();

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly. Wait until all pending
         * timers are run before holding.
         */
        while (1) {
                unsigned int tmp;

                active_domains = 0;

                for_each_fw_domain(domain, uncore, tmp) {
                        smp_store_mb(domain->active, false);
                        if (hrtimer_cancel(&domain->timer) == 0)
                                continue;

                        intel_uncore_fw_release_timer(&domain->timer);
                }

                spin_lock_irqsave(&uncore->lock, irqflags);

                for_each_fw_domain(domain, uncore, tmp) {
                        if (hrtimer_active(&domain->timer))
                                active_domains |= domain->mask;
                }

                if (active_domains == 0)
                        break;

                if (--retry_count == 0) {
                        drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
                        break;
                }

                spin_unlock_irqrestore(&uncore->lock, irqflags);
                cond_resched();
        }

        drm_WARN_ON(&uncore->i915->drm, active_domains);

        fw = uncore->fw_domains_active;
        if (fw)
                uncore->funcs.force_wake_put(uncore, fw);

        fw_domains_reset(uncore, uncore->fw_domains);
        assert_forcewakes_inactive(uncore);

        spin_unlock_irqrestore(&uncore->lock, irqflags);

        return fw; /* track the lost user forcewake domains */
}
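
/*
 * Illustrative call pattern (a sketch mirroring the callers below, e.g.
 * forcewake_early_sanitize() and intel_uncore_suspend()): the reset is
 * always bracketed by the PUNIT->PMIC bus lock:
 *
 *        iosf_mbi_punit_acquire();
 *        intel_uncore_forcewake_reset(uncore);
 *        iosf_mbi_punit_release();
 */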

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
        u32 dbg;

        dbg = __raw_uncore_read32(uncore, FPGA_DBG);
        if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
                return false;

        /*
         * Bugs in PCI programming (or failing hardware) can occasionally cause
         * us to lose access to the MMIO BAR.  When this happens, register
         * reads will come back with 0xFFFFFFFF for every register and things
         * go bad very quickly.  Let's try to detect that special case and at
         * least try to print a more informative message about what has
         * happened.
         *
         * During normal operation the FPGA_DBG register has several unused
         * bits that will always read back as 0's so we can use them as canaries
         * to recognize when MMIO accesses are just busted.
         */
        if (unlikely(dbg == ~0))
                drm_err(&uncore->i915->drm,
                        "Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

        __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
        u32 cer;

        cer = __raw_uncore_read32(uncore, CLAIM_ER);
        if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
                return false;

        __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

        return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
        u32 fifodbg;

        fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

        if (unlikely(fifodbg)) {
                drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
                __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
        }

        return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
        bool ret = false;

        lockdep_assert_held(&uncore->debug->lock);

        if (uncore->debug->suspend_count)
                return false;

        if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
                ret |= fpga_check_for_unclaimed_mmio(uncore);

        if (intel_uncore_has_dbg_unclaimed(uncore))
                ret |= vlv_check_for_unclaimed_mmio(uncore);

        if (intel_uncore_has_fifo(uncore))
                ret |= gen6_check_for_fifo_debug(uncore);

        return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
                                     unsigned int restore_forcewake)
{
        GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

        /* WaDisableShadowRegForCpd:chv */
        if (IS_CHERRYVIEW(uncore->i915)) {
                __raw_uncore_write32(uncore, GTFIFOCTL,
                                     __raw_uncore_read32(uncore, GTFIFOCTL) |
                                     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
                                     GT_FIFO_CTL_RC6_POLICY_STALL);
        }

        iosf_mbi_punit_acquire();
        intel_uncore_forcewake_reset(uncore);
        if (restore_forcewake) {
                spin_lock_irq(&uncore->lock);
                uncore->funcs.force_wake_get(uncore, restore_forcewake);

                if (intel_uncore_has_fifo(uncore))
                        uncore->fifo_count = fifo_free_entries(uncore);
                spin_unlock_irq(&uncore->lock);
        }
        iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
        if (!intel_uncore_has_forcewake(uncore))
                return;

        iosf_mbi_punit_acquire();
        iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
                &uncore->pmic_bus_access_nb);
        uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
        iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
        unsigned int restore_forcewake;

        if (intel_uncore_unclaimed_mmio(uncore))
                drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

        if (!intel_uncore_has_forcewake(uncore))
                return;

        restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
        forcewake_early_sanitize(uncore, restore_forcewake);

        iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
        if (!intel_uncore_has_forcewake(uncore))
                return;

        iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        unsigned int tmp;

        fw_domains &= uncore->fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                if (domain->wake_count++) {
                        fw_domains &= ~domain->mask;
                        domain->active = true;
                }
        }

        if (fw_domains)
                uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and the reference should subsequently be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all the domains to be kept awake, in which case @fw_domains would be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!uncore->funcs.force_wake_get)
                return;

        assert_rpm_wakelock_held(uncore->rpm);

        spin_lock_irqsave(&uncore->lock, irqflags);
        __intel_uncore_forcewake_get(uncore, fw_domains);
        spin_unlock_irqrestore(&uncore->lock, irqflags);
}
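
/*
 * Usage sketch (illustrative only, not a caller in this file): with a
 * runtime PM wakeref already held, a sequence that must keep the render
 * powerwell awake brackets its accesses with a get/put pair:
 *
 *        intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *        ...read/write the registers that need the render well powered...
 *        intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */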

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
        spin_lock_irq(&uncore->lock);
        if (!uncore->user_forcewake_count++) {
                intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
                spin_lock(&uncore->debug->lock);
                mmio_debug_suspend(uncore->debug);
                spin_unlock(&uncore->debug->lock);
        }
        spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
        spin_lock_irq(&uncore->lock);
        if (!--uncore->user_forcewake_count) {
                spin_lock(&uncore->debug->lock);
                mmio_debug_resume(uncore->debug);

                if (check_for_unclaimed_mmio(uncore))
                        drm_info(&uncore->i915->drm,
                                 "Invalid mmio detected during user access\n");
                spin_unlock(&uncore->debug->lock);

                intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
        }
        spin_unlock_irq(&uncore->lock);
}
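
/*
 * Pairing sketch (illustrative only): code that exposes raw MMIO to
 * userspace, e.g. a debugfs interface, is expected to wrap the bypass
 * window with the user_get/user_put pair:
 *
 *        intel_uncore_forcewake_user_get(uncore);
 *        ...userspace performs its raw register accesses...
 *        intel_uncore_forcewake_user_put(uncore);
 */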

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
                                        enum forcewake_domains fw_domains)
{
        lockdep_assert_held(&uncore->lock);

        if (!uncore->funcs.force_wake_get)
                return;

        __intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        unsigned int tmp;

        fw_domains &= uncore->fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                GEM_BUG_ON(!domain->wake_count);

                if (--domain->wake_count) {
                        domain->active = true;
                        continue;
                }

                uncore->funcs.force_wake_put(uncore, domain->mask);
        }
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!uncore->funcs.force_wake_put)
                return;

        spin_lock_irqsave(&uncore->lock, irqflags);
        __intel_uncore_forcewake_put(uncore, fw_domains);
        spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
                                  enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        unsigned int tmp;

        if (!uncore->funcs.force_wake_put)
                return;

        fw_domains &= uncore->fw_domains;
        for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                WRITE_ONCE(domain->active, false);
                if (hrtimer_cancel(&domain->timer))
                        intel_uncore_fw_release_timer(&domain->timer);
        }
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
                                        enum forcewake_domains fw_domains)
{
        lockdep_assert_held(&uncore->lock);

        if (!uncore->funcs.force_wake_put)
                return;

        __intel_uncore_forcewake_put(uncore, fw_domains);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
        if (!uncore->funcs.force_wake_get)
                return;

        drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
                 "Expected all fw_domains to be inactive, but %08x are still on\n",
                 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
                              enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        unsigned int tmp;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
                return;

        if (!uncore->funcs.force_wake_get)
                return;

        spin_lock_irq(&uncore->lock);

        assert_rpm_wakelock_held(uncore->rpm);

        fw_domains &= uncore->fw_domains;
        drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
                 "Expected %08x fw_domains to be active, but %08x are off\n",
                 fw_domains, fw_domains & ~uncore->fw_domains_active);

        /*
         * Check that the caller has an explicit wakeref and we don't mistake
         * it for the auto wakeref.
         */
        for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                unsigned int actual = READ_ONCE(domain->wake_count);
                unsigned int expect = 1;

                if (uncore->fw_domains_timer & domain->mask)
                        expect++; /* pending automatic release */

                if (drm_WARN(&uncore->i915->drm, actual < expect,
                             "Expected domain %d to be held awake by caller, count=%d\n",
                             domain->id, actual))
                        break;
        }

        spin_unlock_irq(&uncore->lock);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
        if (offset < entry->start)
                return -1;
        else if (offset > entry->end)
                return 1;
        else
                return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
        unsigned int start__ = 0, end__ = (num);                        \
        typeof(base) result__ = NULL;                                   \
        while (start__ < end__) {                                       \
                unsigned int mid__ = start__ + (end__ - start__) / 2;   \
                int ret__ = (cmp)((key), (base) + mid__);               \
                if (ret__ < 0) {                                        \
                        end__ = mid__;                                  \
                } else if (ret__ > 0) {                                 \
                        start__ = mid__ + 1;                            \
                } else {                                                \
                        result__ = (base) + mid__;                      \
                        break;                                          \
                }                                                       \
        }                                                               \
        result__;                                                       \
})

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
        const struct intel_forcewake_range *entry;

        entry = BSEARCH(offset,
                        uncore->fw_domains_table,
                        uncore->fw_domains_table_entries,
                        fw_range_cmp);

        if (!entry)
                return 0;

        /*
         * The list of FW domains depends on the SKU in gen11+ so we
         * can't determine it statically. We use FORCEWAKE_ALL and
         * translate it here to the list of available domains.
         */
        if (entry->domains == FORCEWAKE_ALL)
                return uncore->fw_domains;

        drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
                 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
                 entry->domains & ~uncore->fw_domains, offset);

        return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
        { .start = (s), .end = (e), .domains = (d) }

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset))) \
                __fwd = find_fw_domain(uncore, offset); \
        __fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
        find_fw_domain(uncore, offset)

#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
        find_fw_domain(uncore, offset)

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),    /* 0x2000 (base) */
        GEN6_RPNSWREQ,                  /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,             /* 0xA00C */
        RING_TAIL(GEN6_BSD_RING_BASE),  /* 0x12000 (base) */
        RING_TAIL(VEBOX_RING_BASE),     /* 0x1a000 (base) */
        RING_TAIL(BLT_RING_BASE),       /* 0x22000 (base) */
        /* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),                    /* 0x2000 (base) */
        RING_EXECLIST_CONTROL(RENDER_RING_BASE),        /* 0x2550 */
        GEN6_RPNSWREQ,                                  /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,                             /* 0xA00C */
        RING_TAIL(BLT_RING_BASE),                       /* 0x22000 (base) */
        RING_EXECLIST_CONTROL(BLT_RING_BASE),           /* 0x22550 */
        RING_TAIL(GEN11_BSD_RING_BASE),                 /* 0x1C0000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE),     /* 0x1C0550 */
        RING_TAIL(GEN11_BSD2_RING_BASE),                /* 0x1C4000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE),    /* 0x1C4550 */
        RING_TAIL(GEN11_VEBOX_RING_BASE),               /* 0x1C8000 (base) */
        RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE),   /* 0x1C8550 */
        RING_TAIL(GEN11_BSD3_RING_BASE),                /* 0x1D0000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE),    /* 0x1D0550 */
        RING_TAIL(GEN11_BSD4_RING_BASE),                /* 0x1D4000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE),    /* 0x1D4550 */
        RING_TAIL(GEN11_VEBOX2_RING_BASE),              /* 0x1D8000 (base) */
        RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE),  /* 0x1D8550 */
        /* TODO: Other registers are not yet used */
};

static const i915_reg_t gen12_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),                    /* 0x2000 (base) */
        RING_EXECLIST_CONTROL(RENDER_RING_BASE),        /* 0x2550 */
        GEN6_RPNSWREQ,                                  /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,                             /* 0xA00C */
        RING_TAIL(BLT_RING_BASE),                       /* 0x22000 (base) */
        RING_EXECLIST_CONTROL(BLT_RING_BASE),           /* 0x22550 */
        RING_TAIL(GEN11_BSD_RING_BASE),                 /* 0x1C0000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE),     /* 0x1C0550 */
        RING_TAIL(GEN11_BSD2_RING_BASE),                /* 0x1C4000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE),    /* 0x1C4550 */
        RING_TAIL(GEN11_VEBOX_RING_BASE),               /* 0x1C8000 (base) */
        RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE),   /* 0x1C8550 */
        RING_TAIL(GEN11_BSD3_RING_BASE),                /* 0x1D0000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE),    /* 0x1D0550 */
        RING_TAIL(GEN11_BSD4_RING_BASE),                /* 0x1D4000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE),    /* 0x1D4550 */
        RING_TAIL(GEN11_VEBOX2_RING_BASE),              /* 0x1D8000 (base) */
        RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE),  /* 0x1D8550 */
        /* TODO: Other registers are not yet used */
};

static const i915_reg_t xehp_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),                    /* 0x2000 (base) */
        RING_EXECLIST_CONTROL(RENDER_RING_BASE),        /* 0x2550 */
        GEN6_RPNSWREQ,                                  /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,                             /* 0xA00C */
        RING_TAIL(BLT_RING_BASE),                       /* 0x22000 (base) */
        RING_EXECLIST_CONTROL(BLT_RING_BASE),           /* 0x22550 */
        RING_TAIL(GEN11_BSD_RING_BASE),                 /* 0x1C0000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE),     /* 0x1C0550 */
        RING_TAIL(GEN11_BSD2_RING_BASE),                /* 0x1C4000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE),    /* 0x1C4550 */
        RING_TAIL(GEN11_VEBOX_RING_BASE),               /* 0x1C8000 (base) */
        RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE),   /* 0x1C8550 */
        RING_TAIL(GEN11_BSD3_RING_BASE),                /* 0x1D0000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE),    /* 0x1D0550 */
        RING_TAIL(GEN11_BSD4_RING_BASE),                /* 0x1D4000 (base) */
        RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE),    /* 0x1D4550 */
        RING_TAIL(GEN11_VEBOX2_RING_BASE),              /* 0x1D8000 (base) */
        RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE),  /* 0x1D8550 */
        RING_TAIL(XEHP_BSD5_RING_BASE),                 /* 0x1E0000 (base) */
        RING_EXECLIST_CONTROL(XEHP_BSD5_RING_BASE),     /* 0x1E0550 */
        RING_TAIL(XEHP_BSD6_RING_BASE),                 /* 0x1E4000 (base) */
        RING_EXECLIST_CONTROL(XEHP_BSD6_RING_BASE),     /* 0x1E4550 */
        RING_TAIL(XEHP_VEBOX3_RING_BASE),               /* 0x1E8000 (base) */
        RING_EXECLIST_CONTROL(XEHP_VEBOX3_RING_BASE),   /* 0x1E8550 */
        RING_TAIL(XEHP_BSD7_RING_BASE),                 /* 0x1F0000 (base) */
        RING_EXECLIST_CONTROL(XEHP_BSD7_RING_BASE),     /* 0x1F0550 */
        RING_TAIL(XEHP_BSD8_RING_BASE),                 /* 0x1F4000 (base) */
        RING_EXECLIST_CONTROL(XEHP_BSD8_RING_BASE),     /* 0x1F4550 */
        RING_TAIL(XEHP_VEBOX4_RING_BASE),               /* 0x1F8000 (base) */
        RING_EXECLIST_CONTROL(XEHP_VEBOX4_RING_BASE),   /* 0x1F8550 */
        /* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
        u32 offset = i915_mmio_reg_offset(*reg);

        if (key < offset)
                return -1;
        else if (key > offset)
                return 1;
        else
                return 0;
}

#define __is_X_shadowed(x) \
static bool is_##x##_shadowed(u32 offset) \
{ \
        const i915_reg_t *regs = x##_shadowed_regs; \
        return BSEARCH(offset, regs, ARRAY_SIZE(x##_shadowed_regs), \
                       mmio_reg_cmp); \
}

__is_X_shadowed(gen8)
__is_X_shadowed(gen11)
__is_X_shadowed(gen12)
__is_X_shadowed(xehp)

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
        return FORCEWAKE_RENDER;
}

#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
                __fwd = find_fw_domain(uncore, offset); \
        __fwd; \
})

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        const u32 __offset = (offset); \
        if (!is_gen11_shadowed(__offset)) \
                __fwd = find_fw_domain(uncore, __offset); \
        __fwd; \
})

#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        const u32 __offset = (offset); \
        if (!is_gen12_shadowed(__offset)) \
                __fwd = find_fw_domain(uncore, __offset); \
        __fwd; \
})

#define __xehp_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        const u32 __offset = (offset); \
        if (!is_xehp_shadowed(__offset)) \
                __fwd = find_fw_domain(uncore, __offset); \
        __fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
        GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
        GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
        GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
        GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
        GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x8800, 0x8bff, 0),
        GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
        GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x9560, 0x95ff, 0),
        GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
        GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x24000, 0x2407f, 0),
        GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
        GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x40000, 0x1bffff, 0),
        GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
        GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
        GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
        GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
        GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};

/*
 * *Must* be sorted by offset ranges! See intel_fw_table_check().
 *
 * Note that the spec lists several reserved/unused ranges that don't
 * actually contain any registers.  In the table below we'll combine those
 * reserved ranges with either the preceding or following range to keep the
 * table small and lookups fast.
 */
static const struct intel_forcewake_range __gen12_fw_ranges[] = {
        GEN_FW_RANGE(0x0, 0x1fff, 0), /*
                0x0   -  0xaff: reserved
                0xb00 - 0x1fff: always on */
        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
                0x4000 - 0x48ff: gt
                0x4900 - 0x51ff: reserved */
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
                0x5200 - 0x53ff: render
                0x5400 - 0x54ff: reserved
                0x5500 - 0x7fff: render */
        GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
                0x8160 - 0x817f: reserved
                0x8180 - 0x81ff: always on */
        GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
                0x8500 - 0x87ff: gt
                0x8800 - 0x8fff: reserved
                0x9000 - 0x947f: gt
                0x9480 - 0x94cf: reserved */
        GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
                0x9560 - 0x95ff: always on
                0x9600 - 0x97ff: reserved */
        GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
        GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
                0xb400 - 0xbf7f: gt
                0xb480 - 0xbfff: reserved
                0xc000 - 0xcfff: gt */
        GEN_FW_RANGE(0xd000, 0xd7ff, 0),
        GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
        GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
                0xdc00 - 0xddff: render
                0xde00 - 0xde7f: reserved
                0xde80 - 0xe8ff: render
                0xe900 - 0xefff: reserved */
        GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
                 0xf000 - 0xffff: gt
                0x10000 - 0x147ff: reserved */
        GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
                0x14800 - 0x14fff: render
                0x15000 - 0x16dff: reserved
                0x16e00 - 0x1bfff: render
                0x1c000 - 0x1ffff: reserved */
        GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
        GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
        GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
                0x24000 - 0x2407f: always on
                0x24080 - 0x2417f: reserved */
        GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
                0x24180 - 0x241ff: gt
                0x24200 - 0x249ff: reserved */
        GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
                0x24a00 - 0x24a7f: render
                0x24a80 - 0x251ff: reserved */
        GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
                0x25200 - 0x252ff: gt
                0x25300 - 0x255ff: reserved */
        GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
        GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
                0x25680 - 0x256ff: VD2
                0x25700 - 0x259ff: reserved */
        GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
        GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
                0x25a80 - 0x25aff: VD2
                0x25b00 - 0x2ffff: reserved */
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
        GEN_FW_RANGE(0x40000, 0x1bffff, 0),
        GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
                0x1c0000 - 0x1c2bff: VD0
                0x1c2c00 - 0x1c2cff: reserved
                0x1c2d00 - 0x1c2dff: VD0
                0x1c2e00 - 0x1c3eff: reserved
                0x1c3f00 - 0x1c3fff: VD0 */
        GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
        GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
                0x1c8000 - 0x1ca0ff: VE0
                0x1ca100 - 0x1cbeff: reserved
                0x1cbf00 - 0x1cbfff: VE0 */
        GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
                0x1cc000 - 0x1ccfff: VD0
                0x1cd000 - 0x1cffff: reserved */
        GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
                0x1d0000 - 0x1d2bff: VD2
                0x1d2c00 - 0x1d2cff: reserved
                0x1d2d00 - 0x1d2dff: VD2
                0x1d2e00 - 0x1d3eff: reserved
                0x1d3f00 - 0x1d3fff: VD2 */
};

/*
 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
 * switching it from the GT domain to the render domain.
 *
 * *Must* be sorted by offset ranges! See intel_fw_table_check().
 */
#define XEHP_FWRANGES(FW_RANGE_D800)                                    \
        GEN_FW_RANGE(0x0, 0x1fff, 0), /*                                        \
                  0x0 -  0xaff: reserved                                        \
                0xb00 - 0x1fff: always on */                                    \
        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),                         \
        GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),                             \
        GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*                                     \
                0x4b00 - 0x4fff: reserved                                       \
                0x5000 - 0x51ff: always on */                                   \
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),                         \
        GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),                             \
        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),                         \
        GEN_FW_RANGE(0x8160, 0x81ff, 0), /*                                     \
                0x8160 - 0x817f: reserved                                       \
                0x8180 - 0x81ff: always on */                                   \
        GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),                             \
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),                         \
        GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*                          \
                0x8500 - 0x87ff: gt                                             \
                0x8800 - 0x8c7f: reserved                                       \
                0x8c80 - 0x8cff: gt (DG2 only) */                               \
        GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*                      \
                0x8d00 - 0x8dff: render (DG2 only)                              \
                0x8e00 - 0x8fff: reserved */                                    \
        GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*                          \
                0x9000 - 0x947f: gt                                             \
                0x9480 - 0x94cf: reserved */                                    \
        GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),                         \
        GEN_FW_RANGE(0x9560, 0x967f, 0), /*                                     \
                0x9560 - 0x95ff: always on                                      \
                0x9600 - 0x967f: reserved */                                    \
        GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*                      \
                0x9680 - 0x96ff: render (DG2 only)                              \
                0x9700 - 0x97ff: reserved */                                    \
        GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*                          \
                0x9800 - 0xb4ff: gt                                             \
                0xb500 - 0xbfff: reserved                                       \
                0xc000 - 0xcfff: gt */                                          \
        GEN_FW_RANGE(0xd000, 0xd7ff, 0),                                        \
        GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800),                    \
        GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),                             \
        GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),                         \
        GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*                          \
                0xdd00 - 0xddff: gt                                             \
                0xde00 - 0xde7f: reserved */                                    \
        GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*                      \
                0xde80 - 0xdfff: render                                         \
                0xe000 - 0xe0ff: reserved                                       \
                0xe100 - 0xe8ff: render */                                      \
        GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*                          \
                0xe900 - 0xe9ff: gt                                             \
                0xea00 - 0xefff: reserved                                       \
                0xf000 - 0xffff: gt */                                          \
        GEN_FW_RANGE(0x10000, 0x12fff, 0), /*                                   \
                0x10000 - 0x11fff: reserved                                     \
                0x12000 - 0x127ff: always on                                    \
                0x12800 - 0x12fff: reserved */                                  \
        GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */  \
        GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /*              \
                0x13200 - 0x133ff: VD2 (DG2 only)                               \
                0x13400 - 0x13fff: reserved */                                  \
        GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */      \
        GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */      \
        GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */      \
        GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */      \
1391        GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),                       \
1392        GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*                        \
1393                0x15000 - 0x15fff: gt (DG2 only)                                \
1394                0x16000 - 0x16dff: reserved */                                  \
1395        GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER),                       \
1396        GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /*              \
1397                0x20000 - 0x20fff: VD0 (XEHPSDV only)                           \
1398                0x21000 - 0x21fff: reserved */                                  \
1399        GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),                           \
1400        GEN_FW_RANGE(0x24000, 0x2417f, 0), /*                                   \
1401                0x24000 - 0x2407f: always on                                    \
1402                0x24080 - 0x2417f: reserved */                                  \
1403        GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*                        \
1404                0x24180 - 0x241ff: gt                                           \
1405                0x24200 - 0x249ff: reserved */                                  \
1406        GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*                    \
1407                0x24a00 - 0x24a7f: render                                       \
1408                0x24a80 - 0x251ff: reserved */                                  \
1409        GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*                        \
1410                0x25200 - 0x252ff: gt                                           \
1411                0x25300 - 0x25fff: reserved */                                  \
1412        GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*                    \
1413                0x26000 - 0x27fff: render                                       \
1414                0x28000 - 0x29fff: reserved                                     \
1415                0x2a000 - 0x2ffff: undocumented */                              \
1416        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),                           \
1417        GEN_FW_RANGE(0x40000, 0x1bffff, 0),                                     \
1418        GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*            \
1419                0x1c0000 - 0x1c2bff: VD0                                        \
1420                0x1c2c00 - 0x1c2cff: reserved                                   \
1421                0x1c2d00 - 0x1c2dff: VD0                                        \
1422                0x1c2e00 - 0x1c3eff: VD0 (DG2 only)                             \
1423                0x1c3f00 - 0x1c3fff: VD0 */                                     \
1424        GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*            \
1425                0x1c4000 - 0x1c6bff: VD1                                        \
1426                0x1c6c00 - 0x1c6cff: reserved                                   \
1427                0x1c6d00 - 0x1c6dff: VD1                                        \
1428                0x1c6e00 - 0x1c7fff: reserved */                                \
1429        GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*            \
1430                0x1c8000 - 0x1ca0ff: VE0                                        \
1431                0x1ca100 - 0x1cbfff: reserved */                                \
1432        GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),               \
1433        GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),               \
1434        GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),               \
1435        GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),               \
1436        GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*            \
1437                0x1d0000 - 0x1d2bff: VD2                                        \
1438                0x1d2c00 - 0x1d2cff: reserved                                   \
1439                0x1d2d00 - 0x1d2dff: VD2                                        \
1440                0x1d2e00 - 0x1d3dff: VD2 (DG2 only)                             \
1441                0x1d3e00 - 0x1d3eff: reserved                                   \
1442                0x1d3f00 - 0x1d3fff: VD2 */                                     \
1443        GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*            \
1444                0x1d4000 - 0x1d6bff: VD3                                        \
1445                0x1d6c00 - 0x1d6cff: reserved                                   \
1446                0x1d6d00 - 0x1d6dff: VD3                                        \
1447                0x1d6e00 - 0x1d7fff: reserved */                                \
1448        GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*            \
1449                0x1d8000 - 0x1da0ff: VE1                                        \
1450                0x1da100 - 0x1dffff: reserved */                                \
1451        GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*            \
1452                0x1e0000 - 0x1e2bff: VD4                                        \
1453                0x1e2c00 - 0x1e2cff: reserved                                   \
1454                0x1e2d00 - 0x1e2dff: VD4                                        \
1455                0x1e2e00 - 0x1e3eff: reserved                                   \
1456                0x1e3f00 - 0x1e3fff: VD4 */                                     \
1457        GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*            \
1458                0x1e4000 - 0x1e6bff: VD5                                        \
1459                0x1e6c00 - 0x1e6cff: reserved                                   \
1460                0x1e6d00 - 0x1e6dff: VD5                                        \
1461                0x1e6e00 - 0x1e7fff: reserved */                                \
1462        GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*            \
1463                0x1e8000 - 0x1ea0ff: VE2                                        \
1464                0x1ea100 - 0x1effff: reserved */                                \
1465        GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*            \
1466                0x1f0000 - 0x1f2bff: VD6                                        \
1467                0x1f2c00 - 0x1f2cff: reserved                                   \
1468                0x1f2d00 - 0x1f2dff: VD6                                        \
1469                0x1f2e00 - 0x1f3eff: reserved                                   \
1470                0x1f3f00 - 0x1f3fff: VD6 */                                     \
1471        GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*            \
1472                0x1f4000 - 0x1f6bff: VD7                                        \
1473                0x1f6c00 - 0x1f6cff: reserved                                   \
1474                0x1f6d00 - 0x1f6dff: VD7                                        \
1475                0x1f6e00 - 0x1f7fff: reserved */                                \
1476        GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
1477
1478static const struct intel_forcewake_range __xehp_fw_ranges[] = {
1479        XEHP_FWRANGES(FORCEWAKE_GT)
1480};
1481
1482static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1483        XEHP_FWRANGES(FORCEWAKE_RENDER)
1484};
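
/*
 * Illustrative note: at runtime a register offset is mapped to its
 * forcewake domain by binary-searching one of these sorted range tables.
 * E.g. an access to 0xd800 lands in the GEN_FW_RANGE(0xd800, 0xd87f, ...)
 * entry above, yielding FORCEWAKE_GT on XeHP SDV and FORCEWAKE_RENDER on
 * DG2, per the XEHP_FWRANGES() parameter.
 */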
1485
1486static void
1487ilk_dummy_write(struct intel_uncore *uncore)
1488{
1489        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1490         * the chip from rc6 before touching it for real. MI_MODE is masked,
1491         * hence harmless to write 0 into. */
1492        __raw_uncore_write32(uncore, MI_MODE, 0);
1493}
1494
1495static void
1496__unclaimed_reg_debug(struct intel_uncore *uncore,
1497                      const i915_reg_t reg,
1498                      const bool read,
1499                      const bool before)
1500{
1501        if (drm_WARN(&uncore->i915->drm,
1502                     check_for_unclaimed_mmio(uncore) && !before,
1503                     "Unclaimed %s register 0x%x\n",
1504                     read ? "read from" : "write to",
1505                     i915_mmio_reg_offset(reg)))
1506                /* Only report the first N failures */
1507                uncore->i915->params.mmio_debug--;
1508}
1509
1510static inline void
1511unclaimed_reg_debug(struct intel_uncore *uncore,
1512                    const i915_reg_t reg,
1513                    const bool read,
1514                    const bool before)
1515{
1516        if (likely(!uncore->i915->params.mmio_debug))
1517                return;
1518
1519        /* interrupts are disabled and re-enabled around uncore->lock usage */
1520        lockdep_assert_held(&uncore->lock);
1521
1522        if (before)
1523                spin_lock(&uncore->debug->lock);
1524
1525        __unclaimed_reg_debug(uncore, reg, read, before);
1526
1527        if (!before)
1528                spin_unlock(&uncore->debug->lock);
1529}
1530
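/*
 * When running as a vGPU guest (e.g. under GVT-g), MMIO accesses are
 * trapped and mediated by the hypervisor, so no forcewake handling or
 * locking is needed on our side: just do the raw access and trace it.
 */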
1531#define __vgpu_read(x) \
1532static u##x \
1533vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1534        u##x val = __raw_uncore_read##x(uncore, reg); \
1535        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1536        return val; \
1537}
1538__vgpu_read(8)
1539__vgpu_read(16)
1540__vgpu_read(32)
1541__vgpu_read(64)
1542
1543#define GEN2_READ_HEADER(x) \
1544        u##x val = 0; \
1545        assert_rpm_wakelock_held(uncore->rpm);
1546
1547#define GEN2_READ_FOOTER \
1548        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1549        return val
1550
1551#define __gen2_read(x) \
1552static u##x \
1553gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1554        GEN2_READ_HEADER(x); \
1555        val = __raw_uncore_read##x(uncore, reg); \
1556        GEN2_READ_FOOTER; \
1557}
1558
1559#define __gen5_read(x) \
1560static u##x \
1561gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1562        GEN2_READ_HEADER(x); \
1563        ilk_dummy_write(uncore); \
1564        val = __raw_uncore_read##x(uncore, reg); \
1565        GEN2_READ_FOOTER; \
1566}
1567
1568__gen5_read(8)
1569__gen5_read(16)
1570__gen5_read(32)
1571__gen5_read(64)
1572__gen2_read(8)
1573__gen2_read(16)
1574__gen2_read(32)
1575__gen2_read(64)
1576
1577#undef __gen5_read
1578#undef __gen2_read
1579
1580#undef GEN2_READ_FOOTER
1581#undef GEN2_READ_HEADER
1582
1583#define GEN6_READ_HEADER(x) \
1584        u32 offset = i915_mmio_reg_offset(reg); \
1585        unsigned long irqflags; \
1586        u##x val = 0; \
1587        assert_rpm_wakelock_held(uncore->rpm); \
1588        spin_lock_irqsave(&uncore->lock, irqflags); \
1589        unclaimed_reg_debug(uncore, reg, true, true)
1590
1591#define GEN6_READ_FOOTER \
1592        unclaimed_reg_debug(uncore, reg, true, false); \
1593        spin_unlock_irqrestore(&uncore->lock, irqflags); \
1594        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1595        return val
1596
1597static noinline void ___force_wake_auto(struct intel_uncore *uncore,
1598                                        enum forcewake_domains fw_domains)
1599{
1600        struct intel_uncore_forcewake_domain *domain;
1601        unsigned int tmp;
1602
1603        GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
1604
1605        for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
1606                fw_domain_arm_timer(domain);
1607
1608        uncore->funcs.force_wake_get(uncore, fw_domains);
1609}
1610
1611static inline void __force_wake_auto(struct intel_uncore *uncore,
1612                                     enum forcewake_domains fw_domains)
1613{
1614        GEM_BUG_ON(!fw_domains);
1615
1616        /* Turn on all requested but inactive supported forcewake domains. */
1617        fw_domains &= uncore->fw_domains;
1618        fw_domains &= ~uncore->fw_domains_active;
1619
1620        if (fw_domains)
1621                ___force_wake_auto(uncore, fw_domains);
1622}
1623
1624#define __gen_read(func, x) \
1625static u##x \
1626func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1627        enum forcewake_domains fw_engine; \
1628        GEN6_READ_HEADER(x); \
1629        fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
1630        if (fw_engine) \
1631                __force_wake_auto(uncore, fw_engine); \
1632        val = __raw_uncore_read##x(uncore, reg); \
1633        GEN6_READ_FOOTER; \
1634}
1635
1636#define __gen_reg_read_funcs(func) \
1637static enum forcewake_domains \
1638func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
1639        return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
1640} \
1641\
1642__gen_read(func, 8) \
1643__gen_read(func, 16) \
1644__gen_read(func, 32) \
1645__gen_read(func, 64)
1646
1647__gen_reg_read_funcs(gen12_fwtable);
1648__gen_reg_read_funcs(gen11_fwtable);
1649__gen_reg_read_funcs(fwtable);
1650__gen_reg_read_funcs(gen6);
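
/*
 * For illustration only (not compiled): this is what the macros above
 * generate, in effect, for the "fwtable" flavour, with declarations
 * reordered for readability:
 *
 *	static u32 fwtable_read32(struct intel_uncore *uncore,
 *				  i915_reg_t reg, bool trace)
 *	{
 *		u32 offset = i915_mmio_reg_offset(reg);
 *		enum forcewake_domains fw_engine;
 *		unsigned long irqflags;
 *		u32 val = 0;
 *
 *		assert_rpm_wakelock_held(uncore->rpm);
 *		spin_lock_irqsave(&uncore->lock, irqflags);
 *		unclaimed_reg_debug(uncore, reg, true, true);
 *
 *		fw_engine = __fwtable_reg_read_fw_domains(uncore, offset);
 *		if (fw_engine)
 *			__force_wake_auto(uncore, fw_engine);
 *		val = __raw_uncore_read32(uncore, reg);
 *
 *		unclaimed_reg_debug(uncore, reg, true, false);
 *		spin_unlock_irqrestore(&uncore->lock, irqflags);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 */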
1651
1652#undef __gen_reg_read_funcs
1653#undef GEN6_READ_FOOTER
1654#undef GEN6_READ_HEADER
1655
1656#define GEN2_WRITE_HEADER \
1657        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1658        assert_rpm_wakelock_held(uncore->rpm); \
1659
1660#define GEN2_WRITE_FOOTER
1661
1662#define __gen2_write(x) \
1663static void \
1664gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1665        GEN2_WRITE_HEADER; \
1666        __raw_uncore_write##x(uncore, reg, val); \
1667        GEN2_WRITE_FOOTER; \
1668}
1669
1670#define __gen5_write(x) \
1671static void \
1672gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1673        GEN2_WRITE_HEADER; \
1674        ilk_dummy_write(uncore); \
1675        __raw_uncore_write##x(uncore, reg, val); \
1676        GEN2_WRITE_FOOTER; \
1677}
1678
1679__gen5_write(8)
1680__gen5_write(16)
1681__gen5_write(32)
1682__gen2_write(8)
1683__gen2_write(16)
1684__gen2_write(32)
1685
1686#undef __gen5_write
1687#undef __gen2_write
1688
1689#undef GEN2_WRITE_FOOTER
1690#undef GEN2_WRITE_HEADER
1691
1692#define GEN6_WRITE_HEADER \
1693        u32 offset = i915_mmio_reg_offset(reg); \
1694        unsigned long irqflags; \
1695        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1696        assert_rpm_wakelock_held(uncore->rpm); \
1697        spin_lock_irqsave(&uncore->lock, irqflags); \
1698        unclaimed_reg_debug(uncore, reg, false, true)
1699
1700#define GEN6_WRITE_FOOTER \
1701        unclaimed_reg_debug(uncore, reg, false, false); \
1702        spin_unlock_irqrestore(&uncore->lock, irqflags)
1703
1704#define __gen6_write(x) \
1705static void \
1706gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1707        GEN6_WRITE_HEADER; \
1708        if (NEEDS_FORCE_WAKE(offset)) \
1709                __gen6_gt_wait_for_fifo(uncore); \
1710        __raw_uncore_write##x(uncore, reg, val); \
1711        GEN6_WRITE_FOOTER; \
1712}
1713__gen6_write(8)
1714__gen6_write(16)
1715__gen6_write(32)
1716
1717#define __gen_write(func, x) \
1718static void \
1719func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1720        enum forcewake_domains fw_engine; \
1721        GEN6_WRITE_HEADER; \
1722        fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
1723        if (fw_engine) \
1724                __force_wake_auto(uncore, fw_engine); \
1725        __raw_uncore_write##x(uncore, reg, val); \
1726        GEN6_WRITE_FOOTER; \
1727}
1728
1729#define __gen_reg_write_funcs(func) \
1730static enum forcewake_domains \
1731func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
1732        return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
1733} \
1734\
1735__gen_write(func, 8) \
1736__gen_write(func, 16) \
1737__gen_write(func, 32)
1738
1739__gen_reg_write_funcs(xehp_fwtable);
1740__gen_reg_write_funcs(gen12_fwtable);
1741__gen_reg_write_funcs(gen11_fwtable);
1742__gen_reg_write_funcs(fwtable);
1743__gen_reg_write_funcs(gen8);
1744
1745#undef __gen_reg_write_funcs
1746#undef GEN6_WRITE_FOOTER
1747#undef GEN6_WRITE_HEADER
1748
1749#define __vgpu_write(x) \
1750static void \
1751vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1752        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1753        __raw_uncore_write##x(uncore, reg, val); \
1754}
1755__vgpu_write(8)
1756__vgpu_write(16)
1757__vgpu_write(32)
1758
1759#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
1760do { \
1761        (uncore)->funcs.mmio_writeb = x##_write8; \
1762        (uncore)->funcs.mmio_writew = x##_write16; \
1763        (uncore)->funcs.mmio_writel = x##_write32; \
1764} while (0)
1765
1766#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
1767do { \
1768        (uncore)->funcs.mmio_readb = x##_read8; \
1769        (uncore)->funcs.mmio_readw = x##_read16; \
1770        (uncore)->funcs.mmio_readl = x##_read32; \
1771        (uncore)->funcs.mmio_readq = x##_read64; \
1772} while (0)
1773
1774#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
1775do { \
1776        ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
1777        (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
1778} while (0)
1779
1780#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
1781do { \
1782        ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
1783        (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
1784} while (0)
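
/*
 * Illustrative expansion (not compiled): ASSIGN_WRITE_MMIO_VFUNCS(uncore,
 * fwtable) resolves to
 *
 *	(uncore)->funcs.mmio_writeb = fwtable_write8;
 *	(uncore)->funcs.mmio_writew = fwtable_write16;
 *	(uncore)->funcs.mmio_writel = fwtable_write32;
 *	(uncore)->funcs.write_fw_domains = fwtable_reg_write_fw_domains;
 *
 * i.e. the register-access flavour is chosen once at init time instead of
 * being branched on in every accessor.
 */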
1785
1786static int __fw_domain_init(struct intel_uncore *uncore,
1787                            enum forcewake_domain_id domain_id,
1788                            i915_reg_t reg_set,
1789                            i915_reg_t reg_ack)
1790{
1791        struct intel_uncore_forcewake_domain *d;
1792
1793        GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1794        GEM_BUG_ON(uncore->fw_domain[domain_id]);
1795
1796        if (i915_inject_probe_failure(uncore->i915))
1797                return -ENOMEM;
1798
1799        d = kzalloc(sizeof(*d), GFP_KERNEL);
1800        if (!d)
1801                return -ENOMEM;
1802
1803        drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
1804        drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
1805
1806        d->uncore = uncore;
1807        d->wake_count = 0;
1808        d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
1809        d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
1810
1811        d->id = domain_id;
1812
1813        BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1814        BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
1815        BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1816        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
1817        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
1818        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
1819        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
1820        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
1821        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
1822        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
1823        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
1824        BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
1825        BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
1826        BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
1827        BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
1828
1829        d->mask = BIT(domain_id);
1830
1831        hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1832        d->timer.function = intel_uncore_fw_release_timer;
1833
1834        uncore->fw_domains |= BIT(domain_id);
1835
1836        fw_domain_reset(d);
1837
1838        uncore->fw_domain[domain_id] = d;
1839
1840        return 0;
1841}
1842
1843static void fw_domain_fini(struct intel_uncore *uncore,
1844                           enum forcewake_domain_id domain_id)
1845{
1846        struct intel_uncore_forcewake_domain *d;
1847
1848        GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1849
1850        d = fetch_and_zero(&uncore->fw_domain[domain_id]);
1851        if (!d)
1852                return;
1853
1854        uncore->fw_domains &= ~BIT(domain_id);
1855        drm_WARN_ON(&uncore->i915->drm, d->wake_count);
1856        drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
1857        kfree(d);
1858}
1859
1860static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
1861{
1862        struct intel_uncore_forcewake_domain *d;
1863        int tmp;
1864
1865        for_each_fw_domain(d, uncore, tmp)
1866                fw_domain_fini(uncore, d->id);
1867}
1868
1869static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
1870{
1871        struct drm_i915_private *i915 = uncore->i915;
1872        int ret = 0;
1873
1874        GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
1875
1876#define fw_domain_init(uncore__, id__, set__, ack__) \
1877        (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
1878
1879        if (GRAPHICS_VER(i915) >= 11) {
1880                /* we'll prune the domains of missing engines later */
1881                intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
1882                int i;
1883
1884                uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
1885                uncore->funcs.force_wake_put = fw_domains_put;
1886                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1887                               FORCEWAKE_RENDER_GEN9,
1888                               FORCEWAKE_ACK_RENDER_GEN9);
1889                fw_domain_init(uncore, FW_DOMAIN_ID_GT,
1890                               FORCEWAKE_GT_GEN9,
1891                               FORCEWAKE_ACK_GT_GEN9);
1892
1893                for (i = 0; i < I915_MAX_VCS; i++) {
1894                        if (!__HAS_ENGINE(emask, _VCS(i)))
1895                                continue;
1896
1897                        fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
1898                                       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
1899                                       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
1900                }
1901                for (i = 0; i < I915_MAX_VECS; i++) {
1902                        if (!__HAS_ENGINE(emask, _VECS(i)))
1903                                continue;
1904
1905                        fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
1906                                       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
1907                                       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
1908                }
1909        } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
1910                uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
1911                uncore->funcs.force_wake_put = fw_domains_put;
1912                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1913                               FORCEWAKE_RENDER_GEN9,
1914                               FORCEWAKE_ACK_RENDER_GEN9);
1915                fw_domain_init(uncore, FW_DOMAIN_ID_GT,
1916                               FORCEWAKE_GT_GEN9,
1917                               FORCEWAKE_ACK_GT_GEN9);
1918                fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
1919                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1920        } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
1921                uncore->funcs.force_wake_get = fw_domains_get;
1922                uncore->funcs.force_wake_put = fw_domains_put;
1923                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1924                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1925                fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
1926                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1927        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
1928                uncore->funcs.force_wake_get =
1929                        fw_domains_get_with_thread_status;
1930                uncore->funcs.force_wake_put = fw_domains_put;
1931                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1932                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1933        } else if (IS_IVYBRIDGE(i915)) {
1934                u32 ecobus;
1935
1936                /* IVB configs may use multi-threaded forcewake */
1937
1938                /* A small trick here - if the BIOS hasn't configured
1939                 * MT forcewake, and if the device is in RC6, then
1940                 * fw_domains_get_with_thread_status() will not wake the device and the
1941                 * ECOBUS read will return zero. Which will be
1942                 * (correctly) interpreted by the test below as MT
1943                 * forcewake being disabled.
1944                 */
1945                uncore->funcs.force_wake_get =
1946                        fw_domains_get_with_thread_status;
1947                uncore->funcs.force_wake_put = fw_domains_put;
1948
1949                /* We need to init first for ECOBUS access and then
1950                 * determine later if we want to reinit, in case MT access is
1951                 * not working. In this stage we don't know which flavour this
1952                 * ivb is, so it is better to reset also the gen6 fw registers
1953                 * before the ecobus check.
1954                 */
1955
1956                __raw_uncore_write32(uncore, FORCEWAKE, 0);
1957                __raw_posting_read(uncore, ECOBUS);
1958
1959                ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1960                                       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1961                if (ret)
1962                        goto out;
1963
1964                spin_lock_irq(&uncore->lock);
1965                fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
1966                ecobus = __raw_uncore_read32(uncore, ECOBUS);
1967                fw_domains_put(uncore, FORCEWAKE_RENDER);
1968                spin_unlock_irq(&uncore->lock);
1969
1970                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1971                        drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
1972                        drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
1973                        fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
1974                        fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1975                                       FORCEWAKE, FORCEWAKE_ACK);
1976                }
1977        } else if (GRAPHICS_VER(i915) == 6) {
1978                uncore->funcs.force_wake_get =
1979                        fw_domains_get_with_thread_status;
1980                uncore->funcs.force_wake_put = fw_domains_put;
1981                fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1982                               FORCEWAKE, FORCEWAKE_ACK);
1983        }
1984
1985#undef fw_domain_init
1986
1987        /* All future platforms are expected to require complex power gating */
1988        drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
1989
1990out:
1991        if (ret)
1992                intel_uncore_fw_domains_fini(uncore);
1993
1994        return ret;
1995}
1996
1997#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
1998{ \
1999        (uncore)->fw_domains_table = \
2000                        (struct intel_forcewake_range *)(d); \
2001        (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
2002}
2003
2004static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
2005                                         unsigned long action, void *data)
2006{
2007        struct intel_uncore *uncore = container_of(nb,
2008                        struct intel_uncore, pmic_bus_access_nb);
2009
2010        switch (action) {
2011        case MBI_PMIC_BUS_ACCESS_BEGIN:
2012                /*
2013                 * forcewake all now to make sure that we don't need to do a
2014                 * forcewake later which on systems where this notifier gets
2015                 * called requires the punit to access the shared pmic i2c
2016                 * bus, which will be busy after this notification, leading to:
2017                 * "render: timed out waiting for forcewake ack request."
2018                 * errors.
2019                 *
2020                 * The notifier is unregistered during intel_runtime_suspend(),
2021                 * so it's ok to access the HW here without holding a RPM
2022                 * wake reference -> disable wakeref asserts for the time of
2023                 * the access.
2024                 */
2025                disable_rpm_wakeref_asserts(uncore->rpm);
2026                intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2027                enable_rpm_wakeref_asserts(uncore->rpm);
2028                break;
2029        case MBI_PMIC_BUS_ACCESS_END:
2030                intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2031                break;
2032        }
2033
2034        return NOTIFY_OK;
2035}
2036
2037static int uncore_mmio_setup(struct intel_uncore *uncore)
2038{
2039        struct drm_i915_private *i915 = uncore->i915;
2040        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
2041        int mmio_bar;
2042        int mmio_size;
2043
2044        mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
2045        /*
2046         * Before gen4, the registers and the GTT are behind different BARs.
2047         * However, from gen4 onwards, the registers and the GTT are shared
2048         * in the same BAR, so we restrict this ioremap so that it does
2049         * not clobber the GTT, which we map with ioremap_wc instead. Fortunately,
2050         * the register BAR remains the same size for all the earlier
2051         * generations up to Ironlake.
2052         * For dgfx chips, the register range is expanded to 4MB.
2053         */
2054        if (GRAPHICS_VER(i915) < 5)
2055                mmio_size = 512 * 1024;
2056        else if (IS_DGFX(i915))
2057                mmio_size = 4 * 1024 * 1024;
2058        else
2059                mmio_size = 2 * 1024 * 1024;
2060
2061        uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
2062        if (uncore->regs == NULL) {
2063                drm_err(&i915->drm, "failed to map registers\n");
2064                return -EIO;
2065        }
2066
2067        return 0;
2068}
2069
2070static void uncore_mmio_cleanup(struct intel_uncore *uncore)
2071{
2072        struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);
2073
2074        pci_iounmap(pdev, uncore->regs);
2075}
2076
2077void intel_uncore_init_early(struct intel_uncore *uncore,
2078                             struct drm_i915_private *i915)
2079{
2080        spin_lock_init(&uncore->lock);
2081        uncore->i915 = i915;
2082        uncore->rpm = &i915->runtime_pm;
2083        uncore->debug = &i915->mmio_debug;
2084}
2085
2086static void uncore_raw_init(struct intel_uncore *uncore)
2087{
2088        GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
2089
2090        if (intel_vgpu_active(uncore->i915)) {
2091                ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2092                ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
2093        } else if (GRAPHICS_VER(uncore->i915) == 5) {
2094                ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2095                ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2096        } else {
2097                ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2098                ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2099        }
2100}
2101
2102static int uncore_forcewake_init(struct intel_uncore *uncore)
2103{
2104        struct drm_i915_private *i915 = uncore->i915;
2105        int ret;
2106
2107        GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2108
2109        ret = intel_uncore_fw_domains_init(uncore);
2110        if (ret)
2111                return ret;
2112        forcewake_early_sanitize(uncore, 0);
2113
2114        if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
2115                ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
2116                ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable);
2117                ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
2118        } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
2119                ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
2120                ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable);
2121                ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
2122        } else if (GRAPHICS_VER(i915) >= 12) {
2123                ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
2124                ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
2125                ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
2126        } else if (GRAPHICS_VER(i915) == 11) {
2127                ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
2128                ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
2129                ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
2130        } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2131                ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
2132                ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2133                ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2134        } else if (IS_CHERRYVIEW(i915)) {
2135                ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
2136                ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2137                ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2138        } else if (GRAPHICS_VER(i915) == 8) {
2139                ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
2140                ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
2141        } else if (IS_VALLEYVIEW(i915)) {
2142                ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2143                ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2144                ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2145        } else if (IS_GRAPHICS_VER(i915, 6, 7)) {
2146                ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2147                ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
2148        }
2149
2150        uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2151        iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
2152
2153        return 0;
2154}
2155
2156int intel_uncore_init_mmio(struct intel_uncore *uncore)
2157{
2158        struct drm_i915_private *i915 = uncore->i915;
2159        int ret;
2160
2161        ret = uncore_mmio_setup(uncore);
2162        if (ret)
2163                return ret;
2164
2165        /*
2166         * The boot firmware initializes local memory and assesses its health.
2167         * If memory training fails, the punit will have been instructed to
2168         * keep the GT powered down; we won't be able to communicate with it
2169         * and we should not continue with driver initialization.
2170         */
2171        if (IS_DGFX(i915) &&
2172            !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
2173                drm_err(&i915->drm, "LMEM not initialized by firmware\n");
2174                return -ENODEV;
2175        }
2176
2177        if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
2178                uncore->flags |= UNCORE_HAS_FORCEWAKE;
2179
2180        if (!intel_uncore_has_forcewake(uncore)) {
2181                uncore_raw_init(uncore);
2182        } else {
2183                ret = uncore_forcewake_init(uncore);
2184                if (ret)
2185                        goto out_mmio_cleanup;
2186        }
2187
2188        /* make sure fw funcs are set if and only if we have fw */
2189        GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
2190        GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
2191        GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
2192        GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
2193
2194        if (HAS_FPGA_DBG_UNCLAIMED(i915))
2195                uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
2196
2197        if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
2198                uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
2199
2200        if (IS_GRAPHICS_VER(i915, 6, 7))
2201                uncore->flags |= UNCORE_HAS_FIFO;
2202
2203        /* clear out unclaimed reg detection bit */
2204        if (intel_uncore_unclaimed_mmio(uncore))
2205                drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
2206
2207        return 0;
2208
2209out_mmio_cleanup:
2210        uncore_mmio_cleanup(uncore);
2211
2212        return ret;
2213}
2214
2215/*
2216 * We might have detected that some engines are fused off after we initialized
2217 * the forcewake domains. Prune them, to make sure they only reference existing
2218 * engines.
2219 */
2220void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
2221                                          struct intel_gt *gt)
2222{
2223        enum forcewake_domains fw_domains = uncore->fw_domains;
2224        enum forcewake_domain_id domain_id;
2225        int i;
2226
2227        if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
2228                return;
2229
2230        for (i = 0; i < I915_MAX_VCS; i++) {
2231                domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
2232
2233                if (HAS_ENGINE(gt, _VCS(i)))
2234                        continue;
2235
2236                /*
2237                 * Starting with XeHP, the power well for an even-numbered
2238                 * VDBOX is also used for shared units within the
2239                 * media slice such as SFC.  So even if the engine
2240                 * itself is fused off, we still need to initialize
2241                 * the forcewake domain if any of the other engines
2242                 * in the same media slice are present.
2243                 */
2244                if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
2245                        if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
2246                                continue;
2247
2248                        if (HAS_ENGINE(gt, _VECS(i / 2)))
2249                                continue;
2250                }
2251
2252                if (fw_domains & BIT(domain_id))
2253                        fw_domain_fini(uncore, domain_id);
2254        }
2255
2256        for (i = 0; i < I915_MAX_VECS; i++) {
2257                domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
2258
2259                if (HAS_ENGINE(gt, _VECS(i)))
2260                        continue;
2261
2262                if (fw_domains & BIT(domain_id))
2263                        fw_domain_fini(uncore, domain_id);
2264        }
2265}
2266
2267void intel_uncore_fini_mmio(struct intel_uncore *uncore)
2268{
2269        if (intel_uncore_has_forcewake(uncore)) {
2270                iosf_mbi_punit_acquire();
2271                iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
2272                        &uncore->pmic_bus_access_nb);
2273                intel_uncore_forcewake_reset(uncore);
2274                intel_uncore_fw_domains_fini(uncore);
2275                iosf_mbi_punit_release();
2276        }
2277
2278        uncore_mmio_cleanup(uncore);
2279}
2280
2281static const struct reg_whitelist {
2282        i915_reg_t offset_ldw;
2283        i915_reg_t offset_udw;
2284        u8 min_graphics_ver;
2285        u8 max_graphics_ver;
2286        u8 size;
2287} reg_read_whitelist[] = { {
2288        .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
2289        .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
2290        .min_graphics_ver = 4,
2291        .max_graphics_ver = 12,
2292        .size = 8
2293} };
2294
2295int i915_reg_read_ioctl(struct drm_device *dev,
2296                        void *data, struct drm_file *file)
2297{
2298        struct drm_i915_private *i915 = to_i915(dev);
2299        struct intel_uncore *uncore = &i915->uncore;
2300        struct drm_i915_reg_read *reg = data;
2301        struct reg_whitelist const *entry;
2302        intel_wakeref_t wakeref;
2303        unsigned int flags;
2304        int remain;
2305        int ret = 0;
2306
2307        entry = reg_read_whitelist;
2308        remain = ARRAY_SIZE(reg_read_whitelist);
2309        while (remain) {
2310                u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);
2311
2312                GEM_BUG_ON(!is_power_of_2(entry->size));
2313                GEM_BUG_ON(entry->size > 8);
2314                GEM_BUG_ON(entry_offset & (entry->size - 1));
2315
2316                if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) &&
2317                    entry_offset == (reg->offset & -entry->size))
2318                        break;
2319                entry++;
2320                remain--;
2321        }
2322
2323        if (!remain)
2324                return -EINVAL;
2325
2326        flags = reg->offset & (entry->size - 1);
2327
2328        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
2329                if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
2330                        reg->val = intel_uncore_read64_2x32(uncore,
2331                                                            entry->offset_ldw,
2332                                                            entry->offset_udw);
2333                else if (entry->size == 8 && flags == 0)
2334                        reg->val = intel_uncore_read64(uncore,
2335                                                       entry->offset_ldw);
2336                else if (entry->size == 4 && flags == 0)
2337                        reg->val = intel_uncore_read(uncore, entry->offset_ldw);
2338                else if (entry->size == 2 && flags == 0)
2339                        reg->val = intel_uncore_read16(uncore,
2340                                                       entry->offset_ldw);
2341                else if (entry->size == 1 && flags == 0)
2342                        reg->val = intel_uncore_read8(uncore,
2343                                                      entry->offset_ldw);
2344                else
2345                        ret = -EINVAL;
2346        }
2347
2348        return ret;
2349}
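
/*
 * Userspace sketch (illustrative, error handling omitted): reading the
 * 64-bit render ring timestamp through the whitelist above via libdrm.
 * The 0x2358 offset (RING_TIMESTAMP(RENDER_RING_BASE)) is an assumption
 * here; setting I915_REG_READ_8B_WA in the low offset bits selects the
 * read64_2x32 workaround path.
 *
 *	struct drm_i915_reg_read r = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &r) == 0)
 *		printf("render timestamp: 0x%llx\n",
 *		       (unsigned long long)r.val);
 */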
2350
2351/**
2352 * __intel_wait_for_register_fw - wait until register matches expected state
2353 * @uncore: the struct intel_uncore
2354 * @reg: the register to read
2355 * @mask: mask to apply to register value
2356 * @value: expected value
2357 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
2358 * @slow_timeout_ms: slow timeout in milliseconds
2359 * @out_value: optional placeholder to hold register value
2360 *
2361 * This routine waits until the target register @reg contains the expected
2362 * @value after applying the @mask, i.e. it waits until ::
2363 *
2364 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
2365 *
2366 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
2367 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
2368 * must not be larger than 20,000 microseconds.
2369 *
2370 * Note that this routine assumes the caller holds forcewake asserted, it is
2371 * not suitable for very long waits. See intel_wait_for_register() if you
2372 * wish to wait without holding forcewake for the duration (i.e. you expect
2373 * the wait to be slow).
2374 *
2375 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2376 */
2377int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2378                                 i915_reg_t reg,
2379                                 u32 mask,
2380                                 u32 value,
2381                                 unsigned int fast_timeout_us,
2382                                 unsigned int slow_timeout_ms,
2383                                 u32 *out_value)
2384{
2385        u32 reg_value = 0;
2386#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2387        int ret;
2388
2389        /* Catch any overuse of this function */
2390        might_sleep_if(slow_timeout_ms);
2391        GEM_BUG_ON(fast_timeout_us > 20000);
2392        GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
2393
2394        ret = -ETIMEDOUT;
2395        if (fast_timeout_us && fast_timeout_us <= 20000)
2396                ret = _wait_for_atomic(done, fast_timeout_us, 0);
2397        if (ret && slow_timeout_ms)
2398                ret = wait_for(done, slow_timeout_ms);
2399
2400        if (out_value)
2401                *out_value = reg_value;
2402
2403        return ret;
2404#undef done
2405}
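
/*
 * Caller sketch (illustrative, not a real call site): poll a status bit
 * for up to 100us in atomic context while explicitly holding forcewake,
 * as the note above requires. The register and bit are assumptions.
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	err = __intel_wait_for_register_fw(uncore, reg,
 *					   BIT(0), BIT(0),
 *					   100, 0, NULL);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */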
2406
2407/**
2408 * __intel_wait_for_register - wait until register matches expected state
2409 * @uncore: the struct intel_uncore
2410 * @reg: the register to read
2411 * @mask: mask to apply to register value
2412 * @value: expected value
2413 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
2414 * @slow_timeout_ms: slow timeout in milliseconds
2415 * @out_value: optional placeholder to hold register value
2416 *
2417 * This routine waits until the target register @reg contains the expected
2418 * @value after applying the @mask, i.e. it waits until ::
2419 *
2420 *     (intel_uncore_read(uncore, reg) & mask) == value
2421 *
2422 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
2423 *
2424 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2425 */
2426int __intel_wait_for_register(struct intel_uncore *uncore,
2427                              i915_reg_t reg,
2428                              u32 mask,
2429                              u32 value,
2430                              unsigned int fast_timeout_us,
2431                              unsigned int slow_timeout_ms,
2432                              u32 *out_value)
2433{
2434        unsigned int fw =
2435                intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
2436        u32 reg_value;
2437        int ret;
2438
2439        might_sleep_if(slow_timeout_ms);
2440
2441        spin_lock_irq(&uncore->lock);
2442        intel_uncore_forcewake_get__locked(uncore, fw);
2443
2444        ret = __intel_wait_for_register_fw(uncore,
2445                                           reg, mask, value,
2446                                           fast_timeout_us, 0, &reg_value);
2447
2448        intel_uncore_forcewake_put__locked(uncore, fw);
2449        spin_unlock_irq(&uncore->lock);
2450
2451        if (ret && slow_timeout_ms)
2452                ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2453                                                                       reg),
2454                                 (reg_value & mask) == value,
2455                                 slow_timeout_ms * 1000, 10, 1000);
2456
2457        /* just trace the final value */
2458        trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2459
2460        if (out_value)
2461                *out_value = reg_value;
2462
2463        return ret;
2464}
2465
2466bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
2467{
2468        bool ret;
2469
2470        spin_lock_irq(&uncore->debug->lock);
2471        ret = check_for_unclaimed_mmio(uncore);
2472        spin_unlock_irq(&uncore->debug->lock);
2473
2474        return ret;
2475}
2476
2477bool
2478intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
2479{
2480        bool ret = false;
2481
2482        spin_lock_irq(&uncore->debug->lock);
2483
2484        if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2485                goto out;
2486
2487        if (unlikely(check_for_unclaimed_mmio(uncore))) {
2488                if (!uncore->i915->params.mmio_debug) {
2489                        drm_dbg(&uncore->i915->drm,
2490                                "Unclaimed register detected, "
2491                                "enabling oneshot unclaimed register reporting. "
2492                                "Please use i915.mmio_debug=N for more information.\n");
2493                        uncore->i915->params.mmio_debug++;
2494                }
2495                uncore->debug->unclaimed_mmio_check--;
2496                ret = true;
2497        }
2498
2499out:
2500        spin_unlock_irq(&uncore->debug->lock);
2501
2502        return ret;
2503}
2504
2505/**
2506 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2507 *                                  a register
2508 * @uncore: pointer to struct intel_uncore
2509 * @reg: register in question
2510 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2511 *
2512 * Returns the set of forcewake domains that must be taken (with, for example,
2513 * intel_uncore_forcewake_get()) for the specified register to be accessible in
2514 * the specified mode (read, write or read/write) with raw mmio accessors.
2515 *
2516 * NOTE: On Gen6 and Gen7, the write forcewake domain (FORCEWAKE_RENDER) requires
2517 * callers to do FIFO management on their own or risk losing writes.
2518 */
2519enum forcewake_domains
2520intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
2521                               i915_reg_t reg, unsigned int op)
2522{
2523        enum forcewake_domains fw_domains = 0;
2524
2525        drm_WARN_ON(&uncore->i915->drm, !op);
2526
2527        if (!intel_uncore_has_forcewake(uncore))
2528                return 0;
2529
2530        if (op & FW_REG_READ)
2531                fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
2532
2533        if (op & FW_REG_WRITE)
2534                fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2535
2536        drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
2537
2538        return fw_domains;
2539}
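
/*
 * Usage sketch (illustrative): batch several raw accesses under a single
 * forcewake transition instead of paying the wake/ack cost per access.
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(uncore, fw);
 *	val = intel_uncore_read_fw(uncore, reg);
 *	intel_uncore_write_fw(uncore, reg, val | BIT(0));
 *	intel_uncore_forcewake_put(uncore, fw);
 */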
2540
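/*
 * Read a multicast/replicated (MCR) register while steering the access to
 * one specific slice/subslice instance: save the current GEN8_MCR_SELECTOR,
 * point it at the requested instance, read the register, then restore the
 * selector. The caller must hold uncore->lock and the required forcewake
 * domains (hence the _fw suffix).
 */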
2541u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
2542                                           i915_reg_t reg,
2543                                           int slice, int subslice)
2544{
2545        u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
2546
2547        lockdep_assert_held(&uncore->lock);
2548
2549        if (GRAPHICS_VER(uncore->i915) >= 11) {
2550                mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
2551                mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
2552        } else {
2553                mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
2554                mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
2555        }
2556
2557        old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
2558
2559        mcr &= ~mcr_mask;
2560        mcr |= mcr_ss;
2561        intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
2562
2563        val = intel_uncore_read_fw(uncore, reg);
2564
2565        mcr &= ~mcr_mask;
2566        mcr |= old_mcr & mcr_mask;
2567
2568        intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
2569
2570        return val;
2571}
2572
2573u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
2574                                        i915_reg_t reg, int slice, int subslice)
2575{
2576        enum forcewake_domains fw_domains;
2577        u32 val;
2578
2579        fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
2580                                                    FW_REG_READ);
2581        fw_domains |= intel_uncore_forcewake_for_reg(uncore,
2582                                                     GEN8_MCR_SELECTOR,
2583                                                     FW_REG_READ | FW_REG_WRITE);
2584
2585        spin_lock_irq(&uncore->lock);
2586        intel_uncore_forcewake_get__locked(uncore, fw_domains);
2587
2588        val = intel_uncore_read_with_mcr_steering_fw(uncore, reg, slice, subslice);
2589
2590        intel_uncore_forcewake_put__locked(uncore, fw_domains);
2591        spin_unlock_irq(&uncore->lock);
2592
2593        return val;
2594}
2595
2596#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2597#include "selftests/mock_uncore.c"
2598#include "selftests/intel_uncore.c"
2599#endif
2600