linux/drivers/gpu/drm/i915/intel_uncore.h
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "i915_reg.h"

struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
struct intel_gt;

struct intel_uncore_mmio_debug {
        spinlock_t lock; /** lock is also taken in irq contexts. */
        int unclaimed_mmio_check;
        int saved_mmio_check;
        u32 suspend_count;
};

enum forcewake_domain_id {
        FW_DOMAIN_ID_RENDER = 0,
        FW_DOMAIN_ID_GT,        /* also includes blitter engine */
        FW_DOMAIN_ID_MEDIA,
        FW_DOMAIN_ID_MEDIA_VDBOX0,
        FW_DOMAIN_ID_MEDIA_VDBOX1,
        FW_DOMAIN_ID_MEDIA_VDBOX2,
        FW_DOMAIN_ID_MEDIA_VDBOX3,
        FW_DOMAIN_ID_MEDIA_VEBOX0,
        FW_DOMAIN_ID_MEDIA_VEBOX1,

        FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
        FORCEWAKE_RENDER        = BIT(FW_DOMAIN_ID_RENDER),
        FORCEWAKE_GT            = BIT(FW_DOMAIN_ID_GT),
        FORCEWAKE_MEDIA         = BIT(FW_DOMAIN_ID_MEDIA),
        FORCEWAKE_MEDIA_VDBOX0  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
        FORCEWAKE_MEDIA_VDBOX1  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
        FORCEWAKE_MEDIA_VDBOX2  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
        FORCEWAKE_MEDIA_VDBOX3  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
        FORCEWAKE_MEDIA_VEBOX0  = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
        FORCEWAKE_MEDIA_VEBOX1  = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),

        FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1
};
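
/*
 * The domain values above are single bits, so callers can request several
 * power wells at once by OR-ing them together. A sketched example (the
 * surrounding calls are declared further down in this header):
 *
 * intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER | FORCEWAKE_MEDIA);
 * ... MMIO to render and media registers ...
 * intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER | FORCEWAKE_MEDIA);
 */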

struct intel_uncore_funcs {
        void (*force_wake_get)(struct intel_uncore *uncore,
                               enum forcewake_domains domains);
        void (*force_wake_put)(struct intel_uncore *uncore,
                               enum forcewake_domains domains);

        enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
                                                  i915_reg_t r);
        enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
                                                   i915_reg_t r);

        u8 (*mmio_readb)(struct intel_uncore *uncore,
                         i915_reg_t r, bool trace);
        u16 (*mmio_readw)(struct intel_uncore *uncore,
                          i915_reg_t r, bool trace);
        u32 (*mmio_readl)(struct intel_uncore *uncore,
                          i915_reg_t r, bool trace);
        u64 (*mmio_readq)(struct intel_uncore *uncore,
                          i915_reg_t r, bool trace);

        void (*mmio_writeb)(struct intel_uncore *uncore,
                            i915_reg_t r, u8 val, bool trace);
        void (*mmio_writew)(struct intel_uncore *uncore,
                            i915_reg_t r, u16 val, bool trace);
        void (*mmio_writel)(struct intel_uncore *uncore,
                            i915_reg_t r, u32 val, bool trace);
};

struct intel_forcewake_range {
        u32 start;
        u32 end;

        enum forcewake_domains domains;
};

struct intel_uncore {
        void __iomem *regs;

        struct drm_i915_private *i915;
        struct intel_runtime_pm *rpm;

        spinlock_t lock; /** lock is also taken in irq contexts. */

        unsigned int flags;
#define UNCORE_HAS_FORCEWAKE            BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED   BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED        BIT(2)
#define UNCORE_HAS_FIFO                 BIT(3)

        const struct intel_forcewake_range *fw_domains_table;
        unsigned int fw_domains_table_entries;

        struct notifier_block pmic_bus_access_nb;
        struct intel_uncore_funcs funcs;

        unsigned int fifo_count;

        enum forcewake_domains fw_domains;
        enum forcewake_domains fw_domains_active;
        enum forcewake_domains fw_domains_timer;
        enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */

        struct intel_uncore_forcewake_domain {
                struct intel_uncore *uncore;
                enum forcewake_domain_id id;
                enum forcewake_domains mask;
                unsigned int wake_count;
                bool active;
                struct hrtimer timer;
                u32 __iomem *reg_set;
                u32 __iomem *reg_ack;
        } *fw_domain[FW_DOMAIN_ID_COUNT];

        unsigned int user_forcewake_count;

        struct intel_uncore_mmio_debug *debug;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
        for (tmp__ = (mask__); tmp__ ;) \
                for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])

#define for_each_fw_domain(domain__, uncore__, tmp__) \
        for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
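
/*
 * Illustrative sketch of the iterators above (not lifted from a real call
 * site): tmp__ is caller-provided scratch that the loop consumes one set
 * bit at a time.
 *
 * struct intel_uncore_forcewake_domain *domain;
 * unsigned int tmp;
 *
 * for_each_fw_domain(domain, uncore, tmp)
 *         WARN_ON(domain->wake_count && !domain->active);
 */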

static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
        return uncore->flags & UNCORE_HAS_FORCEWAKE;
}

static inline bool
intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
{
        return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
{
        return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_fifo(const struct intel_uncore *uncore)
{
        return uncore->flags & UNCORE_HAS_FIFO;
}

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
                             struct drm_i915_private *i915);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
                                          struct intel_gt *gt);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);

void assert_forcewakes_inactive(struct intel_uncore *uncore);
void assert_forcewakes_active(struct intel_uncore *uncore,
                              enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
                               i915_reg_t reg, unsigned int op);
#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

void intel_uncore_forcewake_get(struct intel_uncore *uncore,
                                enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
                                enum forcewake_domains domains);
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
                                  enum forcewake_domains fw_domains);
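
/*
 * Sketch of the usual calling pattern (reg/val are placeholders): hold the
 * needed domains awake across a burst of MMIO, then release them so the
 * hardware can power the wells back down.
 *
 * intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 * intel_uncore_write(uncore, reg, val);
 * val = intel_uncore_read(uncore, reg);
 * intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */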

/*
 * Like above but the caller must manage the uncore.lock itself.
 * Must be used with intel_uncore_read_fw() and friends.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
                                        enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
                                        enum forcewake_domains domains);

void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);

int __intel_wait_for_register(struct intel_uncore *uncore,
                              i915_reg_t reg,
                              u32 mask,
                              u32 value,
                              unsigned int fast_timeout_us,
                              unsigned int slow_timeout_ms,
                              u32 *out_value);
static inline int
intel_wait_for_register(struct intel_uncore *uncore,
                        i915_reg_t reg,
                        u32 mask,
                        u32 value,
                        unsigned int timeout_ms)
{
        return __intel_wait_for_register(uncore, reg, mask, value, 2,
                                         timeout_ms, NULL);
}
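
/*
 * Hypothetical example (SOME_STATUS_REG and SOME_READY_BIT are made-up
 * names): busy-wait up to 2us, then sleep-wait up to 10ms, for a ready bit
 * to assert.
 *
 * if (intel_wait_for_register(uncore, SOME_STATUS_REG,
 *                             SOME_READY_BIT, SOME_READY_BIT, 10))
 *         DRM_ERROR("device never became ready\n");
 */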

int __intel_wait_for_register_fw(struct intel_uncore *uncore,
                                 i915_reg_t reg,
                                 u32 mask,
                                 u32 value,
                                 unsigned int fast_timeout_us,
                                 unsigned int slow_timeout_ms,
                                 u32 *out_value);
static inline int
intel_wait_for_register_fw(struct intel_uncore *uncore,
                           i915_reg_t reg,
                           u32 mask,
                           u32 value,
                           unsigned int timeout_ms)
{
        return __intel_wait_for_register_fw(uncore, reg, mask, value,
                                            2, timeout_ms, NULL);
}

/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
                                            i915_reg_t reg) \
{ \
        return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
                                           i915_reg_t reg, u##x__ val) \
{ \
        write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

#define __uncore_read(name__, x__, s__, trace__) \
static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
                                           i915_reg_t reg) \
{ \
        return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
}

#define __uncore_write(name__, x__, s__, trace__) \
static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
                                         i915_reg_t reg, u##x__ val) \
{ \
        uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
}

__uncore_read(read8, 8, b, true)
__uncore_read(read16, 16, w, true)
__uncore_read(read, 32, l, true)
__uncore_read(read16_notrace, 16, w, false)
__uncore_read(read_notrace, 32, l, false)

__uncore_write(write8, 8, b, true)
__uncore_write(write16, 16, w, true)
__uncore_write(write, 32, l, true)
__uncore_write(write_notrace, 32, l, false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support intel_uncore_write64,
 * or uncore->funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
__uncore_read(read64, 64, q, true)

static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
                         i915_reg_t lower_reg, i915_reg_t upper_reg)
{
        u32 upper, lower, old_upper, loop = 0;

        upper = intel_uncore_read(uncore, upper_reg);
        do {
                old_upper = upper;
                lower = intel_uncore_read(uncore, lower_reg);
                upper = intel_uncore_read(uncore, upper_reg);
        } while (upper != old_upper && loop++ < 2);

        return (u64)upper << 32 | lower;
}
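
/*
 * Illustrative use (the register names are placeholders): a 64-bit counter
 * exposed as separate low/high dwords can be read tear-free with
 *
 * u64 ts = intel_uncore_read64_2x32(uncore, TS_LO_REG, TS_HI_REG);
 *
 * The helper re-reads the upper dword until it is stable, so a carry out of
 * the lower half between the two reads cannot produce a mixed-up value.
 */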

#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))

#undef __uncore_read
#undef __uncore_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&uncore->lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&uncore->lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
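
/*
 * A minimal sketch of the locked sequence described above (assumed caller
 * context; reg and the locals belong to the caller):
 *
 * unsigned long flags;
 * u32 val;
 *
 * spin_lock_irqsave(&uncore->lock, flags);
 * intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_RENDER);
 * val = intel_uncore_read_fw(uncore, reg);
 * intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_RENDER);
 * spin_unlock_irqrestore(&uncore->lock, flags);
 */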

static inline void intel_uncore_rmw(struct intel_uncore *uncore,
                                    i915_reg_t reg, u32 clear, u32 set)
{
        u32 old, val;

        old = intel_uncore_read(uncore, reg);
        val = (old & ~clear) | set;
        if (val != old)
                intel_uncore_write(uncore, reg, val);
}

static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
                                       i915_reg_t reg, u32 clear, u32 set)
{
        u32 old, val;

        old = intel_uncore_read_fw(uncore, reg);
        val = (old & ~clear) | set;
        if (val != old)
                intel_uncore_write_fw(uncore, reg, val);
}
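
/*
 * Example (bit names are placeholders): flip a mode bit in one
 * read-modify-write; the helpers above skip the write when the register
 * already holds the target value.
 *
 * intel_uncore_rmw(uncore, SOME_CTL_REG, OLD_MODE_BIT, NEW_MODE_BIT);
 */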

static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
                                                i915_reg_t reg, u32 val,
                                                u32 mask, u32 expected_val)
{
        u32 reg_val;

        intel_uncore_write(uncore, reg, val);
        reg_val = intel_uncore_read(uncore, reg);

        return (reg_val & mask) != expected_val ? -EINVAL : 0;
}
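
/*
 * Hypothetical usage (SOME_MODE_REG and MODE_MASK are made-up names): write
 * a value, read it back, and fail with -EINVAL if the masked result is not
 * what the caller expected to stick.
 *
 * err = intel_uncore_write_and_verify(uncore, SOME_MODE_REG, val,
 *                                     MODE_MASK, val & MODE_MASK);
 */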

#define raw_reg_read(base, reg) \
        readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
        writel(value, base + i915_mmio_reg_offset(reg))

#endif /* !__INTEL_UNCORE_H__ */