linux/drivers/gpu/drm/i915/gt/intel_gtt.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K   BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K  BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M   BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)              ((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)          (I915_PTES(pte_len) - 1)
#define I915_PDES                       512
#define I915_PDE_MASK                   (I915_PDES - 1)

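/*
 * Illustrative arithmetic (not part of the original header): with 4K pages,
 * I915_PTES(sizeof(gen8_pte_t)) == 4096 / 8 == 512 and
 * I915_PTES(sizeof(gen6_pte_t)) == 4096 / 4 == 1024 PTEs per page of PTEs.
 * Likewise, a 4GiB GGTT has ggtt_total_entries() == (1ull << 32) >> 12,
 * i.e. roughly one million 4K entries.
 */
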
/* gen6-hsw has bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)      ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC              (2 << 1)
#define GEN6_PTE_UNCACHED               (1 << 1)
#define GEN6_PTE_VALID                  REG_BIT(0)

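/*
 * Illustrative example (not upstream code): GEN6_GTT_ADDR_ENCODE() folds the
 * high physical address bits into the low PTE bits. For a physical address of
 * 0x1_2345_6000, ((addr >> 28) & 0xff0) == 0x010, so address bit 32 is
 * mirrored into PTE bit 4; the bits above 31 are then discarded when the
 * value is stored as a 32-bit gen6_pte_t.
 */
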
#define GEN6_PTES                       I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE                    (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN                   (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT                  22
#define GEN6_PDE_VALID                  REG_BIT(0)
#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))
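
/*
 * Illustrative arithmetic (not part of the original header): with
 * GEN6_PDE_SHIFT == 22 and 4K pages, NUM_PTE(GEN6_PDE_SHIFT) == 1 << 10 ==
 * 1024, matching GEN6_PTES, so one 4K page of gen6 PTEs spans the 4MiB
 * covered by a single PDE.
 */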

#define GEN7_PTE_CACHE_L3_LLC           (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES   REG_BIT(2)
#define BYT_PTE_WRITEABLE               REG_BIT(1)

#define GEN12_PPGTT_PTE_LM      BIT_ULL(11)

#define GEN12_GGTT_PTE_LM       BIT_ULL(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)  ((((bits) & 0x7) << 1) | \
                                         (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3                 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0                 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3            HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0            HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3            HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0            HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED                (0)
#define HSW_GTT_ADDR_ENCODE(addr)       ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)       HSW_GTT_ADDR_ENCODE(addr)
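
/*
 * Illustrative example (not upstream code): HSW_CACHEABILITY_CONTROL(0x8)
 * evaluates to ((0x8 & 0x7) << 1) | ((0x8 & 0x8) << 8) == 0x800, i.e. only
 * PTE bit 11 is set. Because bit 11 carries that fourth cacheability bit,
 * HSW_GTT_ADDR_ENCODE() masks with 0x7f0 (PTE bits 10:4) and can therefore
 * fold in only physical address bits 38:32, one fewer than gen6.
 */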

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3-level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
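/*
 * Illustrative decomposition (not part of the original header): with 512
 * entries per level, a 48b address breaks down as
 *   pml4e = (addr >> 39) & 0x1ff;
 *   pdpe  = (addr >> 30) & 0x1ff;
 *   pde   = (addr >> 21) & 0x1ff;
 *   pte   = (addr >> 12) & 0x1ff;
 * leaving the low 12 bits as the byte offset within the 4K page.
 */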
#define GEN8_3LVL_PDPES                 4

#define PPAT_UNCACHED                   (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE                 0 /* WB LLC */
#define PPAT_CACHED                     _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC               _PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP                  REG_BIT(6)
#define GEN8_PPAT_AGE(x)                ((x)<<4)
#define GEN8_PPAT_LLCeLLC               (3<<2)
#define GEN8_PPAT_LLCELLC               (2<<2)
#define GEN8_PPAT_LLC                   (1<<2)
#define GEN8_PPAT_WB                    (3<<0)
#define GEN8_PPAT_WT                    (2<<0)
#define GEN8_PPAT_WC                    (1<<0)
#define GEN8_PPAT_UC                    (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE         (0<<2)
#define GEN8_PPAT(i, x)                 ((u64)(x) << ((i) * 8))
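
/*
 * Illustrative example (not upstream code): GEN8_PPAT(i, x) places an 8-bit
 * PPAT entry into byte i of a 64-bit PPAT value, so e.g.
 * GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) == 0x900ull describes a
 * WC + LLC/eLLC entry at PAT index 1. The actual register programming sits
 * behind setup_private_pat(), declared further below.
 */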

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
        __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_table {
        struct drm_i915_gem_object *base;
        union {
                atomic_t used;
                struct i915_page_table *stash;
        };
};

struct i915_page_directory {
        struct i915_page_table pt;
        spinlock_t lock;
        void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
        __builtin_choose_expr( \
        __builtin_types_compatible_p(typeof(x), type) || \
        __builtin_types_compatible_p(typeof(x), const type), \
        ({ type __x = (type)(x); expr; }), \
        other)

#define px_base(px) \
        __px_choose_expr(px, struct drm_i915_gem_object *, __x, \
        __px_choose_expr(px, struct i915_page_table *, __x->base, \
        __px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
        (void)0)))
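
/*
 * Illustrative note (not part of the original header): px_base() dispatches
 * on the static type of its argument, so px_base(obj) is obj itself,
 * px_base(pt) is pt->base and px_base(pd) is pd->pt.base; any other pointer
 * type degenerates to (void)0 and fails to build where a pointer is needed.
 */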

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
        __px_choose_expr(px, struct i915_page_table *, __x, \
        __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
        (void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
        /* preallocated chains of page tables/directories */
        struct i915_page_table *pt[2];
};
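
/*
 * Illustrative note (an assumption based on the allocator in intel_gtt.c,
 * not text from this header): pt[0] is expected to chain the preallocated
 * leaf page tables and pt[1] the preallocated page directories, each linked
 * through the stash pointer that shares storage with the used counter above.
 */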

struct i915_vma_ops {
        /* Map an object into an address space with the given cache flags. */
        void (*bind_vma)(struct i915_address_space *vm,
                         struct i915_vm_pt_stash *stash,
                         struct i915_vma *vma,
                         enum i915_cache_level cache_level,
                         u32 flags);
        /*
         * Unmap an object from an address space. This usually consists of
         * setting the valid PTE entries to a reserved scratch page.
         */
        void (*unbind_vma)(struct i915_address_space *vm,
                           struct i915_vma *vma);

        int (*set_pages)(struct i915_vma *vma);
        void (*clear_pages)(struct i915_vma *vma);
};

struct i915_address_space {
        struct kref ref;
        struct rcu_work rcu;

        struct drm_mm mm;
        struct intel_gt *gt;
        struct drm_i915_private *i915;
        struct device *dma;
        u64 total;              /* size addr space maps (ex. 2GB for ggtt) */
        u64 reserved;           /* size addr space reserved */

        unsigned int bind_async_flags;

        /*
         * Each active user context has its own address space (in full-ppgtt).
         * Since the vm may be shared between multiple contexts, we count how
         * many contexts keep us "open". Once open hits zero, we are closed
         * and do not allow any new attachments, and proceed to shutdown our
         * vma and page directories.
         */
        atomic_t open;

        struct mutex mutex; /* protects vma and our lists */

        struct kref resv_ref; /* kref to keep the reservation lock alive. */
        struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

        struct drm_i915_gem_object *scratch[4];
        /**
         * List of vma currently bound.
         */
        struct list_head bound_list;

        /* Global GTT */
        bool is_ggtt:1;

        /* Display page table */
        bool is_dpt:1;

        /* Some systems support read-only mappings for GGTT and/or PPGTT */
        bool has_read_only:1;

        u8 top;
        u8 pd_shift;
        u8 scratch_order;

        struct drm_i915_gem_object *
                (*alloc_pt_dma)(struct i915_address_space *vm, int sz);

        u64 (*pte_encode)(dma_addr_t addr,
                          enum i915_cache_level level,
                          u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY   BIT(0)
#define PTE_LM          BIT(1)

        void (*allocate_va_range)(struct i915_address_space *vm,
                                  struct i915_vm_pt_stash *stash,
                                  u64 start, u64 length);
        void (*clear_range)(struct i915_address_space *vm,
                            u64 start, u64 length);
        void (*insert_page)(struct i915_address_space *vm,
                            dma_addr_t addr,
                            u64 offset,
                            enum i915_cache_level cache_level,
                            u32 flags);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct i915_vma *vma,
                               enum i915_cache_level cache_level,
                               u32 flags);
        void (*cleanup)(struct i915_address_space *vm);

        void (*foreach)(struct i915_address_space *vm,
                        u64 start, u64 length,
                        void (*fn)(struct i915_address_space *vm,
                                   struct i915_page_table *pt,
                                   void *data),
                        void *data);

        struct i915_vma_ops vma_ops;

        I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
        I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
        struct i915_address_space vm;

        struct io_mapping iomap;        /* Mapping to our CPU mappable region */
        struct resource gmadr;          /* GMADR resource */
        resource_size_t mappable_end;   /* End offset that we can CPU map */

        /** "Graphics Stolen Memory" holds the global PTEs */
        void __iomem *gsm;
        void (*invalidate)(struct i915_ggtt *ggtt);

        /** PPGTT used to alias the global GTT (the "aliasing ppgtt") */
        struct i915_ppgtt *alias;

        bool do_idle_maps;

        int mtrr;

        /** Bit 6 swizzling required for X tiling */
        u32 bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
        u32 bit_6_swizzle_y;

        u32 pin_bias;

        unsigned int num_fences;
        struct i915_fence_reg *fence_regs;
        struct list_head fence_list;

        /**
         * List of all objects in gtt_space, currently mmapped by userspace.
         * All objects within this list must also be on bound_list.
         */
        struct list_head userfault_list;

        /* Manual runtime pm autosuspend delay for user GGTT mmaps */
        struct intel_wakeref_auto userfault_wakeref;

        struct mutex error_mutex;
        struct drm_mm_node error_capture;
        struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
        struct i915_address_space vm;

        struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
        return (vm->total - 1) >> 32;
}
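
/*
 * Illustrative example (not part of the original header): a vm with
 * total == 1ull << 32 (4GiB, addressable with three levels) gives
 * (total - 1) >> 32 == 0, i.e. false, while a 48b vm with
 * total == 1ull << 48 yields a non-zero value and is reported as 4-level.
 */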

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
        return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
        return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
        BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
        GEM_BUG_ON(!i915_is_ggtt(vm));
        return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
        BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
        GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
        return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
        kref_get(&vm->ref);
        return vm;
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
        kref_get(&vm->resv_ref);
        return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
        kref_put(&vm->ref, i915_vm_release);
}
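
/*
 * Illustrative usage sketch (an assumption, not taken from this header):
 *
 *      struct i915_address_space *vm = i915_vm_get(some_vm);
 *
 *      ... use vm: the kref pins the structure itself, independently of
 *      the "open" count managed by i915_vm_open()/i915_vm_close() below ...
 *
 *      i915_vm_put(vm);
 */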

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to release.
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
        kref_put(&vm->resv_ref, i915_vm_resv_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
        GEM_BUG_ON(!atomic_read(&vm->open));
        atomic_inc(&vm->open);
        return i915_vm_get(vm);
}

static inline bool
i915_vm_tryopen(struct i915_address_space *vm)
{
        if (atomic_add_unless(&vm->open, 1, 0))
                return i915_vm_get(vm);

        return false;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
        GEM_BUG_ON(!atomic_read(&vm->open));
        __i915_vm_close(vm);

        i915_vm_put(vm);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
        const u32 mask = NUM_PTE(pde_shift) - 1;

        return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
        const u64 mask = ~((1ULL << pde_shift) - 1);
        u64 end;

        GEM_BUG_ON(length == 0);
        GEM_BUG_ON(offset_in_page(addr | length));

        end = addr + length;

        if ((addr & mask) != (end & mask))
                return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

        return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
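
/*
 * Illustrative worked example (not part of the original header): with
 * pde_shift == 22, addr == 0x3fe000 and length == 0x4000, end == 0x402000
 * crosses the 4MiB boundary at 0x400000, so the helper returns
 * 1024 - i915_pte_index(0x3fe000, 22) == 1024 - 1022 == 2 and leaves the
 * remaining pages for the caller's next iteration.
 */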

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
        return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
              const unsigned short n)
{
        return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
              const unsigned short n)
{
        return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
        struct i915_page_table *pt = ppgtt->pd->entry[n];

        return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
        return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);

void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {                                           \
        u64 v__ = lower_32_bits(v);                                     \
        fill_px((px), v__ << 32 | v__);                                 \
} while (0)
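
/*
 * Illustrative note (not part of the original header): fill32_px() replicates
 * the low 32 bits of v into both halves of a 64-bit word, so a page of 32-bit
 * gen6 PTEs can be filled through the same u64-based fill_page_dma() helper
 * that services 64-bit gen8 PTEs.
 */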

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
             struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
               const unsigned short idx,
               struct i915_page_table *pt,
               u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
        __set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
               const unsigned short idx,
               const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
                 const unsigned short idx,
                 struct i915_page_table * const pt,
                 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

void ppgtt_bind_vma(struct i915_address_space *vm,
                    struct i915_vm_pt_stash *stash,
                    struct i915_vma *vma,
                    enum i915_cache_level cache_level,
                    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
                      struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
                           struct i915_vm_pt_stash *stash,
                           u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
                         struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
                           struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
        struct scatterlist *sg;
        dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
        struct scatterlist *sg = vma->pages->sgl;
        dma_addr_t addr = sg_dma_address(sg);

        return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
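
/*
 * Illustrative usage sketch (an assumption about how insert_entries
 * implementations typically consume this iterator, not text from this
 * header):
 *
 *      struct sgt_dma it = sgt_dma(vma);
 *
 *      for each PTE slot to fill {
 *              write_pte(it.dma | flags);
 *              it.dma += I915_GTT_PAGE_SIZE;
 *              if (it.dma >= it.max) {
 *                      it.sg = sg_next(it.sg);
 *                      if (!it.sg)
 *                              break;
 *                      it.dma = sg_dma_address(it.sg);
 *                      it.max = it.dma + sg_dma_len(it.sg);
 *              }
 *      }
 */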

#endif