linux/drivers/gpu/drm/i915/i915_gem_gtt.h
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

#include <linux/io-mapping.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

#include "i915_request.h"
#include "i915_reset.h"
#include "i915_selftest.h"
#include "i915_timeline.h"

#define I915_GTT_PAGE_SIZE_4K   BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K  BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M   BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
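
/*
 * Worked example (illustrative only, not from the original source): with
 * the default 4K page size, I915_GTT_PAGE_MASK is -BIT_ULL(12) ==
 * ~0xfffULL, so (addr & I915_GTT_PAGE_MASK) rounds an address down to
 * the start of its page.
 */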

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_file_private;
struct drm_i915_fence_reg;
struct i915_vma;

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

/* gen6-hsw has bits 11:4 for physical addr bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)      ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)
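
/*
 * Worked example (illustrative only, hypothetical address): for the 40b
 * physical address 0x0123456000, bits 39:32 are 0x01. (addr >> 28) & 0xff0
 * moves them into PTE bits 11:4, so GEN6_GTT_ADDR_ENCODE() yields
 * 0x0123456000 | 0x010 = 0x0123456010, leaving the low bits free for the
 * valid/cache flags below.
 */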
#define GEN6_PTE_CACHE_LLC              (2 << 1)
#define GEN6_PTE_UNCACHED               (1 << 1)
#define GEN6_PTE_VALID                  (1 << 0)

#define I915_PTES(pte_len)              ((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)          (I915_PTES(pte_len) - 1)
#define I915_PDES                       512
#define I915_PDE_MASK                   (I915_PDES - 1)
#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES                       I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE                    (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN                   (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT                  22
#define GEN6_PDE_VALID                  (1 << 0)

#define GEN7_PTE_CACHE_L3_LLC           (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES   (1 << 2)
#define BYT_PTE_WRITEABLE               (1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)  ((((bits) & 0x7) << 1) | \
                                         (((bits) & 0x8) << (11 - 3)))
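
/*
 * Worked example (illustrative only): HSW_CACHEABILITY_CONTROL(0xb)
 * splits 0xb = 0b1011 into (0x3 << 1) | (0x8 << 8) = 0x006 | 0x800 =
 * 0x806, i.e. bits 2:1 plus bit 11 set, matching HSW_WB_ELLC_LLC_AGE0
 * below.
 */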
#define HSW_WB_LLC_AGE3                 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0                 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3            HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0            HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3            HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0            HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED                (0)
#define HSW_GTT_ADDR_ENCODE(addr)       ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)       HSW_GTT_ADDR_ENCODE(addr)

/* GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 */
#define GEN8_3LVL_PDPES                 4
#define GEN8_PDE_SHIFT                  21
#define GEN8_PDE_MASK                   0x1ff
#define GEN8_PTE_SHIFT                  12
#define GEN8_PTE_MASK                   0x1ff
#define GEN8_PTES                       I915_PTES(sizeof(gen8_pte_t))
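
/*
 * Worked example (illustrative only, hypothetical address): the 32b
 * address 0x41423456 decodes as PDPE = (0x41423456 >> 30) & 3 = 1,
 * PDE = (0x41423456 >> 21) & 0x1ff = 0x00a, PTE = (0x41423456 >> 12) &
 * 0x1ff = 0x023, with a page offset of 0x456.
 */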

/* GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_PML4ES_PER_PML4            512
#define GEN8_PML4E_SHIFT                39
#define GEN8_PML4E_MASK                 (GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT                 30
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on
 * 32b page tables.
 */
#define GEN8_PDPE_MASK                  0x1ff
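
/*
 * Worked example (illustrative only, hypothetical address): a 48b
 * address gains a PML4 level on top of the same scheme, e.g.
 * 0xfedcba987000 has PML4E = (0xfedcba987000 >> 39) & 0x1ff = 0x1fd and
 * PDPE = (0xfedcba987000 >> 30) & 0x1ff = 0x172, and so on down to the
 * 4K page.
 */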

#define PPAT_UNCACHED                   (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE                 0 /* WB LLC */
#define PPAT_CACHED                     _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC               _PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP                  (1<<6)
#define GEN8_PPAT_AGE(x)                ((x)<<4)
#define GEN8_PPAT_LLCeLLC               (3<<2)
#define GEN8_PPAT_LLCELLC               (2<<2)
#define GEN8_PPAT_LLC                   (1<<2)
#define GEN8_PPAT_WB                    (3<<0)
#define GEN8_PPAT_WT                    (2<<0)
#define GEN8_PPAT_WC                    (1<<0)
#define GEN8_PPAT_UC                    (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE         (0<<2)
#define GEN8_PPAT(i, x)                 ((u64)(x) << ((i) * 8))
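
/*
 * Worked example (illustrative only): GEN8_PPAT() packs one 8-bit PPAT
 * entry per byte of a u64 register value, so
 * GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_LLC | GEN8_PPAT_AGE(3)) places
 * (3 | (1<<2) | (3<<4)) = 0x37 at bits 23:16, i.e. 0x370000.
 */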

#define GEN8_PPAT_GET_CA(x) ((x) & 3)
#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

struct sg_table;

struct intel_rotation_info {
        struct intel_rotation_plane_info {
                /* tiles */
                unsigned int width, height, stride, offset;
        } plane[2];
} __packed;

struct intel_partial_info {
        u64 offset;
        unsigned int size;
} __packed;

enum i915_ggtt_view_type {
        I915_GGTT_VIEW_NORMAL = 0,
        I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
        I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
};

static inline void assert_i915_gem_gtt_types(void)
{
        BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
        BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));

        /* As we encode the size of each branch inside the union into its type,
         * we have to be careful that each branch has a unique size.
         */
        switch ((enum i915_ggtt_view_type)0) {
        case I915_GGTT_VIEW_NORMAL:
        case I915_GGTT_VIEW_PARTIAL:
        case I915_GGTT_VIEW_ROTATED:
                /* gcc complains if these are identical cases */
                break;
        }
}

struct i915_ggtt_view {
        enum i915_ggtt_view_type type;
        union {
                /* Members need to contain no holes/padding */
                struct intel_partial_info partial;
                struct intel_rotation_info rotated;
        };
};
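
/*
 * Minimal sketch (assumed caller, not part of this header): because each
 * view type's enum value equals the size of its union member, two views
 * can be compared with a single memcmp() sized by the type itself:
 *
 *	static bool view_equal(const struct i915_ggtt_view *a,
 *			       const struct i915_ggtt_view *b)
 *	{
 *		return a->type == b->type &&
 *		       memcmp(&a->partial, &b->partial, a->type) == 0;
 *	}
 *
 * For I915_GGTT_VIEW_NORMAL the size is 0, so the memcmp trivially
 * matches.
 */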

enum i915_cache_level;

struct i915_vma;

struct i915_page_dma {
        struct page *page;
        int order;
        union {
                dma_addr_t daddr;

                /* For gen6/gen7 only. This is the offset in the GGTT
                 * where the page directory entries for PPGTT begin
                 */
                u32 ggtt_offset;
        };
};

#define px_base(px) (&(px)->base)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_table {
        struct i915_page_dma base;
        unsigned int used_ptes;
};

struct i915_page_directory {
        struct i915_page_dma base;

        struct i915_page_table *page_table[I915_PDES]; /* PDEs */
        unsigned int used_pdes;
};

struct i915_page_directory_pointer {
        struct i915_page_dma base;
        struct i915_page_directory **page_directory;
        unsigned int used_pdpes;
};

struct i915_pml4 {
        struct i915_page_dma base;
        struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};

struct i915_vma_ops {
        /* Map an object into an address space with the given cache flags. */
        int (*bind_vma)(struct i915_vma *vma,
                        enum i915_cache_level cache_level,
                        u32 flags);
        /*
         * Unmap an object from an address space. This usually consists of
         * setting the valid PTE entries to a reserved scratch page.
         */
        void (*unbind_vma)(struct i915_vma *vma);

        int (*set_pages)(struct i915_vma *vma);
        void (*clear_pages)(struct i915_vma *vma);
};

struct pagestash {
        spinlock_t lock;
        struct pagevec pvec;
};

struct i915_address_space {
        struct drm_mm mm;
        struct drm_i915_private *i915;
        struct device *dma;
        /* Every address space belongs to a struct file - except for the global
         * GTT that is owned by the driver (and so @file is set to NULL). In
         * principle, no information should leak from one context to another
         * (or between files/processes etc) unless explicitly shared by the
         * owner. Tracking the owner is important in order to free up per-file
         * objects along with the file, to aid resource tracking, and to
         * assign blame.
         */
        struct drm_i915_file_private *file;
        u64 total;              /* size addr space maps (ex. 2GB for ggtt) */
        u64 reserved;           /* size addr space reserved */

        bool closed;

        struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

        u64 scratch_pte;
        struct i915_page_dma scratch_page;
        struct i915_page_table *scratch_pt;
        struct i915_page_directory *scratch_pd;
        struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

        /**
         * List of vma currently bound.
         */
        struct list_head bound_list;

        /**
         * List of vma that are not bound.
         */
        struct list_head unbound_list;

        struct pagestash free_pages;

        /* Global GTT */
        bool is_ggtt:1;

        /* Some systems require uncached updates of the page directories */
        bool pt_kmap_wc:1;

        /* Some systems support read-only mappings for GGTT and/or PPGTT */
        bool has_read_only:1;

        u64 (*pte_encode)(dma_addr_t addr,
                          enum i915_cache_level level,
                          u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY   (1<<0)

        int (*allocate_va_range)(struct i915_address_space *vm,
                                 u64 start, u64 length);
        void (*clear_range)(struct i915_address_space *vm,
                            u64 start, u64 length);
        void (*insert_page)(struct i915_address_space *vm,
                            dma_addr_t addr,
                            u64 offset,
                            enum i915_cache_level cache_level,
                            u32 flags);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct i915_vma *vma,
                               enum i915_cache_level cache_level,
                               u32 flags);
        void (*cleanup)(struct i915_address_space *vm);

        struct i915_vma_ops vma_ops;

        I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
        I915_SELFTEST_DECLARE(bool scrub_64K);
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)

static inline bool
i915_vm_is_48bit(const struct i915_address_space *vm)
{
        return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
        return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
}

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_ggtt {
        struct i915_address_space vm;

        struct io_mapping iomap;        /* Mapping to our CPU mappable region */
        struct resource gmadr;          /* GMADR resource */
        resource_size_t mappable_end;   /* End offset that we can CPU map */

        /** "Graphics Stolen Memory" holds the global PTEs */
        void __iomem *gsm;
        void (*invalidate)(struct drm_i915_private *dev_priv);

        bool do_idle_maps;

        int mtrr;

        u32 pin_bias;

        struct drm_mm_node error_capture;
};

struct i915_hw_ppgtt {
        struct i915_address_space vm;
        struct kref ref;

        unsigned long pd_dirty_rings;
        union {
                struct i915_pml4 pml4;          /* GEN8+ & 48b PPGTT */
                struct i915_page_directory_pointer pdp; /* GEN8+ */
                struct i915_page_directory pd;          /* GEN6-7 */
        };
};

struct gen6_hw_ppgtt {
        struct i915_hw_ppgtt base;

        struct i915_vma *vma;
        gen6_pte_t __iomem *pd_addr;

        unsigned int pin_count;
        bool scan_for_unused_pt;
};

#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)

static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
{
        BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
        return __to_gen6_ppgtt(base);
}

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible by the PDE size, the
 * macro will round down and up as needed. Start=0 and length=2G effectively
 * iterates over every PDE in the system. The macro modifies ALL its
 * parameters except 'pd', so each of the other parameters should preferably
 * be a simple variable, or at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter)                  \
        for (iter = gen6_pde_index(start);                              \
             length > 0 && iter < I915_PDES &&                          \
                (pt = (pd)->page_table[iter], true);                    \
             ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);         \
                    temp = min(temp - start, length);                   \
                    start += temp, length -= temp; }), ++iter)
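
/*
 * Usage sketch (hypothetical caller; 'ppgtt', 'start', 'length' and the
 * loop body are assumptions, not taken from this header): walk every
 * page table spanned by [start, start + length) in a gen6 PPGTT.
 *
 *	struct i915_page_table *pt;
 *	u32 pde;
 *
 *	gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
 *		operate on 'pt'; each pass covers at most the 4M mapped
 *		by one PDE, and the macro advances start/length itself
 *	}
 */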

#define gen6_for_all_pdes(pt, pd, iter)                                 \
        for (iter = 0;                                                  \
             iter < I915_PDES &&                                        \
                (pt = (pd)->page_table[iter], true);                    \
             ++iter)

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
        const u32 mask = NUM_PTE(pde_shift) - 1;

        return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
        const u64 mask = ~((1ULL << pde_shift) - 1);
        u64 end;

        GEM_BUG_ON(length == 0);
        GEM_BUG_ON(offset_in_page(addr | length));

        end = addr + length;

        if ((addr & mask) != (end & mask))
                return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

        return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
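
/*
 * Worked example (illustrative only): with pde_shift == GEN6_PDE_SHIFT
 * (22), one PDE spans 4M, i.e. 1024 PTEs. For addr = 4M - 8K and
 * length = 16K the range crosses a PDE boundary, so the helper returns
 * the 2 PTEs up to that boundary rather than the full 4.
 */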

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
        return (addr >> shift) & I915_PDE_MASK;
}

static inline u32 gen6_pte_index(u32 addr)
{
        return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pte_count(u32 addr, u32 length)
{
        return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pde_index(u32 addr)
{
        return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

static inline unsigned int
i915_pdpes_per_pdp(const struct i915_address_space *vm)
{
        if (i915_vm_is_48bit(vm))
                return GEN8_PML4ES_PER_PML4;

        return GEN8_3LVL_PDPES;
}

/* Equivalent to the gen6 version. For each pde, iterates over every pde
 * between start and start + length. On gen8+ it simply iterates over every
 * page directory entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter)                  \
        for (iter = gen8_pde_index(start);                              \
             length > 0 && iter < I915_PDES &&                          \
                (pt = (pd)->page_table[iter], true);                    \
             ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT);         \
                    temp = min(temp - start, length);                   \
                    start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pdpe(pd, pdp, start, length, iter)                \
        for (iter = gen8_pdpe_index(start);                             \
             length > 0 && iter < i915_pdpes_per_pdp(vm) &&             \
                (pd = (pdp)->page_directory[iter], true);               \
             ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT);        \
                    temp = min(temp - start, length);                   \
                    start += temp, length -= temp; }), ++iter)
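
/*
 * NB: gen8_for_each_pdpe() additionally relies on a variable named 'vm'
 * (the struct i915_address_space under iteration) being in scope at the
 * call site for the i915_pdpes_per_pdp() bound; it is not one of the
 * macro parameters.
 */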

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter)             \
        for (iter = gen8_pml4e_index(start);                            \
             length > 0 && iter < GEN8_PML4ES_PER_PML4 &&               \
                (pdp = (pml4)->pdps[iter], true);                       \
             ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT);    \
                    temp = min(temp - start, length);                   \
                    start += temp, length -= temp; }), ++iter)

static inline u32 gen8_pte_index(u64 address)
{
        return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline u32 gen8_pde_index(u64 address)
{
        return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline u32 gen8_pdpe_index(u64 address)
{
        return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline u32 gen8_pml4e_index(u64 address)
{
        return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}

static inline u64 gen8_pte_count(u64 address, u64 length)
{
        return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
        return px_dma(ppgtt->pdp.page_directory[n]);
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
        GEM_BUG_ON(!i915_is_ggtt(vm));
        return container_of(vm, struct i915_ggtt, vm);
}

#define INTEL_MAX_PPAT_ENTRIES 8
#define INTEL_PPAT_PERFECT_MATCH (~0U)

struct intel_ppat;

struct intel_ppat_entry {
        struct intel_ppat *ppat;
        struct kref ref;
        u8 value;
};

struct intel_ppat {
        struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
        DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
        DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
        unsigned int max_entries;
        u8 clear_value;
        /*
         * Return a score to show how well two PPAT values match; an
         * INTEL_PPAT_PERFECT_MATCH indicates a perfect match.
         */
        unsigned int (*match)(u8 src, u8 dst);
        void (*update_hw)(struct drm_i915_private *i915);

        struct drm_i915_private *i915;
};

const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value);
void intel_ppat_put(const struct intel_ppat_entry *entry);

int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);

int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
void i915_ggtt_enable_guc(struct drm_i915_private *i915);
void i915_ggtt_disable_guc(struct drm_i915_private *i915);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);

int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
                                        struct drm_i915_file_private *fpriv);
void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
        if (ppgtt)
                kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
        if (ppgtt)
                kref_put(&ppgtt->ref, i915_ppgtt_release);
}

int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);

int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
                                            struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *pages);

int i915_gem_gtt_reserve(struct i915_address_space *vm,
                         struct drm_mm_node *node,
                         u64 size, u64 offset, unsigned long color,
                         unsigned int flags);

int i915_gem_gtt_insert(struct i915_address_space *vm,
                        struct drm_mm_node *node,
                        u64 size, u64 alignment, unsigned long color,
                        u64 start, u64 end, unsigned int flags);

/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK            BIT_ULL(0)
#define PIN_NONFAULT            BIT_ULL(1)
#define PIN_NOEVICT             BIT_ULL(2)
#define PIN_MAPPABLE            BIT_ULL(3)
#define PIN_ZONE_4G             BIT_ULL(4)
#define PIN_HIGH                BIT_ULL(5)
#define PIN_OFFSET_BIAS         BIT_ULL(6)
#define PIN_OFFSET_FIXED        BIT_ULL(7)

#define PIN_MBZ                 BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL              BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER                BIT_ULL(10) /* I915_VMA_LOCAL_BIND */
#define PIN_UPDATE              BIT_ULL(11)

#define PIN_OFFSET_MASK         (-I915_GTT_PAGE_SIZE)

#endif