linux/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <linux/mmu_notifier.h>

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;
struct intel_memory_region;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
        struct list_head obj_link;
        struct i915_gem_context *ctx;
        u32 handle;
};

struct drm_i915_gem_object_ops {
        unsigned int flags;
#define I915_GEM_OBJECT_IS_SHRINKABLE                   BIT(1)
/* Skip the shrinker management in set_pages/unset_pages */
#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST        BIT(2)
#define I915_GEM_OBJECT_IS_PROXY                        BIT(3)
#define I915_GEM_OBJECT_NO_MMAP                         BIT(4)

        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages, before binding them into the GTT, and put_pages() is
         * called after we no longer need them. As we expect there to be an
         * associated cost with migrating pages between the backing storage
         * and making them available for the GPU (e.g. clflush), we may hold
         * onto the pages after they are no longer referenced by the GPU
         * in case they may be used again shortly (for example migrating the
         * pages to a different memory domain within the GTT). put_pages()
         * will therefore most likely be called when the object itself is
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
        int (*get_pages)(struct drm_i915_gem_object *obj);
        void (*put_pages)(struct drm_i915_gem_object *obj,
                          struct sg_table *pages);
        int (*truncate)(struct drm_i915_gem_object *obj);
        /**
         * shrink - Perform further backend specific actions to facilitate
         * shrinking.
         * @obj: The gem object
         * @flags: Extra flags to control shrinking behaviour in the backend
         *
         * Possible values for @flags:
         *
         * I915_GEM_OBJECT_SHRINK_WRITEBACK - Try to perform writeback of the
         * backing pages, if supported.
         *
         * I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT - Don't wait for the object to
         * idle. Active objects can be considered later. The TTM backend for
         * example might have async migrations going on, which don't use any
         * i915_vma to track the active GTT binding, and hence having an unbound
         * object might not be enough.
         */
#define I915_GEM_OBJECT_SHRINK_WRITEBACK   BIT(0)
#define I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT BIT(1)
        int (*shrink)(struct drm_i915_gem_object *obj, unsigned int flags);

        int (*pread)(struct drm_i915_gem_object *obj,
                     const struct drm_i915_gem_pread *arg);
        int (*pwrite)(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *arg);
        u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
        void (*unmap_virtual)(struct drm_i915_gem_object *obj);

        int (*dmabuf_export)(struct drm_i915_gem_object *obj);

        /**
         * adjust_lru - notify that the madvise value was updated
         * @obj: The gem object
         *
         * The madvise value may have been updated, or object was recently
         * referenced so act accordingly (Perhaps changing an LRU list etc).
         */
        void (*adjust_lru)(struct drm_i915_gem_object *obj);

        /**
         * delayed_free - Override the default delayed free implementation
         */
        void (*delayed_free)(struct drm_i915_gem_object *obj);

        /**
         * migrate - Migrate object to a different region either for
         * pinning or for as long as the object lock is held.
         */
        int (*migrate)(struct drm_i915_gem_object *obj,
                       struct intel_memory_region *mr);

        void (*release)(struct drm_i915_gem_object *obj);

        const struct vm_operations_struct *mmap_ops;
        const char *name; /* friendly name for debug, e.g. lockdep classes */
};
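
/*
 * Purely illustrative sketch (not defined in this header, and not any real
 * backend): a backing store provides its implementation by filling in a
 * drm_i915_gem_object_ops table, and its get_pages() hook hands the acquired
 * scatterlist over via __i915_gem_object_set_pages(). The my_backend_* names
 * are hypothetical and the helper signatures are only assumed here; the real
 * examples live in e.g. gem/i915_gem_shmem.c and gem/i915_gem_ttm.c.
 *
 *      static int my_backend_get_pages(struct drm_i915_gem_object *obj)
 *      {
 *              struct sg_table *pages;
 *
 *              pages = my_backend_alloc_pages(obj);    // hypothetical helper
 *              if (IS_ERR(pages))
 *                      return PTR_ERR(pages);
 *
 *              __i915_gem_object_set_pages(obj, pages,
 *                                          i915_sg_dma_sizes(pages->sgl));
 *              return 0;
 *      }
 *
 *      static const struct drm_i915_gem_object_ops my_backend_ops = {
 *              .name = "my_backend",
 *              .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *              .get_pages = my_backend_get_pages,
 *              .put_pages = my_backend_put_pages,      // hypothetical
 *      };
 */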

/**
 * enum i915_cache_level - The supported GTT caching values for system memory
 * pages.
 *
 * These translate to some special GTT PTE bits when binding pages into some
 * address space. It also determines whether an object, or rather its pages,
 * are coherent with the GPU, when also reading or writing through the CPU
 * cache with those pages.
 *
 * Userspace can also control this through struct drm_i915_gem_caching.
 */
enum i915_cache_level {
        /**
         * @I915_CACHE_NONE:
         *
         * GPU access is not coherent with the CPU cache. If the cache is dirty
         * and we need the underlying pages to be coherent with some later GPU
         * access then we need to manually flush the pages.
         *
         * On shared LLC platforms reads and writes through the CPU cache are
         * still coherent even with this setting. See also
         * &drm_i915_gem_object.cache_coherent for more details. Due to this we
         * should only ever use uncached for scanout surfaces, otherwise we end
         * up over-flushing in some places.
         *
         * This is the default on non-LLC platforms.
         */
        I915_CACHE_NONE = 0,
        /**
         * @I915_CACHE_LLC:
         *
         * GPU access is coherent with the CPU cache. If the cache is dirty,
         * then the GPU will ensure that access remains coherent, when both
         * reading and writing through the CPU cache. GPU writes can dirty the
         * CPU cache.
         *
         * Not used for scanout surfaces.
         *
         * Applies to both platforms with shared LLC(HAS_LLC), and snooping
         * based platforms(HAS_SNOOP).
         *
         * This is the default on shared LLC platforms.  The only exception is
         * scanout objects, where the display engine is not coherent with the
         * CPU cache. For such objects I915_CACHE_NONE or I915_CACHE_WT is
         * automatically applied by the kernel in pin_for_display, if userspace
         * has not done so already.
         */
        I915_CACHE_LLC,
        /**
         * @I915_CACHE_L3_LLC:
         *
         * Explicitly enable the Gfx L3 cache, with coherent LLC.
         *
         * The Gfx L3 sits between the domain specific caches, e.g
         * sampler/render caches, and the larger LLC. LLC is coherent with the
         * GPU, but L3 is only visible to the GPU, so likely needs to be flushed
         * when the workload completes.
         *
         * Not used for scanout surfaces.
         *
         * Only exposed on some gen7 + GGTT. More recent hardware has dropped
         * this explicit setting, where it should now be enabled by default.
         */
        I915_CACHE_L3_LLC,
        /**
         * @I915_CACHE_WT:
         *
         * Write-through. Used for scanout surfaces.
         *
         * The GPU can utilise the caches, while still having the display engine
         * be coherent with GPU writes, as a result we don't need to flush the
         * CPU caches when moving out of the render domain. This is the default
         * setting chosen by the kernel, if supported by the HW, otherwise we
         * fallback to I915_CACHE_NONE. On the CPU side writes through the CPU
         * cache still need to be flushed, to remain coherent with the display
         * engine.
         */
        I915_CACHE_WT,
};
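
/*
 * The uapi side of the table above, as a rough, purely illustrative sketch of
 * how userspace could request a caching mode with the set_caching ioctl (the
 * kernel then picks the matching i915_cache_level). drmIoctl() is the usual
 * libdrm wrapper and the surrounding fd/handle variables are assumed.
 *
 *      struct drm_i915_gem_caching arg = {
 *              .handle = handle,               // GEM handle of the object
 *              .caching = I915_CACHING_CACHED, // ask for LLC caching
 *      };
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */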

enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
        I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
        I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
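
/*
 * The FORCE variants are just the base mapping type with I915_MAP_OVERRIDE
 * set, which roughly tells i915_gem_object_pin_map() that an existing cached
 * kernel mapping of a different type may be torn down and rebuilt rather than
 * failing with -EBUSY. A small sketch of how the two parts separate again:
 *
 *      enum i915_map_type type = I915_MAP_FORCE_WC;
 *      bool force = type & I915_MAP_OVERRIDE;  // true
 *
 *      type &= ~I915_MAP_OVERRIDE;             // back to I915_MAP_WC
 */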

enum i915_mmap_type {
        I915_MMAP_TYPE_GTT = 0,
        I915_MMAP_TYPE_WC,
        I915_MMAP_TYPE_WB,
        I915_MMAP_TYPE_UC,
        I915_MMAP_TYPE_FIXED,
};

struct i915_mmap_offset {
        struct drm_vma_offset_node vma_node;
        struct drm_i915_gem_object *obj;
        enum i915_mmap_type mmap_type;

        struct rb_node offset;
};

struct i915_gem_object_page_iter {
        struct scatterlist *sg_pos;
        unsigned int sg_idx; /* in pages, but 32bit eek! */

        struct radix_tree_root radix;
        struct mutex lock; /* protects this cache */
};

struct drm_i915_gem_object {
        /*
         * We might have reason to revisit the below since it wastes
         * a lot of space for non-ttm gem objects.
         * In any case, always use the accessors for the ttm_buffer_object
         * when accessing it.
         */
        union {
                struct drm_gem_object base;
                struct ttm_buffer_object __do_not_access;
        };

        const struct drm_i915_gem_object_ops *ops;

        struct {
                /**
                 * @vma.lock: protect the list/tree of vmas
                 */
                spinlock_t lock;

                /**
                 * @vma.list: List of VMAs backed by this object
                 *
                 * The VMAs on this list are ordered by type: all GGTT vma are
                 * placed at the head and all ppGTT vma are placed at the tail.
                 * The different types of GGTT vma are unordered between
                 * themselves, use the @vma.tree (which has a defined order
                 * between all VMA) to quickly find an exact match.
                 */
                struct list_head list;

                /**
                 * @vma.tree: Ordered tree of VMAs backed by this object
                 *
                 * All VMA created for this object are placed in the @vma.tree
                 * for fast retrieval via a binary search in
                 * i915_vma_instance(). They are also added to @vma.list for
                 * easy iteration.
                 */
                struct rb_root tree;
        } vma;

        /**
         * @lut_list: List of vma lookup entries in use for this object.
         *
         * If this object is closed, we need to remove all of its VMA from
         * the fast lookup index in associated contexts; @lut_list provides
         * this translation from object to context->handles_vma.
         */
        struct list_head lut_list;
        spinlock_t lut_lock; /* guards lut_list */

        /**
         * @obj_link: Link into &i915_gem_ww_ctx.obj_list
         *
         * When we lock this object through i915_gem_object_lock() with a
         * context, we add it to the list to ensure we can unlock everything
         * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
         */
        struct list_head obj_link;
        /**
         * @shares_resv_from: The object shares the resv from this vm.
         */
        struct i915_address_space *shares_resv_from;

        union {
                struct rcu_head rcu;
                struct llist_node freed;
        };

        /**
         * Whether the object is currently in the GGTT mmap.
         */
        unsigned int userfault_count;
        struct list_head userfault_link;

        struct {
                spinlock_t lock; /* Protects access to mmo offsets */
                struct rb_root offsets;
        } mmo;

        I915_SELFTEST_DECLARE(struct list_head st_link);

        unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS  BIT(0)
#define I915_BO_ALLOC_VOLATILE    BIT(1)
#define I915_BO_ALLOC_CPU_CLEAR   BIT(2)
#define I915_BO_ALLOC_USER        BIT(3)
/* Object is allowed to lose its contents on suspend / resume, even if pinned */
#define I915_BO_ALLOC_PM_VOLATILE BIT(4)
/* Object needs to be restored early using memcpy during resume */
#define I915_BO_ALLOC_PM_EARLY    BIT(5)
/*
 * Object is likely never accessed by the CPU. This will prioritise the BO to be
 * allocated in the non-mappable portion of lmem. This is merely a hint, and if
 * dealing with userspace objects the CPU fault handler is free to ignore this.
 */
#define I915_BO_ALLOC_GPU_ONLY    BIT(6)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
                             I915_BO_ALLOC_VOLATILE | \
                             I915_BO_ALLOC_CPU_CLEAR | \
                             I915_BO_ALLOC_USER | \
                             I915_BO_ALLOC_PM_VOLATILE | \
                             I915_BO_ALLOC_PM_EARLY | \
                             I915_BO_ALLOC_GPU_ONLY)
#define I915_BO_READONLY          BIT(7)
#define I915_TILING_QUIRK_BIT     8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED         BIT(9)
#define I915_BO_WAS_BOUND_BIT     10
        /**
         * @mem_flags - Mutable placement-related flags
         *
         * These are flags that indicate specifics of the memory region
         * the object is currently in. As such they are only stable
         * either under the object lock or if the object is pinned.
         */
        unsigned int mem_flags;
#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
#define I915_BO_FLAG_IOMEM       BIT(1) /* Object backed by IO memory */
        /**
         * @cache_level: The desired GTT caching level.
         *
         * See enum i915_cache_level for possible values, along with what
         * each does.
         */
        unsigned int cache_level:3;
        /**
         * @cache_coherent:
         *
         * Track whether the pages are coherent with the GPU if reading or
         * writing through the CPU caches. This largely depends on the
         * @cache_level setting.
         *
         * On platforms which don't have the shared LLC(HAS_SNOOP), like on Atom
         * platforms, coherency must be explicitly requested with some special
         * GTT caching bits(see enum i915_cache_level). When enabling coherency
         * it does come at a performance and power cost on such platforms. On
         * the flip side the kernel does not need to manually flush any buffers
         * which need to be coherent with the GPU, if the object is not coherent
         * i.e @cache_coherent is zero.
         *
         * On platforms that share the LLC with the CPU(HAS_LLC), all GT memory
         * access will automatically snoop the CPU caches(even with CACHE_NONE).
         * The one exception is when dealing with the display engine, like with
         * scanout surfaces. To handle this the kernel will always flush the
         * surface out of the CPU caches when preparing it for scanout.  Also
         * note that since scanout surfaces are only ever read by the display
         * engine we only need to care about flushing any writes through the CPU
         * cache, reads on the other hand will always be coherent.
         *
         * Something strange here is why @cache_coherent is not a simple
         * boolean, i.e coherent vs non-coherent. The reasoning for this is back
         * to the display engine not being fully coherent. As a result scanout
         * surfaces will either be marked as I915_CACHE_NONE or I915_CACHE_WT.
         * In the case of seeing I915_CACHE_NONE the kernel makes the assumption
         * that this is likely a scanout surface, and will set @cache_coherent
         * as only I915_BO_CACHE_COHERENT_FOR_READ, on platforms with the shared
         * LLC. The kernel uses this to always flush writes through the CPU
         * cache as early as possible, where it can, in effect keeping
         * @cache_dirty clean, so we can potentially avoid stalling when
         * flushing the surface just before doing the scanout.  This does mean
         * we might unnecessarily flush non-scanout objects in some places, but
         * the default assumption is that all normal objects should be using
         * I915_CACHE_LLC, at least on platforms with the shared LLC.
         *
         * Supported values:
         *
         * I915_BO_CACHE_COHERENT_FOR_READ:
         *
         * On shared LLC platforms, we use this for special scanout surfaces,
         * where the display engine is not coherent with the CPU cache. As such
         * we need to ensure we flush any writes before doing the scanout. As an
         * optimisation we try to flush any writes as early as possible to avoid
         * stalling later.
         *
         * Thus for scanout surfaces using I915_CACHE_NONE, on shared LLC
         * platforms, we use:
         *
         *      cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ
         *
         * While for normal objects that are fully coherent, including special
         * scanout surfaces marked as I915_CACHE_WT, we use:
         *
         *      cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ |
         *                       I915_BO_CACHE_COHERENT_FOR_WRITE
         *
         * And then for objects that are not coherent at all we use:
         *
         *      cache_coherent = 0
         *
         * I915_BO_CACHE_COHERENT_FOR_WRITE:
         *
         * When writing through the CPU cache, the GPU is still coherent. Note
         * that this also implies I915_BO_CACHE_COHERENT_FOR_READ.
         */
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
        unsigned int cache_coherent:2;

        /**
         * @cache_dirty:
         *
         * Track if we are dirty with writes through the CPU cache for this
         * object. As a result reading directly from main memory might yield
         * stale data.
         *
         * This also ties into whether the kernel is tracking the object as
         * coherent with the GPU, as per @cache_coherent, as it determines if
         * flushing might be needed at various points.
         *
         * Another part of @cache_dirty is managing flushing when first
         * acquiring the pages for system memory, at this point the pages are
         * considered foreign, so the default assumption is that the cache is
         * dirty, for example the page zeroing done by the kernel might leave
         * writes through the CPU cache, or swapping-in, while the actual data in
         * main memory is potentially stale.  Note that this is a potential
         * security issue when dealing with userspace objects and zeroing. Now,
         * whether we actually need to apply the big sledgehammer of flushing all
         * the pages on acquire depends on if @cache_coherent is marked as
         * I915_BO_CACHE_COHERENT_FOR_WRITE, i.e that the GPU will be coherent
         * for both reads and writes through the CPU cache.
         *
         * Note that on shared LLC platforms we still apply the heavy flush for
         * I915_CACHE_NONE objects, under the assumption that this is going to
         * be used for scanout.
         *
         * Update: On some hardware there is now also the 'Bypass LLC' MOCS
         * entry, which defeats our @cache_coherent tracking, since userspace
         * can freely bypass the CPU cache when touching the pages with the GPU,
         * where the kernel is completely unaware. On such platforms we need to
         * apply the sledgehammer-on-acquire regardless of the @cache_coherent.
         *
         * Special care is taken on non-LLC platforms, to prevent potential
         * information leak. The driver currently ensures:
         *
         *   1. All userspace objects, by default, have @cache_level set as
         *   I915_CACHE_NONE. The only exception is userptr objects, where we
         *   instead force I915_CACHE_LLC, but we also don't allow userspace to
         *   ever change the @cache_level for such objects. Another special case
         *   is dma-buf, which doesn't rely on @cache_dirty, but there we
         *   always do a forced flush when acquiring the pages, if there is a
         *   chance that the pages can be read directly from main memory with
         *   the GPU.
         *
         *   2. All I915_CACHE_NONE objects have @cache_dirty initially true.
         *
         *   3. All swapped-out objects(i.e shmem) have @cache_dirty set to
         *   true.
         *
         *   4. The @cache_dirty is never freely reset before the initial
         *   flush, even if userspace adjusts the @cache_level through the
         *   i915_gem_set_caching_ioctl.
         *
         *   5. All @cache_dirty objects(including swapped-in) are initially
         *   flushed with a synchronous call to drm_clflush_sg in
         *   __i915_gem_object_set_pages. The @cache_dirty can be freely reset
         *   at this point. All further asynchronous clflushes are never security
         *   critical, i.e userspace is free to race against itself.
         */
        unsigned int cache_dirty:1;

        /**
         * @read_domains: Read memory domains.
         *
         * These monitor which caches contain read/write data related to the
         * object. When transitioning from one set of domains to another,
         * the driver is called to ensure that caches are suitably flushed and
         * invalidated.
         */
        u16 read_domains;

        /**
         * @write_domain: Corresponding unique write memory domain.
         */
        u16 write_domain;

        struct intel_frontbuffer __rcu *frontbuffer;

        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
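
/*
 * tiling_and_stride packs both values into one word: the low bits (below
 * FENCE_MINIMUM_STRIDE) hold the tiling mode, while the stride of a tiled,
 * fenceable object is a multiple of FENCE_MINIMUM_STRIDE and so lives in the
 * remaining bits. A small sketch of how the two halves are unpacked, in the
 * spirit of the i915_gem_object_get_tiling()/get_stride() helpers:
 *
 *      unsigned int tiling = obj->tiling_and_stride & TILING_MASK;
 *      unsigned int stride = obj->tiling_and_stride & STRIDE_MASK;
 */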

        struct {
                /*
                 * Protects the pages and their use. Do not use directly, but
                 * instead go through the pin/unpin interfaces.
                 */
                atomic_t pages_pin_count;

                /**
                 * @shrink_pin: Prevents the pages from being made visible to
                 * the shrinker, while the shrink_pin is non-zero. Most users
                 * should pretty much never have to care about this, outside of
                 * some special use cases.
                 *
                 * By default most objects will start out as visible to the
                 * shrinker(if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the
                 * backing pages are attached to the object, like in
                 * __i915_gem_object_set_pages(). They will then be removed from
                 * the shrinker list once the pages are released.
                 *
                 * The @shrink_pin is incremented by calling
                 * i915_gem_object_make_unshrinkable(), which will also remove
                 * the object from the shrinker list, if the pin count was zero.
                 *
                 * Callers will then typically call
                 * i915_gem_object_make_shrinkable() or
                 * i915_gem_object_make_purgeable() to decrement the pin count,
                 * and make the pages visible again.
                 */
                atomic_t shrink_pin;

                /**
                 * @ttm_shrinkable: True when the object is using shmem pages
                 * underneath. Protected by the object lock.
                 */
                bool ttm_shrinkable;

                /**
                 * Priority list of potential placements for this object.
                 */
                struct intel_memory_region **placements;
                int n_placements;

                /**
                 * Memory region for this object.
                 */
                struct intel_memory_region *region;

                /**
                 * Memory manager resource allocated for this object. Only
                 * needed for the mock region.
                 */
                struct ttm_resource *res;

                /**
                 * Element within memory_region->objects or region->purgeable
                 * if the object is marked as DONTNEED. Access is protected by
                 * region->obj_lock.
                 */
                struct list_head region_link;

                struct i915_refct_sgt *rsgt;
                struct sg_table *pages;
                void *mapping;

                struct i915_page_sizes page_sizes;

                I915_SELFTEST_DECLARE(unsigned int page_mask);

                struct i915_gem_object_page_iter get_page;
                struct i915_gem_object_page_iter get_dma_page;

                /**
                 * Element within i915->mm.shrink_list or i915->mm.purge_list,
                 * locked by i915->mm.obj_lock.
                 */
                struct list_head link;

                /**
                 * Advice: are the backing pages purgeable?
                 */
                unsigned int madv:2;

                /**
                 * This is set if the object has been written to since the
                 * pages were last acquired.
                 */
                bool dirty:1;
        } mm;

        struct {
                struct i915_refct_sgt *cached_io_rsgt;
                struct i915_gem_object_page_iter get_io_page;
                struct drm_i915_gem_object *backup;
                bool created:1;
        } ttm;

        /*
         * Record which PXP key instance this object was created against (if
         * any), so we can use it to determine if the encryption is valid by
         * comparing against the current key instance.
         */
        u32 pxp_key_instance;

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        union {
#ifdef CONFIG_MMU_NOTIFIER
                struct i915_gem_userptr {
                        uintptr_t ptr;
                        unsigned long notifier_seq;

                        struct mmu_interval_notifier notifier;
                        struct page **pvec;
                        int page_ref;
                } userptr;
#endif

                struct drm_mm_node *stolen;

                unsigned long scratch;
                u64 encode;

                void *gvt_info;
        };
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
        /* Assert that to_intel_bo(NULL) == NULL */
        BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

        return container_of(gem, struct drm_i915_gem_object, base);
}
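
/*
 * Typical (illustrative) use: convert the generic GEM object handed back by
 * core DRM, e.g. from a handle lookup, into the i915 representation. Because
 * the BUILD_BUG_ON() above pins base as the first member, a NULL pointer
 * passes through unchanged. The file/handle variables are assumed to come
 * from the surrounding ioctl context.
 *
 *      struct drm_i915_gem_object *obj =
 *              to_intel_bo(drm_gem_object_lookup(file, handle));
 *
 *      if (!obj)
 *              return -ENOENT;
 */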

#endif