linux/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <linux/mmu_notifier.h>

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;
struct intel_memory_region;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
        struct list_head obj_link;
        struct i915_gem_context *ctx;
        u32 handle;
};
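
/*
 * Illustrative sketch (not part of the driver): roughly how execbuf-style
 * code might record a handle->vma lookup with a lut entry, for a vma that
 * has already been looked up. Error handling is omitted and any name not
 * declared in this header is hypothetical.
 *
 *	struct i915_lut_handle *lut = kmalloc(sizeof(*lut), GFP_KERNEL);
 *
 *	lut->ctx = ctx;
 *	lut->handle = handle;
 *	// index the vma by handle in the context, and keep the entry on the
 *	// object's lut_list so it can be unwound when either the object or
 *	// the context is closed.
 *	radix_tree_insert(&ctx->handles_vma, handle, vma);
 *	list_add(&lut->obj_link, &obj->lut_list);
 */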

struct drm_i915_gem_object_ops {
        unsigned int flags;
#define I915_GEM_OBJECT_IS_SHRINKABLE   BIT(1)
#define I915_GEM_OBJECT_IS_PROXY        BIT(2)
#define I915_GEM_OBJECT_NO_MMAP         BIT(3)

        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages, before binding them into the GTT, and put_pages() is
         * called after we no longer need them. As we expect there to be an
         * associated cost with migrating pages between the backing storage
         * and making them available for the GPU (e.g. clflush), we may hold
         * onto the pages after they are no longer referenced by the GPU
         * in case they may be used again shortly (for example when migrating
         * the pages to a different memory domain within the GTT). put_pages()
         * will therefore most likely be called when the object itself is
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
        int (*get_pages)(struct drm_i915_gem_object *obj);
        void (*put_pages)(struct drm_i915_gem_object *obj,
                          struct sg_table *pages);
        void (*truncate)(struct drm_i915_gem_object *obj);
        void (*writeback)(struct drm_i915_gem_object *obj);

        int (*pread)(struct drm_i915_gem_object *obj,
                     const struct drm_i915_gem_pread *arg);
        int (*pwrite)(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *arg);
        u64 (*mmap_offset)(struct drm_i915_gem_object *obj);

        int (*dmabuf_export)(struct drm_i915_gem_object *obj);

        /**
         * adjust_lru - notify that the madvise value was updated
         * @obj: The gem object
         *
         * The madvise value may have been updated, or the object was recently
         * referenced, so act accordingly (perhaps changing an LRU list etc).
         */
        void (*adjust_lru)(struct drm_i915_gem_object *obj);

        /**
         * delayed_free - Override the default delayed free implementation
         */
        void (*delayed_free)(struct drm_i915_gem_object *obj);

        /**
         * migrate - Migrate object to a different region either for
         * pinning or for as long as the object lock is held.
         */
        int (*migrate)(struct drm_i915_gem_object *obj,
                       struct intel_memory_region *mr);

        void (*release)(struct drm_i915_gem_object *obj);

        const struct vm_operations_struct *mmap_ops;
        const char *name; /* friendly name for debug, e.g. lockdep classes */
};
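
/*
 * Illustrative sketch (not part of the driver): the rough shape of a backend
 * filling in drm_i915_gem_object_ops. The my_backend_* names are hypothetical;
 * only the ops structure and its flags come from this header, and
 * __i915_gem_object_set_pages() is the common page-tracking entry point the
 * kerneldoc for @cache_dirty refers to below.
 *
 *	static int my_backend_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		struct sg_table *pages;
 *
 *		// allocate backing storage and describe it with an sg_table,
 *		// then hand it to the common page-tracking code before the
 *		// pages are bound into the GTT (assuming 4K chunks here).
 *		pages = my_backend_alloc_pages(obj);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		__i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
 *		return 0;
 *	}
 *
 *	static const struct drm_i915_gem_object_ops my_backend_ops = {
 *		.name = "my_backend_object",
 *		.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_backend_get_pages,
 *		.put_pages = my_backend_put_pages,
 *	};
 */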

/**
 * enum i915_cache_level - The supported GTT caching values for system memory
 * pages.
 *
 * These translate to some special GTT PTE bits when binding pages into some
 * address space. They also determine whether an object, or rather its pages,
 * are coherent with the GPU when also reading or writing through the CPU cache
 * with those pages.
 *
 * Userspace can also control this through struct drm_i915_gem_caching.
 */
enum i915_cache_level {
        /**
         * @I915_CACHE_NONE:
         *
         * GPU access is not coherent with the CPU cache. If the cache is dirty
         * and we need the underlying pages to be coherent with some later GPU
         * access then we need to manually flush the pages.
         *
         * On shared LLC platforms reads and writes through the CPU cache are
         * still coherent even with this setting. See also
         * &drm_i915_gem_object.cache_coherent for more details. Due to this we
         * should only ever use uncached for scanout surfaces, otherwise we end
         * up over-flushing in some places.
         *
         * This is the default on non-LLC platforms.
         */
        I915_CACHE_NONE = 0,
        /**
         * @I915_CACHE_LLC:
         *
         * GPU access is coherent with the CPU cache. If the cache is dirty,
         * then the GPU will ensure that access remains coherent, when both
         * reading and writing through the CPU cache. GPU writes can dirty the
         * CPU cache.
         *
         * Not used for scanout surfaces.
         *
         * Applies to both platforms with a shared LLC (HAS_LLC), and
         * snooping-based platforms (HAS_SNOOP).
         *
         * This is the default on shared LLC platforms. The only exception is
         * scanout objects, where the display engine is not coherent with the
         * CPU cache. For such objects I915_CACHE_NONE or I915_CACHE_WT is
         * automatically applied by the kernel in pin_for_display, if userspace
         * has not done so already.
         */
        I915_CACHE_LLC,
        /**
         * @I915_CACHE_L3_LLC:
         *
         * Explicitly enable the Gfx L3 cache, with coherent LLC.
         *
         * The Gfx L3 sits between the domain specific caches, e.g.
         * sampler/render caches, and the larger LLC. LLC is coherent with the
         * GPU, but L3 is only visible to the GPU, so likely needs to be flushed
         * when the workload completes.
         *
         * Not used for scanout surfaces.
         *
         * Only exposed on some gen7 + GGTT. More recent hardware has dropped
         * this explicit setting, where it should now be enabled by default.
         */
        I915_CACHE_L3_LLC,
        /**
         * @I915_CACHE_WT:
         *
         * Write-through. Used for scanout surfaces.
         *
         * The GPU can utilise the caches, while still having the display engine
         * be coherent with GPU writes, as a result we don't need to flush the
         * CPU caches when moving out of the render domain. This is the default
         * setting chosen by the kernel, if supported by the HW, otherwise we
         * fall back to I915_CACHE_NONE. On the CPU side writes through the CPU
         * cache still need to be flushed, to remain coherent with the display
         * engine.
         */
        I915_CACHE_WT,
};

enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
        I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
        I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
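
/*
 * Illustrative sketch (not part of the driver): the FORCE_* values above are
 * just the base mapping type with I915_MAP_OVERRIDE or'ed in, so a caller can
 * test for the override and mask it back out to recover the underlying type.
 * The local variable names here are hypothetical.
 *
 *	enum i915_map_type wanted = I915_MAP_FORCE_WC;
 *	bool force = wanted & I915_MAP_OVERRIDE;		// true
 *	enum i915_map_type base = wanted & ~I915_MAP_OVERRIDE;	// I915_MAP_WC
 */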

enum i915_mmap_type {
        I915_MMAP_TYPE_GTT = 0,
        I915_MMAP_TYPE_WC,
        I915_MMAP_TYPE_WB,
        I915_MMAP_TYPE_UC,
        I915_MMAP_TYPE_FIXED,
};

struct i915_mmap_offset {
        struct drm_vma_offset_node vma_node;
        struct drm_i915_gem_object *obj;
        enum i915_mmap_type mmap_type;

        struct rb_node offset;
};

struct i915_gem_object_page_iter {
        struct scatterlist *sg_pos;
        unsigned int sg_idx; /* in pages, but 32bit eek! */

        struct radix_tree_root radix;
        struct mutex lock; /* protects this cache */
};
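
/*
 * Illustrative sketch (not part of the driver): the page iterator caches the
 * last visited scatterlist position (sg_pos/sg_idx) plus a radix tree of
 * previously seen entries, so that looking up page N of a large object does
 * not have to walk the sg_table from the start every time. Conceptually, a
 * lookup (helper names hypothetical) behaves like:
 *
 *	// if N was seen before, the radix tree returns the sg entry directly;
 *	// otherwise walk forward from the cached sg_pos/sg_idx, filling the
 *	// radix tree as we go, until entry N is reached.
 *	sg = lookup_cached(&iter->radix, n) ?:
 *	     walk_forward_from(iter->sg_pos, iter->sg_idx, n, &iter->radix);
 */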

struct drm_i915_gem_object {
        /*
         * We might have reason to revisit the below since it wastes
         * a lot of space for non-ttm gem objects.
         * In any case, always use the accessors for the ttm_buffer_object
         * when accessing it.
         */
        union {
                struct drm_gem_object base;
                struct ttm_buffer_object __do_not_access;
        };

        const struct drm_i915_gem_object_ops *ops;

        struct {
                /**
                 * @vma.lock: protect the list/tree of vmas
                 */
                spinlock_t lock;

                /**
                 * @vma.list: List of VMAs backed by this object
                 *
                 * The VMAs on this list are ordered by type, all GGTT vma are
                 * placed at the head and all ppGTT vma are placed at the tail.
                 * The different types of GGTT vma are unordered between
                 * themselves, use the @vma.tree (which has a defined order
                 * between all VMA) to quickly find an exact match.
                 */
                struct list_head list;

                /**
                 * @vma.tree: Ordered tree of VMAs backed by this object
                 *
                 * All VMA created for this object are placed in the @vma.tree
                 * for fast retrieval via a binary search in
                 * i915_vma_instance(). They are also added to @vma.list for
                 * easy iteration.
                 */
                struct rb_root tree;
        } vma;
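
        /*
         * Illustrative sketch (not part of the driver): the two access
         * patterns described above. Exact matches go through
         * i915_vma_instance() and @vma.tree, while bulk operations walk
         * @vma.list under @vma.lock. The walk is shown schematically; real
         * code also has to cope with vma coming and going concurrently.
         *
         *	// exact match for a given vm (and view), via the rbtree
         *	vma = i915_vma_instance(obj, vm, NULL);
         *
         *	// schematic walk over every vma backed by this object
         *	spin_lock(&obj->vma.lock);
         *	list_for_each_entry(vma, &obj->vma.list, obj_link)
         *		...;
         *	spin_unlock(&obj->vma.lock);
         */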

        /**
         * @lut_list: List of vma lookup entries in use for this object.
         *
         * If this object is closed, we need to remove all of its VMA from
         * the fast lookup index in associated contexts; @lut_list provides
         * this translation from object to context->handles_vma.
         */
        struct list_head lut_list;
        spinlock_t lut_lock; /* guards lut_list */

        /**
         * @obj_link: Link into @i915_gem_ww_ctx.obj_list
         *
         * When we lock this object through i915_gem_object_lock() with a
         * context, we add it to the list to ensure we can unlock everything
         * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
         */
        struct list_head obj_link;
        /**
         * @shares_resv_from: The object shares the resv from this vm.
         */
        struct i915_address_space *shares_resv_from;

        union {
                struct rcu_head rcu;
                struct llist_node freed;
        };

        /**
         * Whether the object is currently in the GGTT mmap.
         */
        unsigned int userfault_count;
        struct list_head userfault_link;

        struct {
                spinlock_t lock; /* Protects access to mmo offsets */
                struct rb_root offsets;
        } mmo;

        I915_SELFTEST_DECLARE(struct list_head st_link);

        unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS  BIT(0)
#define I915_BO_ALLOC_VOLATILE    BIT(1)
#define I915_BO_ALLOC_CPU_CLEAR   BIT(2)
#define I915_BO_ALLOC_USER        BIT(3)
/* Object is allowed to lose its contents on suspend / resume, even if pinned */
#define I915_BO_ALLOC_PM_VOLATILE BIT(4)
/* Object needs to be restored early using memcpy during resume */
#define I915_BO_ALLOC_PM_EARLY    BIT(5)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
                             I915_BO_ALLOC_VOLATILE | \
                             I915_BO_ALLOC_CPU_CLEAR | \
                             I915_BO_ALLOC_USER | \
                             I915_BO_ALLOC_PM_VOLATILE | \
                             I915_BO_ALLOC_PM_EARLY)
#define I915_BO_READONLY          BIT(6)
#define I915_TILING_QUIRK_BIT     7 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED         BIT(8)
        /**
         * @mem_flags: Mutable placement-related flags
         *
         * These are flags that indicate specifics of the memory region
         * the object is currently in. As such they are only stable
         * either under the object lock or if the object is pinned.
         */
        unsigned int mem_flags;
#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
#define I915_BO_FLAG_IOMEM       BIT(1) /* Object backed by IO memory */
        /**
         * @cache_level: The desired GTT caching level.
         *
         * See enum i915_cache_level for possible values, along with what
         * each does.
         */
        unsigned int cache_level:3;
        /**
         * @cache_coherent:
         *
         * Track whether the pages are coherent with the GPU if reading or
         * writing through the CPU caches. This largely depends on the
         * @cache_level setting.
         *
         * On platforms which don't have the shared LLC (HAS_SNOOP), like on
         * Atom platforms, coherency must be explicitly requested with some
         * special GTT caching bits (see enum i915_cache_level). Enabling
         * coherency does come at a performance and power cost on such
         * platforms. On the flip side the kernel does not need to manually
         * flush any buffers which need to be coherent with the GPU, if the
         * object is not coherent i.e. @cache_coherent is zero.
         *
         * On platforms that share the LLC with the CPU (HAS_LLC), all GT
         * memory access will automatically snoop the CPU caches (even with
         * CACHE_NONE). The one exception is when dealing with the display
         * engine, like with scanout surfaces. To handle this the kernel will
         * always flush the surface out of the CPU caches when preparing it for
         * scanout. Also note that since scanout surfaces are only ever read by
         * the display engine we only need to care about flushing any writes
         * through the CPU cache, reads on the other hand will always be
         * coherent.
         *
         * Something that might seem strange here is why @cache_coherent is not
         * a simple boolean, i.e. coherent vs non-coherent. The reasoning for
         * this goes back to the display engine not being fully coherent. As a
         * result scanout surfaces will either be marked as I915_CACHE_NONE or
         * I915_CACHE_WT. In the case of seeing I915_CACHE_NONE the kernel
         * makes the assumption that this is likely a scanout surface, and will
         * set @cache_coherent as only I915_BO_CACHE_COHERENT_FOR_READ, on
         * platforms with the shared LLC. The kernel uses this to always flush
         * writes through the CPU cache as early as possible, where it can, in
         * effect keeping @cache_dirty clean, so we can potentially avoid
         * stalling when flushing the surface just before doing the scanout.
         * This does mean we might unnecessarily flush non-scanout objects in
         * some places, but the default assumption is that all normal objects
         * should be using I915_CACHE_LLC, at least on platforms with the
         * shared LLC.
         *
         * Supported values:
         *
         * I915_BO_CACHE_COHERENT_FOR_READ:
         *
         * On shared LLC platforms, we use this for special scanout surfaces,
         * where the display engine is not coherent with the CPU cache. As such
         * we need to ensure we flush any writes before doing the scanout. As an
         * optimisation we try to flush any writes as early as possible to avoid
         * stalling later.
         *
         * Thus for scanout surfaces using I915_CACHE_NONE, on shared LLC
         * platforms, we use:
         *
         *      cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ
         *
         * While for normal objects that are fully coherent, including special
         * scanout surfaces marked as I915_CACHE_WT, we use:
         *
         *      cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ |
         *                       I915_BO_CACHE_COHERENT_FOR_WRITE
         *
         * And then for objects that are not coherent at all we use:
         *
         *      cache_coherent = 0
         *
         * I915_BO_CACHE_COHERENT_FOR_WRITE:
         *
         * When writing through the CPU cache, the GPU is still coherent. Note
         * that this also implies I915_BO_CACHE_COHERENT_FOR_READ.
         */
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
        unsigned int cache_coherent:2;

        /**
         * @cache_dirty:
         *
         * Track if we are dirty with writes through the CPU cache for this
         * object. As a result reading directly from main memory might yield
         * stale data.
         *
         * This also ties into whether the kernel is tracking the object as
         * coherent with the GPU, as per @cache_coherent, as it determines if
         * flushing might be needed at various points.
         *
         * Another part of @cache_dirty is managing flushing when first
         * acquiring the pages for system memory, at this point the pages are
         * considered foreign, so the default assumption is that the cache is
         * dirty, for example the page zeroing done by the kernel, or
         * swapping-in, might leave writes in the CPU cache, while the actual
         * data in main memory is potentially stale. Note that this is a
         * potential security issue when dealing with userspace objects and
         * zeroing. Now, whether we actually need to apply the big sledgehammer
         * of flushing all the pages on acquire depends on whether
         * @cache_coherent is marked as I915_BO_CACHE_COHERENT_FOR_WRITE, i.e.
         * that the GPU will be coherent for both reads and writes through the
         * CPU cache.
         *
         * Note that on shared LLC platforms we still apply the heavy flush for
         * I915_CACHE_NONE objects, under the assumption that this is going to
         * be used for scanout.
         *
         * Update: On some hardware there is now also the 'Bypass LLC' MOCS
         * entry, which defeats our @cache_coherent tracking, since userspace
         * can freely bypass the CPU cache when touching the pages with the GPU,
         * where the kernel is completely unaware. On such platforms we need to
         * apply the sledgehammer-on-acquire regardless of the @cache_coherent.
         *
         * Special care is taken on non-LLC platforms, to prevent potential
         * information leak. The driver currently ensures:
         *
         *   1. All userspace objects, by default, have @cache_level set as
         *   I915_CACHE_NONE. The only exception is userptr objects, where we
         *   instead force I915_CACHE_LLC, but we also don't allow userspace to
         *   ever change the @cache_level for such objects. Another special case
         *   is dma-buf, which doesn't rely on @cache_dirty, but there we
         *   always do a forced flush when acquiring the pages, if there is a
         *   chance that the pages can be read directly from main memory with
         *   the GPU.
         *
         *   2. All I915_CACHE_NONE objects have @cache_dirty initially true.
         *
         *   3. All swapped-out objects (i.e. shmem) have @cache_dirty set to
         *   true.
         *
         *   4. The @cache_dirty is never freely reset before the initial
         *   flush, even if userspace adjusts the @cache_level through the
         *   i915_gem_set_caching_ioctl.
         *
         *   5. All @cache_dirty objects (including swapped-in) are initially
         *   flushed with a synchronous call to drm_clflush_sg in
         *   __i915_gem_object_set_pages. The @cache_dirty can be freely reset
         *   at this point. All further asynchronous clflushes are never
         *   security critical, i.e. userspace is free to race against itself.
         */
        unsigned int cache_dirty:1;

        /**
         * @read_domains: Read memory domains.
         *
         * These monitor which caches contain read/write data related to the
         * object. When transitioning from one set of domains to another,
         * the driver is called to ensure that caches are suitably flushed and
         * invalidated.
         */
        u16 read_domains;

        /**
         * @write_domain: Corresponding unique write memory domain.
         */
        u16 write_domain;

        struct intel_frontbuffer __rcu *frontbuffer;

        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

        struct {
                /*
                 * Protects the pages and their use. Do not use directly, but
                 * instead go through the pin/unpin interfaces.
                 */
                atomic_t pages_pin_count;
                atomic_t shrink_pin;

                /**
                 * Priority list of potential placements for this object.
                 */
                struct intel_memory_region **placements;
                int n_placements;

                /**
                 * Memory region for this object.
                 */
                struct intel_memory_region *region;

                /**
                 * Memory manager resource allocated for this object. Only
                 * needed for the mock region.
                 */
                struct ttm_resource *res;

                /**
                 * Element within memory_region->objects or region->purgeable
                 * if the object is marked as DONTNEED. Access is protected by
                 * region->obj_lock.
                 */
                struct list_head region_link;

                struct sg_table *pages;
                void *mapping;

                struct i915_page_sizes {
                        /**
                         * The sg mask of the pages' sg_table, i.e. the mask
                         * of the lengths for each sg entry.
                         */
                        unsigned int phys;

                        /**
                         * The gtt page sizes we are allowed to use given the
                         * sg mask and the supported page sizes. This will
                         * express the smallest unit we can use for the whole
                         * object, as well as the larger sizes we may be able
                         * to use opportunistically.
                         */
                        unsigned int sg;

                        /**
                         * The actual gtt page size usage. Since we can have
                         * multiple vma associated with this object we need to
                         * prevent any trampling of state, hence a copy of this
                         * struct also lives in each vma, therefore the gtt
                         * value here should only be read/write through the vma.
                         */
                        unsigned int gtt;
                } page_sizes;
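
                /*
                 * Illustrative example (not from the code): if every entry in
                 * the backing sg_table happens to be 2M in size, and the
                 * platform supports 4K, 64K and 2M GTT pages, then roughly:
                 *
                 *	page_sizes.phys = SZ_2M;
                 *	page_sizes.sg   = SZ_2M | SZ_64K | SZ_4K;
                 *
                 * i.e. the object could be mapped entirely with any of those
                 * page sizes, with the larger ones used opportunistically,
                 * while the per-vma copy of .gtt records what a particular
                 * binding actually ended up using.
                 */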

                I915_SELFTEST_DECLARE(unsigned int page_mask);

                struct i915_gem_object_page_iter get_page;
                struct i915_gem_object_page_iter get_dma_page;

                /**
                 * Element within i915->mm.unbound_list or i915->mm.bound_list,
                 * locked by i915->mm.obj_lock.
                 */
                struct list_head link;

                /**
                 * Advice: are the backing pages purgeable?
                 */
                unsigned int madv:2;

                /**
                 * This is set if the object has been written to since the
                 * pages were last acquired.
                 */
                bool dirty:1;
        } mm;

        struct {
                struct sg_table *cached_io_st;
                struct i915_gem_object_page_iter get_io_page;
                struct drm_i915_gem_object *backup;
                bool created:1;
        } ttm;

        /*
         * Record which PXP key instance this object was created against (if
         * any), so we can use it to determine if the encryption is valid by
         * comparing against the current key instance.
         */
        u32 pxp_key_instance;

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        union {
#ifdef CONFIG_MMU_NOTIFIER
                struct i915_gem_userptr {
                        uintptr_t ptr;
                        unsigned long notifier_seq;

                        struct mmu_interval_notifier notifier;
                        struct page **pvec;
                        int page_ref;
                } userptr;
#endif

                struct drm_mm_node *stolen;

                unsigned long scratch;
                u64 encode;

                void *gvt_info;
        };
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
        /* Assert that to_intel_bo(NULL) == NULL */
        BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

        return container_of(gem, struct drm_i915_gem_object, base);
}
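
/*
 * Illustrative sketch (not part of the driver): to_intel_bo() is the usual
 * way to get from a core &drm_gem_object back to the i915 object, e.g. in a
 * GEM callback (the callback and variable names here are hypothetical):
 *
 *	static void my_gem_free(struct drm_gem_object *gem)
 *	{
 *		struct drm_i915_gem_object *obj = to_intel_bo(gem);
 *		...
 *	}
 *
 * Because @base sits at offset zero in struct drm_i915_gem_object (which the
 * BUILD_BUG_ON() above asserts), to_intel_bo(NULL) conveniently stays NULL.
 */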

#endif