linux/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove the cached entries when the object or the context is closed we
 * need a secondary list and a translation entry (i915_lut_handle). A
 * teardown sketch follows the struct definition below.
 */
struct i915_lut_handle {
        struct list_head obj_link;
        struct i915_gem_context *ctx;
        u32 handle;
};
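
/*
 * Illustrative sketch (not part of the original header): tearing down the
 * cached handle->vma lookups when an object is closed, as referenced above.
 * The ctx->handles_vma radix tree and the i915_lut_handle_free() helper are
 * assumed here for illustration; they live outside this header.
 *
 *        struct i915_lut_handle *lut, *ln;
 *
 *        list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
 *                struct i915_gem_context *ctx = lut->ctx;
 *
 *                radix_tree_delete(&ctx->handles_vma, lut->handle);
 *                list_del(&lut->obj_link);
 *                i915_lut_handle_free(lut);
 *        }
 */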

struct drm_i915_gem_object_ops {
        unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_HAS_IOMEM       BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE   BIT(2)
#define I915_GEM_OBJECT_IS_PROXY        BIT(3)
#define I915_GEM_OBJECT_NO_GGTT         BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL    BIT(5)

        /*
         * Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages, before binding them into the GTT, and put_pages() is
         * called after we no longer need them. As we expect there to be an
         * associated cost with migrating pages between the backing storage
         * and making them available for the GPU (e.g. clflush), we may hold
         * onto the pages after they are no longer referenced by the GPU
         * in case they may be used again shortly (for example migrating the
         * pages to a different memory domain within the GTT). put_pages()
         * will therefore most likely be called when the object itself is
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker). An example wiring of these hooks is
         * sketched after this struct.
         */
        int (*get_pages)(struct drm_i915_gem_object *obj);
        void (*put_pages)(struct drm_i915_gem_object *obj,
                          struct sg_table *pages);
        void (*truncate)(struct drm_i915_gem_object *obj);
        void (*writeback)(struct drm_i915_gem_object *obj);

        int (*pwrite)(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *arg);

        int (*dmabuf_export)(struct drm_i915_gem_object *obj);
        void (*release)(struct drm_i915_gem_object *obj);
};
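
/*
 * Illustrative sketch (not part of the original header): how a backing-store
 * implementation might wire up these hooks, as mentioned in the comment
 * above. example_get_pages() and example_put_pages() are hypothetical
 * callbacks that would allocate and release the object's sg_table.
 *
 *        static const struct drm_i915_gem_object_ops example_ops = {
 *                .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *                         I915_GEM_OBJECT_IS_SHRINKABLE,
 *                .get_pages = example_get_pages,
 *                .put_pages = example_put_pages,
 *        };
 *
 * A backend would then associate the ops with an object at creation time
 * (typically via an init helper such as i915_gem_object_init()) before the
 * first get_pages() call.
 */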

struct drm_i915_gem_object {
        struct drm_gem_object base;

        const struct drm_i915_gem_object_ops *ops;

        struct {
                /**
                 * @vma.lock: protect the list/tree of vmas
                 */
                spinlock_t lock;

                /**
                 * @vma.list: List of VMAs backed by this object
                 *
                 * The VMAs on this list are ordered by type: all GGTT VMAs
                 * are placed at the head and all ppGTT VMAs at the tail.
                 * The different types of GGTT VMA are unordered between
                 * themselves; use @vma.tree (which has a defined order
                 * between all VMAs) to quickly find an exact match.
                 */
                struct list_head list;

                /**
                 * @vma.tree: Ordered tree of VMAs backed by this object
                 *
                 * All VMAs created for this object are placed in the @vma.tree
                 * for fast retrieval via a binary search in
                 * i915_vma_instance(). They are also added to @vma.list for
                 * easy iteration; a locked walk of @vma.list is sketched
                 * after this struct.
                 */
                struct rb_root tree;
        } vma;
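
        /*
         * Illustrative sketch (not part of the original header): iterating
         * the VMAs of this object under @vma.lock. The vma->obj_link list
         * linkage and i915_vma_is_ggtt() are assumptions about struct
         * i915_vma made for this example only.
         *
         *        struct i915_vma *vma;
         *        unsigned int ggtt_count = 0;
         *
         *        spin_lock(&obj->vma.lock);
         *        list_for_each_entry(vma, &obj->vma.list, obj_link) {
         *                if (!i915_vma_is_ggtt(vma))
         *                        break;
         *                ggtt_count++;
         *        }
         *        spin_unlock(&obj->vma.lock);
         *
         * Because GGTT VMAs are kept at the head of @vma.list, the walk can
         * stop at the first ppGTT VMA.
         */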

        /**
         * @lut_list: List of vma lookup entries in use for this object.
         *
         * If this object is closed, we need to remove all of its VMAs from
         * the fast lookup index in the associated contexts; @lut_list
         * provides this translation from object to context->handles_vma.
         */
        struct list_head lut_list;

        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        union {
                struct rcu_head rcu;
                struct llist_node freed;
        };

        /**
         * Whether the object is currently in the GGTT mmap.
         */
        unsigned int userfault_count;
        struct list_head userfault_link;

        I915_SELFTEST_DECLARE(struct list_head st_link);

        unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE   BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)

        /*
         * Is the object to be mapped as read-only to the GPU?
         * Only honoured if hardware has the relevant pte bit.
         */
        unsigned int cache_level:3;
        unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
        unsigned int cache_dirty:1;

        /**
         * @read_domains: Read memory domains.
         *
         * These monitor which caches contain read/write data related to the
         * object. When transitioning from one set of domains to another,
         * the driver is called to ensure that caches are suitably flushed and
         * invalidated.
         */
        u16 read_domains;

        /**
         * @write_domain: Corresponding unique write memory domain.
         */
        u16 write_domain;
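
        /*
         * Illustrative sketch (not part of the original header): a typical
         * check before handing the object to the GPU is to flush any
         * outstanding CPU writes first; example_flush_cpu_writes() is a
         * hypothetical helper.
         *
         *        if (obj->write_domain & I915_GEM_DOMAIN_CPU)
         *                example_flush_cpu_writes(obj);
         */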

        struct intel_frontbuffer __rcu *frontbuffer;

        /** Current tiling mode and stride for the object, if it is tiled. */
        unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
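
        /*
         * Illustrative sketch (not part of the original header): the tiling
         * mode and the stride are unpacked from @tiling_and_stride with the
         * masks above; the helper names are hypothetical stand-ins for the
         * driver's accessors.
         *
         *        static inline unsigned int
         *        example_get_tiling(const struct drm_i915_gem_object *obj)
         *        {
         *                return obj->tiling_and_stride & TILING_MASK;
         *        }
         *
         *        static inline unsigned int
         *        example_get_stride(const struct drm_i915_gem_object *obj)
         *        {
         *                return obj->tiling_and_stride & STRIDE_MASK;
         *        }
         *
         * The packing relies on tiled strides being a multiple of
         * FENCE_MINIMUM_STRIDE, which leaves the low bits free to hold the
         * I915_TILING_* mode.
         */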

        /** Count of VMA actually bound by this object */
        atomic_t bind_count;

        struct {
                struct mutex lock; /* protects the pages and their use */
                atomic_t pages_pin_count;
                atomic_t shrink_pin;

                /**
                 * Memory region for this object.
                 */
                struct intel_memory_region *region;
                /**
                 * List of memory region blocks allocated for this object.
                 */
                struct list_head blocks;
                /**
                 * Element within memory_region->objects or region->purgeable
                 * if the object is marked as DONTNEED. Access is protected by
                 * region->obj_lock.
                 */
                struct list_head region_link;

                struct sg_table *pages;
                void *mapping;

                struct i915_page_sizes {
                        /**
                         * The sg mask of the pages' sg_table, i.e. the mask
                         * of the lengths of each sg entry.
                         */
                        unsigned int phys;

                        /**
                         * The gtt page sizes we are allowed to use given the
                         * sg mask and the supported page sizes. This will
                         * express the smallest unit we can use for the whole
                         * object, as well as the larger sizes we may be able
                         * to use opportunistically.
                         */
                        unsigned int sg;

                        /**
                         * The actual gtt page size usage. Since we can have
                         * multiple VMAs associated with this object we need to
                         * prevent any trampling of state, hence a copy of this
                         * struct also lives in each vma; the gtt value here
                         * should therefore only be read/written through the vma.
                         */
                        unsigned int gtt;
                } page_sizes;
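
                /*
                 * Illustrative sketch (not part of the original header): for
                 * an object whose sg_table contains one 64K chunk and one 4K
                 * chunk, on a platform supporting both sizes, the masks above
                 * would end up roughly as follows. The I915_GTT_PAGE_SIZE_*
                 * values are assumed to come from the GTT headers.
                 *
                 *        page_sizes.phys = I915_GTT_PAGE_SIZE_64K |
                 *                          I915_GTT_PAGE_SIZE_4K;
                 *        page_sizes.sg   = I915_GTT_PAGE_SIZE_64K |
                 *                          I915_GTT_PAGE_SIZE_4K;
                 *
                 * page_sizes.gtt is then derived per-vma at bind time from
                 * page_sizes.sg and that vma's size/alignment, and should be
                 * read through the vma as noted above.
                 */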

                I915_SELFTEST_DECLARE(unsigned int page_mask);

                struct i915_gem_object_page_iter {
                        struct scatterlist *sg_pos;
                        unsigned int sg_idx; /* in pages, but 32bit eek! */

                        struct radix_tree_root radix;
                        struct mutex lock; /* protects this cache */
                } get_page;

                /**
                 * Element within i915->mm.unbound_list or i915->mm.bound_list,
                 * locked by i915->mm.obj_lock.
                 */
                struct list_head link;

                /**
                 * Advice: are the backing pages purgeable?
                 */
                unsigned int madv:2;

                /**
                 * This is set if the object has been written to since the
                 * pages were last acquired.
                 */
                bool dirty:1;

                /**
                 * This is set if the object has been pinned due to unknown
                 * swizzling.
                 */
                bool quirked:1;
        } mm;
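
        /*
         * Illustrative sketch (not part of the original header): the usual
         * pattern for touching the backing store is to pin the pages around
         * the access so the shrinker cannot reap them. The pin helpers and
         * do_something_with() are assumed/hypothetical here.
         *
         *        struct sg_table *pages;
         *        int err;
         *
         *        err = i915_gem_object_pin_pages(obj);
         *        if (err)
         *                return err;
         *
         *        pages = obj->mm.pages;
         *        do_something_with(pages);
         *
         *        i915_gem_object_unpin_pages(obj);
         */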

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        union {
                struct i915_gem_userptr {
                        uintptr_t ptr;

                        struct i915_mm_struct *mm;
                        struct i915_mmu_object *mmu_object;
                        struct work_struct *work;
                } userptr;

                unsigned long scratch;

                void *gvt_info;
        };

        /** for phys allocated objects */
        struct drm_dma_handle *phys_handle;
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
        /* Assert that to_intel_bo(NULL) == NULL */
        BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

        return container_of(gem, struct drm_i915_gem_object, base);
}
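
/*
 * Illustrative sketch (not part of the original header): because @base is the
 * first member, the container_of() above is a zero-offset conversion and NULL
 * maps to NULL, so GEM callbacks can convert unconditionally. The free
 * callback below is a hypothetical example, not the driver's implementation.
 *
 *        static void example_free_object(struct drm_gem_object *gem)
 *        {
 *                struct drm_i915_gem_object *obj = to_intel_bo(gem);
 *
 *                drm_gem_object_release(&obj->base);
 *                kfree(obj->bit_17);
 *                kfree(obj);
 *        }
 */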

#endif