linux/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, we need a
 * secondary list and a translation entry (i915_lut_handle) so that the
 * entries can be removed as the object or context is closed.
 */
struct i915_lut_handle {
        struct list_head obj_link;
        struct i915_gem_context *ctx;
        u32 handle;
};
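
/*
 * Illustrative sketch (not part of the upstream header): when an object is
 * closed, the driver walks the object's lut_list and drops each handle from
 * the owning context's handle->vma radix tree, roughly along these lines.
 * Everything not declared in this file (e.g. ctx->handles_vma layout, the
 * locking) is an assumption for illustration only.
 *
 *	struct i915_lut_handle *lut, *ln;
 *
 *	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
 *		struct i915_gem_context *ctx = lut->ctx;
 *
 *		// the context keeps its own handle -> vma radix tree
 *		radix_tree_delete(&ctx->handles_vma, lut->handle);
 *
 *		list_del(&lut->obj_link);
 *		kfree(lut);
 *	}
 */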

struct drm_i915_gem_object_ops {
        unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE   BIT(1)
#define I915_GEM_OBJECT_IS_PROXY        BIT(2)
#define I915_GEM_OBJECT_ASYNC_CANCEL    BIT(3)

        /*
         * Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages, i.e. before binding them into the GTT, and put_pages()
         * is called after we no longer need them. As we expect there to be
         * an associated cost with migrating pages between the backing
         * storage and making them available for the GPU (e.g. clflush), we
         * may hold onto the pages after they are no longer referenced by the
         * GPU in case they may be used again shortly (for example migrating
         * the pages to a different memory domain within the GTT). put_pages()
         * will therefore most likely be called when the object itself is
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
        int (*get_pages)(struct drm_i915_gem_object *obj);
        void (*put_pages)(struct drm_i915_gem_object *obj,
                          struct sg_table *pages);
        void (*truncate)(struct drm_i915_gem_object *obj);
        void (*writeback)(struct drm_i915_gem_object *obj);

        int (*pwrite)(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *arg);

        int (*dmabuf_export)(struct drm_i915_gem_object *obj);
        void (*release)(struct drm_i915_gem_object *obj);
};
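
/*
 * Illustrative sketch (not part of the upstream header): a backing-store
 * implementation provides an ops table and wires page acquisition through
 * get_pages()/put_pages(). The example_* names below are hypothetical;
 * __i915_gem_object_set_pages() and i915_sg_page_sizes() are assumed to be
 * the helpers this kernel exposes elsewhere (e.g. i915_gem_object.h) for
 * publishing a filled sg_table and its page-size mask.
 *
 *	static int example_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		struct sg_table *pages;
 *
 *		pages = example_alloc_and_fill_sgt(obj); // hypothetical
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		// publish the pages and the page-size mask for later GTT binds
 *		__i915_gem_object_set_pages(obj, pages,
 *					    i915_sg_page_sizes(pages->sgl));
 *		return 0;
 *	}
 *
 *	static void example_put_pages(struct drm_i915_gem_object *obj,
 *				      struct sg_table *pages)
 *	{
 *		sg_free_table(pages);
 *		kfree(pages);
 *	}
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 */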

struct drm_i915_gem_object {
        struct drm_gem_object base;

        const struct drm_i915_gem_object_ops *ops;

        struct {
                /**
                 * @vma.lock: protect the list/tree of vmas
                 */
                spinlock_t lock;

                /**
                 * @vma.list: List of VMAs backed by this object
                 *
                 * The VMAs on this list are ordered by type: all GGTT VMAs
                 * are placed at the head and all ppGTT VMAs at the tail.
                 * The different types of GGTT VMA are unordered between
                 * themselves; use the @vma.tree (which has a defined order
                 * between all VMAs) to quickly find an exact match.
                 */
                struct list_head list;

                /**
                 * @vma.tree: Ordered tree of VMAs backed by this object
                 *
                 * All VMAs created for this object are placed in the
                 * @vma.tree for fast retrieval via a binary search in
                 * i915_vma_instance(). They are also added to @vma.list for
                 * easy iteration.
                 */
                struct rb_root tree;
        } vma;

        /**
         * @lut_list: List of vma lookup entries in use for this object.
         *
         * If this object is closed, we need to remove all of its VMAs from
         * the fast lookup index in the associated contexts; @lut_list
         * provides this translation from object to context->handles_vma.
         */
        struct list_head lut_list;

        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        union {
                struct rcu_head rcu;
                struct llist_node freed;
        };

        /**
         * Whether the object is currently in the GGTT mmap.
         */
        unsigned int userfault_count;
        struct list_head userfault_link;

        struct list_head batch_pool_link;
        I915_SELFTEST_DECLARE(struct list_head st_link);

        /*
         * Is the object to be mapped as read-only to the GPU?
         * Only honoured if the hardware has the relevant pte bit.
         */
        unsigned int cache_level:3;
        unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
        unsigned int cache_dirty:1;

        /**
         * @read_domains: Read memory domains.
         *
         * These monitor which caches contain read/write data related to the
         * object. When transitioning from one set of domains to another,
         * the driver is called to ensure that caches are suitably flushed and
         * invalidated.
         */
        u16 read_domains;

        /**
         * @write_domain: Corresponding unique write memory domain.
         */
        u16 write_domain;

        atomic_t frontbuffer_bits;
        unsigned int frontbuffer_ggtt_origin; /* write once */
        struct i915_active_request frontbuffer_write;

        /** Current tiling mode and stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

        /** Count of VMAs actually bound for this object */
        atomic_t bind_count;
        unsigned int active_count;
        /** Count of how many global VMAs are currently pinned for use by HW */
        unsigned int pin_global;

        struct {
                struct mutex lock; /* protects the pages and their use */
                atomic_t pages_pin_count;

                struct sg_table *pages;
                void *mapping;

                /* TODO: whack some of this into the error state */
                struct i915_page_sizes {
                        /**
                         * The sg mask of the pages sg_table, i.e. the mask
                         * of the lengths for each sg entry.
                         */
                        unsigned int phys;

                        /**
                         * The gtt page sizes we are allowed to use given the
                         * sg mask and the supported page sizes. This will
                         * express the smallest unit we can use for the whole
                         * object, as well as the larger sizes we may be able
                         * to use opportunistically.
                         */
                        unsigned int sg;

                        /**
                         * The actual gtt page size usage. Since we can have
                         * multiple vma associated with this object we need to
                         * prevent any trampling of state, hence a copy of this
                         * struct also lives in each vma; the gtt value here
                         * should therefore only be read/written through the
                         * vma.
                         */
                        unsigned int gtt;
                } page_sizes;

                I915_SELFTEST_DECLARE(unsigned int page_mask);

                struct i915_gem_object_page_iter {
                        struct scatterlist *sg_pos;
                        unsigned int sg_idx; /* in pages, but 32bit eek! */

                        struct radix_tree_root radix;
                        struct mutex lock; /* protects this cache */
                } get_page;

                /**
                 * Element within i915->mm.unbound_list or i915->mm.bound_list,
                 * locked by i915->mm.obj_lock.
                 */
                struct list_head link;

                /**
                 * Advice: are the backing pages purgeable?
                 */
                unsigned int madv:2;

                /**
                 * This is set if the object has been written to since the
                 * pages were last acquired.
                 */
                bool dirty:1;

                /**
                 * This is set if the object has been pinned due to unknown
                 * swizzling.
                 */
                bool quirked:1;
        } mm;

        /** References from framebuffers, locks out tiling changes. */
        unsigned int framebuffer_references;

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        union {
                struct i915_gem_userptr {
                        uintptr_t ptr;

                        struct i915_mm_struct *mm;
                        struct i915_mmu_object *mmu_object;
                        struct work_struct *work;
                } userptr;

                unsigned long scratch;

                void *gvt_info;
        };

        /** for phys allocated objects */
        struct drm_dma_handle *phys_handle;
};
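
/*
 * Illustrative sketch (not part of the upstream header): tiling mode and
 * stride are packed into @tiling_and_stride, so decoding them looks roughly
 * like the helpers below. The driver keeps its real accessors elsewhere
 * (e.g. i915_gem_object.h); the example_* names here are hypothetical.
 */
static inline unsigned int
example_object_get_tiling(const struct drm_i915_gem_object *obj)
{
        /* the low bits select the tiling mode (I915_TILING_*) */
        return obj->tiling_and_stride & TILING_MASK;
}

static inline unsigned int
example_object_get_stride(const struct drm_i915_gem_object *obj)
{
        /* the remaining bits hold the fence stride in bytes */
        return obj->tiling_and_stride & STRIDE_MASK;
}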

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
        /* Assert that to_intel_bo(NULL) == NULL */
        BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

        return container_of(gem, struct drm_i915_gem_object, base);
}
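
/*
 * Typical usage (illustrative, not part of the upstream header): the DRM core
 * hands back a struct drm_gem_object, e.g. from drm_gem_object_lookup(), and
 * the driver converts it back to its own type:
 *
 *	struct drm_gem_object *gem = drm_gem_object_lookup(file, handle);
 *	struct drm_i915_gem_object *obj = gem ? to_intel_bo(gem) : NULL;
 *
 * drm_gem_object_lookup() is the standard DRM helper; the surrounding lines
 * are a sketch.
 */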

#endif