linux/drivers/gpu/drm/i915/gem/i915_gem_object.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "i915_gem_object_types.h"

#include "i915_gem_gtt.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
                                       const void *data, size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages,
                                     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully that the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
        return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
        struct drm_i915_gem_object *obj;

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle);
        if (obj && !kref_get_unless_zero(&obj->base.refcount))
                obj = NULL;
        rcu_read_unlock();

        return obj;
}
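
/*
 * Usage sketch (illustrative, not part of this header): callers that need
 * the object beyond the current RCU read-side critical section should use
 * i915_gem_object_lookup(), which acquires a reference, and balance it with
 * i915_gem_object_put(). The helper name below is hypothetical.
 *
 *	static int example_use_handle(struct drm_file *file, u32 handle)
 *	{
 *		struct drm_i915_gem_object *obj;
 *
 *		obj = i915_gem_object_lookup(file, handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		... operate on obj while holding the reference ...
 *
 *		i915_gem_object_put(obj);
 *		return 0;
 *	}
 */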

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
        drm_gem_object_get(&obj->base);
        return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
        __drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
        dma_resv_lock(obj->base.resv, NULL);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
        return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
        dma_resv_unlock(obj->base.resv);
}
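
/*
 * Locking sketch (illustrative, not part of this header): the object lock is
 * the object's dma_resv lock, so the usual pattern is to take it around any
 * state that assert_object_held() protects. The helper below is hypothetical.
 *
 *	static int example_locked_op(struct drm_i915_gem_object *obj)
 *	{
 *		int err;
 *
 *		err = i915_gem_object_lock_interruptible(obj);
 *		if (err)
 *			return err;
 *
 *		assert_object_held(obj);
 *		... touch state protected by the object lock ...
 *
 *		i915_gem_object_unlock(obj);
 *		return 0;
 *	}
 */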

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
                                  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
        obj->base.vma_node.readonly = true;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
        return obj->base.vma_node.readonly;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}

static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
        return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
        GEM_BUG_ON(!tiling);
        return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
        return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
        return (i915_gem_object_get_stride(obj) *
                i915_gem_object_get_tile_height(obj));
}
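
/*
 * Worked example (illustrative, using the tile heights above): an X-tiled
 * object spans 8 rows per tile and a Y-tiled object 32 rows per tile, so
 * with a 4096-byte stride the tile row size is 4096 * 32 = 131072 bytes for
 * Y tiling and 4096 * 8 = 32768 bytes for X tiling.
 */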

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                               unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
                         unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        might_lock(&obj->mm.lock);

        if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
                return 0;

        return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
        return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
        return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_unpin_pages(obj);
}
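
/*
 * Usage sketch (illustrative, not part of this header): pin the backing
 * pages before walking them and drop the pin when done. The helper below is
 * hypothetical.
 *
 *	static int example_walk_pages(struct drm_i915_gem_object *obj)
 *	{
 *		int err;
 *
 *		err = i915_gem_object_pin_pages(obj);
 *		if (err)
 *			return err;
 *
 *		... iterate with i915_gem_object_get_page() et al. ...
 *
 *		i915_gem_object_unpin_pages(obj);
 *		return 0;
 *	}
 */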

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
        I915_MM_NORMAL = 0,
        I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
                                enum i915_mm_subclass subclass);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
        I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
        I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}
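
/*
 * Usage sketch (illustrative, not part of this header): map the whole
 * object, write through the mapping, flush, and release the pin. The memset
 * is only an example of an access through the mapping; the helper below is
 * hypothetical.
 *
 *	static int example_fill(struct drm_i915_gem_object *obj, u8 value)
 *	{
 *		void *vaddr;
 *
 *		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *		if (IS_ERR(vaddr))
 *			return PTR_ERR(vaddr);
 *
 *		memset(vaddr, value, obj->base.size);
 *		i915_gem_object_flush_map(obj);
 *		i915_gem_object_unpin_map(obj);
 *
 *		return 0;
 *	}
 */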

void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                                   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
                                 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
                                  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE  BIT(0)
#define CLFLUSH_AFTER   BIT(1)
#define CLFLUSH_FLAGS   (CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
        i915_gem_object_unlock(obj);
}
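
/*
 * Usage sketch (illustrative, not part of this header): bracket CPU access
 * to the backing pages with prepare/finish and honour the returned clflush
 * hints. The exact cache-flushing steps depend on the caller; the helper
 * below is hypothetical.
 *
 *	static int example_cpu_write(struct drm_i915_gem_object *obj)
 *	{
 *		unsigned int needs_clflush;
 *		int err;
 *
 *		err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *		if (err)
 *			return err;
 *
 *		... write to the pages, flushing cachelines before/after as
 *		    indicated by CLFLUSH_BEFORE / CLFLUSH_AFTER ...
 *
 *		i915_gem_object_finish_access(obj);
 *		return 0;
 *	}
 */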

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
        struct intel_engine_cs *engine = NULL;
        struct dma_fence *fence;

        rcu_read_lock();
        fence = dma_resv_get_excl_rcu(obj->base.resv);
        rcu_read_unlock();

        if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
                engine = to_request(fence)->engine;
        dma_fence_put(fence);

        return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view,
                                     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->cache_dirty)
                return false;

        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                return true;

        return obj->pin_global; /* currently in use by HW, keep flushed */
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        if (cpu_write_needs_clflush(obj))
                obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);
#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)

#endif