linux/drivers/gpu/drm/i915/gem/i915_gem_object.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. The object is only valid while the RCU read lock is held, and
 * note carefully that it may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

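/*
 * Example (illustrative sketch only, not part of this header):
 * i915_gem_object_lookup() converts a userspace handle into a full
 * reference, which the caller must drop with i915_gem_object_put() when
 * finished. The "args" ioctl payload below is a hypothetical stand-in
 * for a real ioctl argument struct.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */
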
__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	dma_resv_lock(obj->base.resv, NULL);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}

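/*
 * Example (illustrative sketch only): the object lock is the underlying
 * dma_resv lock, so the usual pattern is lock (or the interruptible
 * variant in user-triggered paths), touch the protected state, unlock.
 *
 *	int err;
 *
 *	err = i915_gem_object_lock_interruptible(obj);
 *	if (err)
 *		return err;
 *
 *	... state checked by assert_object_held(obj) ...
 *
 *	i915_gem_object_unlock(obj);
 */
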
struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

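/*
 * Worked example (illustrative): per i915_gem_tile_height(), Y tiling is
 * 32 rows tall and other tiling modes 8 rows. A Y-tiled object with a
 * stride of 4096 bytes therefore has a tile row size of
 * 4096 * 32 = 128KiB.
 */
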
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	/*
	 * Only used by struct_mutex, when called "recursively" from
	 * direct-reclaim-esque contexts. Safe because there is only ever
	 * one struct_mutex in the entire system.
	 */
	I915_MM_SHRINKER = 1,
	/*
	 * Used for obj->mm.lock when allocating pages. Safe because the object
	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
	 * it. As soon as the object has pages, obj->mm.lock nests within
	 * fs_reclaim.
	 */
	I915_MM_GET_PAGES = 1,
};

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

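/*
 * Example (illustrative sketch only): pinning keeps the backing store
 * resident while it is in use; every successful pin must be balanced by
 * an unpin, after which the shrinker may again reap the pages.
 *
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... access obj->mm.pages ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */
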
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

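/*
 * Example (illustrative sketch only): a typical pattern for CPU access
 * through a kernel mapping is to pin the map, write through it, flush,
 * and then release the pin. The "data" and "size" names here are
 * hypothetical caller-provided values.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */
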
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
}

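/*
 * Example (illustrative sketch only): CPU access is bracketed by prepare
 * and finish. The prepare helpers take the object lock and pin the pages,
 * returning CLFLUSH_BEFORE and/or CLFLUSH_AFTER in @needs_clflush when
 * manual cache flushing is required around the access; finish_access
 * unpins and unlocks again.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	... write the pages, flushing as directed by needs_clflush ...
 *
 *	i915_gem_object_finish_access(obj);
 */
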
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_rcu(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif /* __I915_GEM_OBJECT_H__ */