linux/drivers/gpu/drm/i915/gem/i915_gem_object.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

/*
 * XXX: There is a prevalence of the assumption that we fit the
 * object's page count inside a 32bit _signed_ variable. Let's document
 * this and catch if we ever need to fix it. In the meantime, if you do
 * spot such a local variable, please consider fixing!
 *
 * Aside from our own locals (for which we have no excuse!):
 * - sg_table embeds unsigned int for num_pages
 * - get_user_pages*() mixed ints with longs
 */
#define GEM_CHECK_SIZE_OVERFLOW(sz) \
        GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)

static inline bool i915_gem_object_size_2big(u64 size)
{
        struct drm_i915_gem_object *obj;

        if (GEM_CHECK_SIZE_OVERFLOW(size))
                return true;

        if (overflows_type(size, obj->base.size))
                return true;

        return false;
}

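/*
 * Illustrative sketch only (not verbatim driver code): a backing-store
 * creation path is expected to reject oversized requests with the helper
 * above before touching the object, e.g. (create_backing() is a
 * hypothetical name):
 *
 *      int create_backing(struct drm_i915_private *i915, u64 size)
 *      {
 *              if (i915_gem_object_size_2big(size))
 *                      return -E2BIG;
 *              ...
 *      }
 */
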
void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops,
                          struct lock_class_key *key,
                          unsigned alloc_flags);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
                                       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages,
                                     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
                                const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
                               const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                                    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
        return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
        if (obj && !kref_get_unless_zero(&obj->base.refcount))
                obj = NULL;

        return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
        struct drm_i915_gem_object *obj;

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle);
        obj = i915_gem_object_get_rcu(obj);
        rcu_read_unlock();

        return obj;
}

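/*
 * Typical use (sketch, not verbatim driver code): take a full reference with
 * i915_gem_object_lookup() for anything that outlives the RCU read section,
 * and reserve the _rcu variant for short, read-only peeks. Assuming args is
 * a hypothetical ioctl argument struct carrying the handle:
 *
 *      struct drm_i915_gem_object *obj;
 *
 *      obj = i915_gem_object_lookup(file, args->handle);
 *      if (!obj)
 *              return -ENOENT;
 *      ... use obj ...
 *      i915_gem_object_put(obj);
 */
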
__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
        drm_gem_object_get(&obj->base);
        return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
        __drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker exists, i.e. the object is
 * still referenced, assert that the object lock is held.
 */
static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
{
        /*
         * Note: objects found on the mm lists with a zero refcount are
         * protected by kref_get_unless_zero() rather than the object lock.
         */
        if (IS_ENABLED(CONFIG_LOCKDEP) &&
            kref_read(&obj->base.refcount) > 0)
                assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
                                         struct i915_gem_ww_ctx *ww,
                                         bool intr)
{
        int ret;

        if (intr)
                ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
        else
                ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

        if (!ret && ww)
                list_add_tail(&obj->obj_link, &ww->obj_list);
        if (ret == -EALREADY)
                ret = 0;

        if (ret == -EDEADLK)
                ww->contended = obj;

        return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
                                       struct i915_gem_ww_ctx *ww)
{
        return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
                                                     struct i915_gem_ww_ctx *ww)
{
        WARN_ON(ww && !ww->intr);
        return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
        return dma_resv_trylock(obj->base.resv);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
        dma_resv_unlock(obj->base.resv);
}

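/*
 * Locking sketch (illustrative, assuming the i915_gem_ww_ctx helpers from
 * i915_gem_ww.h): with a ww context, -EALREADY ("already locked in this
 * context") is swallowed above, while -EDEADLK records the contended object
 * and asks the caller to back off and retry:
 *
 *      struct i915_gem_ww_ctx ww;
 *      int err;
 *
 *      i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *      err = i915_gem_object_lock(obj, &ww);
 *      if (!err) {
 *              ... operate on obj; all locks taken under ww drop at fini ...
 *      }
 *      if (err == -EDEADLK) {
 *              err = i915_gem_ww_ctx_backoff(&ww);
 *              if (!err)
 *                      goto retry;
 *      }
 *      i915_gem_ww_ctx_fini(&ww);
 */
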
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
        return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
        set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
        clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
                         unsigned long flags)
{
        return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
}

static inline bool
i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
        return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
        GEM_BUG_ON(!tiling);
        return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
        return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
        return (i915_gem_object_get_stride(obj) *
                i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                               unsigned int tiling, unsigned int stride);

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
                         unsigned int *offset, bool allow_alloc);

static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n,
                       unsigned int *offset, bool allow_alloc)
{
        return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc);
}

static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
                           unsigned int n,
                           unsigned int *offset, bool allow_alloc)
{
        return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc);
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
                         unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        assert_object_held(obj);

        if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
                return 0;

        return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
        return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
        return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_unpin_pages(obj);
}

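/*
 * Pinning sketch (illustrative): i915_gem_object_pin_pages() expects the
 * object lock to be held and keeps the backing store resident until the
 * matching unpin, e.g.:
 *
 *      err = i915_gem_object_lock(obj, NULL);
 *      if (err)
 *              return err;
 *
 *      err = i915_gem_object_pin_pages(obj);
 *      if (!err) {
 *              ... access pages via i915_gem_object_get_page() and friends ...
 *              i915_gem_object_unpin_pages(obj);
 *      }
 *      i915_gem_object_unlock(obj);
 *
 * i915_gem_object_pin_pages_unlocked() does the lock/pin dance itself for
 * callers not already inside a ww transaction.
 */
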
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
                                                    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}

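/*
 * Mapping sketch (illustrative; data/len are stand-ins): pin, access through
 * the returned kernel pointer, flush if the GPU may consume the contents,
 * then unpin:
 *
 *      void *vaddr;
 *
 *      vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *
 *      memcpy(vaddr, data, len);
 *      i915_gem_object_flush_map(obj);
 *      i915_gem_object_unpin_map(obj);
 *
 * I915_MAP_WB vs I915_MAP_WC selects the pgprot of the vmapping; the plain
 * i915_gem_object_pin_map() variant expects the object lock to already be
 * held.
 */
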
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
                                 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
                                  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE  BIT(0)
#define CLFLUSH_AFTER   BIT(1)
#define CLFLUSH_FLAGS   (CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}

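/*
 * CPU access sketch (illustrative; n is a stand-in page index): the
 * prepare_read()/prepare_write() helpers pin the pages and report which
 * cache flushes the caller must perform around its access, and
 * finish_access() drops the pin again:
 *
 *      unsigned int needs_clflush;
 *      struct page *page;
 *      void *vaddr;
 *
 *      err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *      if (err)
 *              return err;
 *
 *      page = i915_gem_object_get_page(obj, n);
 *      vaddr = kmap(page);
 *      if (needs_clflush & CLFLUSH_BEFORE)
 *              drm_clflush_virt_range(vaddr, PAGE_SIZE);
 *      ... modify vaddr ...
 *      if (needs_clflush & CLFLUSH_AFTER)
 *              drm_clflush_virt_range(vaddr, PAGE_SIZE);
 *      kunmap(page);
 *
 *      i915_gem_object_finish_access(obj);
 */
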
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
        struct intel_engine_cs *engine = NULL;
        struct dma_fence *fence;

        rcu_read_lock();
        fence = dma_resv_get_excl_rcu(obj->base.resv);
        rcu_read_unlock();

        if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
                engine = to_request(fence)->engine;
        dma_fence_put(fence);

        return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     struct i915_gem_ww_ctx *ww,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view,
                                     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->cache_dirty)
                return false;

        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                return true;

        /* Currently in use by HW (display engine)? Keep flushed. */
        return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        if (cpu_write_needs_clflush(obj))
                obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
                                  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                         enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                              enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                  enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                       enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
        return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

#endif