/* linux/drivers/gpu/drm/nouveau/nouveau_bo.h */
#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__

#include <drm/drm_gem.h>

struct nouveau_channel;
struct nouveau_fence;
struct nvkm_vma;

  10struct nouveau_bo {
  11        struct ttm_buffer_object bo;
  12        struct ttm_placement placement;
  13        u32 valid_domains;
  14        struct ttm_place placements[3];
  15        struct ttm_place busy_placements[3];
  16        bool force_coherent;
  17        struct ttm_bo_kmap_obj kmap;
  18        struct list_head head;
  19
  20        /* protected by ttm_bo_reserve() */
  21        struct drm_file *reserved_by;
  22        struct list_head entry;
  23        int pbbo_index;
  24        bool validate_mapped;
  25
  26        struct list_head vma_list;
  27        unsigned page_shift;
  28
  29        u32 tile_mode;
  30        u32 tile_flags;
  31        struct nouveau_drm_tile *tile;
  32
  33        /* Only valid if allocated via nouveau_gem_new() and iff you hold a
  34         * gem reference to it! For debugging, use gem.filp != NULL to test
  35         * whether it is valid. */
  36        struct drm_gem_object gem;
  37
  38        /* protect by the ttm reservation lock */
  39        int pin_refcnt;
  40
  41        struct ttm_bo_kmap_obj dma_buf_vmap;
  42};
  44static inline struct nouveau_bo *
  45nouveau_bo(struct ttm_buffer_object *bo)
  46{
  47        return container_of(bo, struct nouveau_bo, bo);
  48}
  50static inline int
  51nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
  52{
  53        struct nouveau_bo *prev;
  54
  55        if (!pnvbo)
  56                return -EINVAL;
  57        prev = *pnvbo;
  58
  59        *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
  60        if (prev) {
  61                struct ttm_buffer_object *bo = &prev->bo;
  62
  63                ttm_bo_unref(&bo);
  64        }
  65
  66        return 0;
  67}

extern struct ttm_bo_driver nouveau_bo_driver;

void nouveau_bo_move_init(struct nouveau_drm *);
int  nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
		    u32 tile_mode, u32 tile_flags, struct sg_table *sg,
		    struct reservation_object *robj,
		    struct nouveau_bo **);
int  nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
int  nouveau_bo_unpin(struct nouveau_bo *);
int  nouveau_bo_map(struct nouveau_bo *);
void nouveau_bo_unmap(struct nouveau_bo *);
void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
			 bool no_wait_gpu);
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);

struct nvkm_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);

int  nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
			struct nvkm_vma *);
void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);

/* TODO: submit equivalent to TTM generic API upstream? */
  98static inline void __iomem *
  99nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
 100{
 101        bool is_iomem;
 102        void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
 103                                                &nvbo->kmap, &is_iomem);
 104        WARN_ON_ONCE(ioptr && !is_iomem);
 105        return ioptr;
 106}

#endif /* __NOUVEAU_BO_H__ */