/* linux/drivers/gpu/drm/v3d/v3d_bo.c */
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

/**
 * DOC: V3D GEM BO management support
 *
 * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
 * GPU and the bus, allowing us to use shmem objects for our storage
 * instead of CMA.
 *
 * Physically contiguous objects may still be imported to V3D, but the
 * driver doesn't allocate physically contiguous objects on its own.
 * Display engines requiring physically contiguous allocations should
 * look into Mesa's "renderonly" support (as used by the Mesa pl111
 * driver) for an example of how to integrate with V3D.
 *
 * Long term, we should support evicting pages from the MMU when under
 * memory pressure (thus the v3d_bo_get_pages() refcounting), but
 * that's not a high priority since our systems tend to not have swap.
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
/* Called by the DRM core on the last userspace/kernel unreference of
 * the BO.
 */
void v3d_free_object(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);

	/* Tear down the BO's PTEs before its page-table range is
	 * returned to the allocator below.
	 */
	v3d_mmu_remove_ptes(bo);

	/* Undo this BO's contribution to the bo_stats counters. */
	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated--;
	v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	/* Release the BO's range in the GPU's page tables. */
	spin_lock(&v3d->mm_lock);
	drm_mm_remove_node(&bo->node);
	spin_unlock(&v3d->mm_lock);

	/* GPU execution may have dirtied any pages in the BO. */
	bo->base.pages_mark_dirty_on_put = true;

	drm_gem_shmem_free_object(obj);
}

/* GEM object ops: everything is serviced by the generic shmem
 * helpers except .free, which additionally has to tear down our MMU
 * mappings, GPU VA reservation, and allocation stats.
 */
static const struct drm_gem_object_funcs v3d_gem_funcs = {
	.free = v3d_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

  64/* gem_create_object function for allocating a BO struct and doing
  65 * early setup.
  66 */
  67struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
  68{
  69        struct v3d_bo *bo;
  70        struct drm_gem_object *obj;
  71
  72        if (size == 0)
  73                return NULL;
  74
  75        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
  76        if (!bo)
  77                return NULL;
  78        obj = &bo->base.base;
  79
  80        obj->funcs = &v3d_gem_funcs;
  81        bo->base.map_wc = true;
  82        INIT_LIST_HEAD(&bo->unref_head);
  83
  84        return &bo->base.base;
  85}
  86
/* Shared tail of BO creation, used for both natively allocated and
 * dma-buf-imported BOs: pins the backing pages, reserves the BO's
 * range in the GPU's page tables, and fills in its PTEs.
 *
 * Returns 0 on success or a negative errno; on failure nothing has
 * been committed (no stats, no drm_mm node, no PTEs).
 */
static int
v3d_bo_create_finish(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);
	struct sg_table *sgt;
	int ret;

	/* So far we pin the BO in the MMU for its lifetime, so use
	 * shmem's helper for getting a lifetime sgt.
	 */
	sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	spin_lock(&v3d->mm_lock);
	/* Allocate the object's space in the GPU's page tables.
	 * Inserting PTEs will happen later, but the offset is for the
	 * lifetime of the BO.
	 */
	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
					 obj->size >> PAGE_SHIFT,
					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
	spin_unlock(&v3d->mm_lock);
	if (ret)
		return ret;

	/* Track stats for /debug/dri/n/bo_stats. */
	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated++;
	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	v3d_mmu_insert_ptes(bo);

	return 0;
}

/* Allocates a shmem-backed BO of at least @unaligned_size bytes and
 * maps it into the GPU's address space.  Returns the new BO or an
 * ERR_PTR.  (@file_priv is currently unused.)
 */
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t unaligned_size)
{
	struct drm_gem_shmem_object *shmem_obj;
	struct v3d_bo *bo;
	int ret;

	shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
	if (IS_ERR(shmem_obj))
		return ERR_CAST(shmem_obj);
	bo = to_v3d_bo(&shmem_obj->base);

	ret = v3d_bo_create_finish(&shmem_obj->base);
	if (ret)
		goto free_obj;

	return bo;

free_obj:
	/* Free with the shmem helper rather than dropping a reference:
	 * v3d_free_object() would try to undo MMU/stats/drm_mm state
	 * that a failed v3d_bo_create_finish() never committed.
	 */
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ERR_PTR(ret);
}

 148struct drm_gem_object *
 149v3d_prime_import_sg_table(struct drm_device *dev,
 150                          struct dma_buf_attachment *attach,
 151                          struct sg_table *sgt)
 152{
 153        struct drm_gem_object *obj;
 154        int ret;
 155
 156        obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
 157        if (IS_ERR(obj))
 158                return obj;
 159
 160        ret = v3d_bo_create_finish(obj);
 161        if (ret) {
 162                drm_gem_shmem_free_object(obj);
 163                return ERR_PTR(ret);
 164        }
 165
 166        return obj;
 167}
 168
 169int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
 170                        struct drm_file *file_priv)
 171{
 172        struct drm_v3d_create_bo *args = data;
 173        struct v3d_bo *bo = NULL;
 174        int ret;
 175
 176        if (args->flags != 0) {
 177                DRM_INFO("unknown create_bo flags: %d\n", args->flags);
 178                return -EINVAL;
 179        }
 180
 181        bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
 182        if (IS_ERR(bo))
 183                return PTR_ERR(bo);
 184
 185        args->offset = bo->node.start << PAGE_SHIFT;
 186
 187        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 188        drm_gem_object_put(&bo->base.base);
 189
 190        return ret;
 191}
 192
 193int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
 194                      struct drm_file *file_priv)
 195{
 196        struct drm_v3d_mmap_bo *args = data;
 197        struct drm_gem_object *gem_obj;
 198
 199        if (args->flags != 0) {
 200                DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
 201                return -EINVAL;
 202        }
 203
 204        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 205        if (!gem_obj) {
 206                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 207                return -ENOENT;
 208        }
 209
 210        args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 211        drm_gem_object_put(gem_obj);
 212
 213        return 0;
 214}
 215
 216int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
 217                            struct drm_file *file_priv)
 218{
 219        struct drm_v3d_get_bo_offset *args = data;
 220        struct drm_gem_object *gem_obj;
 221        struct v3d_bo *bo;
 222
 223        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 224        if (!gem_obj) {
 225                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 226                return -ENOENT;
 227        }
 228        bo = to_v3d_bo(gem_obj);
 229
 230        args->offset = bo->node.start << PAGE_SHIFT;
 231
 232        drm_gem_object_put(gem_obj);
 233        return 0;
 234}
 235