linux/drivers/gpu/drm/mediatek/mtk_drm_gem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/dma-buf.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"

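/*
 * Allocate and initialize the common GEM part of a MediaTek GEM object.
 * The size is rounded up to a multiple of PAGE_SIZE; no backing memory
 * is allocated here.
 */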
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct mtk_drm_gem_obj *mtk_gem_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
	if (!mtk_gem_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(mtk_gem_obj);
		return ERR_PTR(ret);
	}

	return mtk_gem_obj;
}

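/*
 * Allocate a GEM object backed by DMA memory. The buffer is always
 * write-combined; a kernel virtual mapping is only created when alloc_kmap
 * is true, otherwise DMA_ATTR_NO_KERNEL_MAPPING avoids setting one up.
 */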
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
					   size_t size, bool alloc_kmap)
{
	struct mtk_drm_private *priv = dev->dev_private;
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
					  &mtk_gem->dma_addr, GFP_KERNEL,
					  mtk_gem->dma_attrs);
	if (!mtk_gem->cookie) {
		DRM_ERROR("failed to allocate %zx byte dma buffer\n", obj->size);
		ret = -ENOMEM;
		goto err_gem_free;
	}

	if (alloc_kmap)
		mtk_gem->kvaddr = mtk_gem->cookie;

	DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
			 mtk_gem->cookie, &mtk_gem->dma_addr,
			 size);

	return mtk_gem;

err_gem_free:
	drm_gem_object_release(obj);
	kfree(mtk_gem);
	return ERR_PTR(ret);
}

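/*
 * Tear down a GEM object: imported buffers are returned to PRIME, locally
 * allocated ones are handed back to the DMA allocator.
 */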
void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	if (mtk_gem->sg)
		drm_prime_gem_destroy(obj, mtk_gem->sg);
	else
		dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
			       mtk_gem->dma_addr, mtk_gem->dma_attrs);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(mtk_gem);
}

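/*
 * Create a "dumb" buffer for scanout: derive pitch and size from the
 * requested width/height/bpp, allocate the buffer without a kernel
 * mapping, and return a handle to it in args->handle.
 */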
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	mtk_gem = mtk_drm_gem_create(dev, args->size, false);
	if (IS_ERR(mtk_gem))
		return PTR_ERR(mtk_gem);

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle returned to userspace is that id.
	 */
	ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(&mtk_gem->base);

	return 0;

err_handle_create:
	mtk_drm_gem_free_object(&mtk_gem->base);
	return ret;
}

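/*
 * Common mmap helper shared by the GEM and PRIME paths: map the DMA
 * buffer into the vma with the same attributes it was allocated with.
 */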
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	int ret;
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	/*
	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

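/* mmap entry point for PRIME-exported buffers. */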
int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return mtk_drm_gem_object_mmap(obj, vma);
}

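/*
 * mmap entry point for the DRM device file: drm_gem_mmap() looks up the
 * object behind the fake offset and performs the common checks before the
 * buffer itself is mapped.
 */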
int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	obj = vma->vm_private_data;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	return mtk_drm_gem_object_mmap(obj, vma);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
				    mtk_gem->dma_addr, obj->size,
				    mtk_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

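/*
 * Import a PRIME buffer described by an sg_table. The driver requires the
 * buffer to be contiguous in DMA address space, so non-contiguous tables
 * are rejected.
 */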
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;
	struct scatterlist *s;
	unsigned int i;
	dma_addr_t expected;

	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	/* Each entry must start where the previous one ended. */
	expected = sg_dma_address(sg->sgl);
	for_each_sg(sg->sgl, s, sg->nents, i) {
		if (sg_dma_address(s) != expected) {
			DRM_ERROR("sg_table is not contiguous\n");
			ret = -EINVAL;
			goto err_gem_free;
		}
		expected = sg_dma_address(s) + sg_dma_len(s);
	}

	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
	mtk_gem->sg = sg;

	return &mtk_gem->base;

err_gem_free:
	drm_gem_object_release(&mtk_gem->base);
	kfree(mtk_gem);
	return ERR_PTR(ret);
}

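/*
 * Return a kernel virtual address for the buffer. Objects created with
 * alloc_kmap reuse the mapping made at allocation time; for all others
 * the pages are gathered from the sg_table and vmap()ed write-combined
 * to match the DMA allocation attributes.
 */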
void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct sg_table *sgt;
	struct sg_page_iter iter;
	unsigned int npages;
	unsigned int i = 0;

	if (mtk_gem->kvaddr)
		return mtk_gem->kvaddr;

	sgt = mtk_gem_prime_get_sg_table(obj);
	if (IS_ERR(sgt))
		return NULL;

	npages = obj->size >> PAGE_SHIFT;
	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
	if (!mtk_gem->pages)
		goto out;

	for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
		/* Stop before writing past the end of the page array. */
		if (i >= npages)
			break;
		mtk_gem->pages[i++] = sg_page_iter_page(&iter);
	}
	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
			       pgprot_writecombine(PAGE_KERNEL));

out:
	/* mtk_gem_prime_get_sg_table() hands ownership of the table to us. */
	sg_free_table(sgt);
	kfree(sgt);

	return mtk_gem->kvaddr;
}

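/* Undo mtk_drm_gem_prime_vmap(): drop the mapping and the page array. */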
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);

	if (!mtk_gem->pages)
		return;

	vunmap(vaddr);
	mtk_gem->kvaddr = NULL;
	kfree(mtk_gem->pages);
	mtk_gem->pages = NULL;
}