/* linux/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
   4 * Author: Rob Clark <rob.clark@linaro.org>
   5 */
   6
   7#include <linux/dma-buf.h>
   8#include <linux/highmem.h>
   9
  10#include <drm/drm_prime.h>
  11
  12#include "omap_drv.h"
  13
  14/* -----------------------------------------------------------------------------
  15 * DMABUF Export
  16 */
  17
  18static struct sg_table *omap_gem_map_dma_buf(
  19                struct dma_buf_attachment *attachment,
  20                enum dma_data_direction dir)
  21{
  22        struct drm_gem_object *obj = attachment->dmabuf->priv;
  23        struct sg_table *sg;
  24        dma_addr_t dma_addr;
  25        int ret;
  26
  27        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
  28        if (!sg)
  29                return ERR_PTR(-ENOMEM);
  30
  31        /* camera, etc, need physically contiguous.. but we need a
  32         * better way to know this..
  33         */
  34        ret = omap_gem_pin(obj, &dma_addr);
  35        if (ret)
  36                goto out;
  37
  38        ret = sg_alloc_table(sg, 1, GFP_KERNEL);
  39        if (ret)
  40                goto out;
  41
  42        sg_init_table(sg->sgl, 1);
  43        sg_dma_len(sg->sgl) = obj->size;
  44        sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
  45        sg_dma_address(sg->sgl) = dma_addr;
  46
  47        /* this must be after omap_gem_pin() to ensure we have pages attached */
  48        omap_gem_dma_sync_buffer(obj, dir);
  49
  50        return sg;
  51out:
  52        kfree(sg);
  53        return ERR_PTR(ret);
  54}
  55
  56static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
  57                struct sg_table *sg, enum dma_data_direction dir)
  58{
  59        struct drm_gem_object *obj = attachment->dmabuf->priv;
  60        omap_gem_unpin(obj);
  61        sg_free_table(sg);
  62        kfree(sg);
  63}
  64
  65static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
  66                enum dma_data_direction dir)
  67{
  68        struct drm_gem_object *obj = buffer->priv;
  69        struct page **pages;
  70        if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
  71                /* TODO we would need to pin at least part of the buffer to
  72                 * get de-tiled view.  For now just reject it.
  73                 */
  74                return -ENOMEM;
  75        }
  76        /* make sure we have the pages: */
  77        return omap_gem_get_pages(obj, &pages, true);
  78}
  79
  80static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
  81                                          enum dma_data_direction dir)
  82{
  83        struct drm_gem_object *obj = buffer->priv;
  84        omap_gem_put_pages(obj);
  85        return 0;
  86}
  87
  88static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
  89                struct vm_area_struct *vma)
  90{
  91        struct drm_gem_object *obj = buffer->priv;
  92        int ret = 0;
  93
  94        ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
  95        if (ret < 0)
  96                return ret;
  97
  98        return omap_gem_mmap_obj(obj, vma);
  99}
 100
/*
 * dma-buf operations for buffers exported by omapdrm.  .release uses the
 * common DRM helper; everything else is implemented above.
 */
static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.mmap = omap_gem_dmabuf_mmap,
};
 109
 110struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
 111{
 112        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 113
 114        exp_info.ops = &omap_dmabuf_ops;
 115        exp_info.size = obj->size;
 116        exp_info.flags = flags;
 117        exp_info.priv = obj;
 118
 119        return drm_gem_dmabuf_export(obj->dev, &exp_info);
 120}
 121
 122/* -----------------------------------------------------------------------------
 123 * DMABUF Import
 124 */
 125
/*
 * Import a dma-buf as a GEM object.
 *
 * If the dma-buf was exported by this very device, short-circuit and
 * return the underlying GEM object with an extra reference.  Otherwise
 * attach to the buffer, map it for DMA, and wrap the resulting sg_table
 * in a new omap GEM object.  On success the import holds a reference on
 * the dma-buf (dropped when the GEM object is destroyed); on failure all
 * intermediate state is unwound via the goto cleanup labels.
 */
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* keep the dma-buf alive for as long as the import exists */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	/* the GEM object now owns the attachment (and the dma-buf ref) */
	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
 176