linux/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

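/* Translate the dma-buf's private pointer back to the exporting GEM object. */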
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}

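/*
 * Exporter hook: pin the object's backing store and hand the importer an
 * independent copy of the scatterlist, DMA-mapped for the importer's
 * device. Copying the table lets each attachment be unmapped without
 * touching the object's own obj->mm.pages.
 */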
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
                ret = -ENOMEM;
                goto err_unpin_pages;
        }

        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->mm.pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (ret)
                goto err_free_sg;

        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err_unpin_pages:
        i915_gem_object_unpin_pages(obj);
err:
        return ERR_PTR(ret);
}

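/*
 * Exporter hook: undo i915_gem_map_dma_buf() by unmapping and freeing the
 * attachment's scatterlist copy, then dropping the page pin.
 */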
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

        dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sg);
        kfree(sg);

        i915_gem_object_unpin_pages(obj);
}

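/*
 * Exporter hook: provide a kernel virtual address for CPU access by
 * pinning a whole-object map with write-back (WB) caching.
 */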
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        void *vaddr;

        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        dma_buf_map_set_vaddr(map, vaddr);

        return 0;
}

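/*
 * Exporter hook: flush CPU writes out of the kernel map and release the
 * pin taken by i915_gem_dmabuf_vmap().
 */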
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
}

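/*
 * Exporter hook: let userspace mmap the dma-buf by forwarding onto the
 * shmem file backing the object, so faults are handled by the regular
 * page-cache machinery. Objects without a backing filp (for example
 * stolen-memory objects) cannot be mapped this way.
 */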
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        vma_set_file(vma, obj->base.filp);

        return 0;
}

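/*
 * Exporter hook: move the object into the CPU domain before the importer
 * touches it with the CPU, flushing stale GPU data as required. A
 * DMA_TO_DEVICE or DMA_BIDIRECTIONAL direction implies the CPU will write.
 */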
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                goto out;

        err = i915_gem_object_set_to_cpu_domain(obj, write);
        i915_gem_object_unlock(obj);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

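/*
 * Exporter hook: after CPU access has finished, return the object to the
 * GTT domain so later GPU access observes coherent data.
 */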
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                goto out;

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        i915_gem_object_unlock(obj);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

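/* dma-buf operations for buffers exported by i915. */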
static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};

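/*
 * Export a GEM object as a dma-buf. The dma-buf shares the object's
 * reservation object, so implicit fencing keeps working across the
 * export boundary; backends may refuse or prepare the export through the
 * optional dmabuf_export() hook.
 */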
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
        exp_info.resv = obj->base.resv;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);

                if (ret)
                        return ERR_PTR(ret);
        }

        return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

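/*
 * Importer side: populate obj->mm.pages by mapping the attached foreign
 * dma-buf. DMA_BIDIRECTIONAL is used since the object may be both read
 * and written by the GPU.
 */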
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;
        unsigned int sg_page_sizes;

        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        sg_page_sizes = i915_sg_page_sizes(pages->sgl);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
}

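/* Importer side: release the mapping obtained in get_pages. */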
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        dma_buf_unmap_attachment(obj->base.import_attach, pages,
                                 DMA_BIDIRECTIONAL);
}

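/* Object ops for GEM objects wrapping an imported dma-buf. */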
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .name = "i915_gem_object_dmabuf",
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

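/*
 * Import a dma-buf as a GEM object. A self-import of our own dma-buf on
 * the same device short-circuits to a new reference on the original
 * object; anything else is wrapped in a fresh GEM object that attaches to
 * the dma-buf and shares its reservation object.
 */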
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        static struct lock_class_key lock_class;
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dma-buf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dma-buf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc();
        if (!obj) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
        obj->base.import_attach = attach;
        obj->base.resv = dma_buf->resv;

        /*
         * We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif