linux/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/reservation.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}

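/*
 * Build an independent scatterlist for the importer: the exporter's sg_table
 * is copied entry by entry and then DMA mapped for the attaching device,
 * with the backing pages pinned for the lifetime of the mapping; unmap
 * undoes the mapping and drops the pin.
 */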
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
                goto err_unpin_pages;
        }

        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->mm.pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
                ret = -ENOMEM;
                goto err_free_sg;
        }

        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err_unpin_pages:
        i915_gem_object_unpin_pages(obj);
err:
        return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);

        i915_gem_object_unpin_pages(obj);
}

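/*
 * vmap/vunmap provide a contiguous kernel virtual mapping of the whole
 * object using a cacheable (write-back) mapping; vunmap flushes the map
 * before dropping the pin.
 */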
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
}

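/*
 * kmap/kunmap map a single page of the object for CPU access. Only objects
 * backed by struct pages can be mapped this way; synchronisation is left to
 * the caller via .begin_cpu_access()/.end_cpu_access().
 */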
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct page *page;

        if (page_num >= obj->base.size >> PAGE_SHIFT)
                return NULL;

        if (!i915_gem_object_has_struct_page(obj))
                return NULL;

        if (i915_gem_object_pin_pages(obj))
                return NULL;

        /* Synchronisation is left to the caller (via .begin_cpu_access()) */
        page = i915_gem_object_get_page(obj, page_num);
        if (IS_ERR(page))
                goto err_unpin;

        return kmap(page);

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        kunmap(virt_to_page(addr));
        i915_gem_object_unpin_pages(obj);
}

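/*
 * mmap of the dma-buf is only supported for objects with a backing shmem
 * file: the request is forwarded to that file's mmap handler, and
 * vma->vm_file is replaced with a new reference to the backing file.
 */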
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        fput(vma->vm_file);
        vma->vm_file = get_file(obj->base.filp);

        return 0;
}

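/*
 * begin/end_cpu_access bracket CPU access by the importer: begin moves the
 * object into the CPU domain, end moves it back to the GTT (coherent)
 * domain, with the pages pinned and the object lock held across the domain
 * change.
 */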
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_gem_object_lock_interruptible(obj);
        if (err)
                goto out;

        err = i915_gem_object_set_to_cpu_domain(obj, write);
        i915_gem_object_unlock(obj);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_gem_object_lock_interruptible(obj);
        if (err)
                goto out;

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        i915_gem_object_unlock(obj);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

static const struct dma_buf_ops i915_dmabuf_ops =  {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .map = i915_gem_dmabuf_kmap,
        .unmap = i915_gem_dmabuf_kunmap,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};

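/*
 * Export a GEM object as a dma-buf, sharing its reservation object so that
 * fences are visible to both exporter and importer. Backends may veto or
 * prepare the export via the optional dmabuf_export() hook.
 */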
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
        exp_info.resv = obj->base.resv;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);
                if (ret)
                        return ERR_PTR(ret);
        }

        return drm_gem_dmabuf_export(dev, &exp_info);
}

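/*
 * Page providers for imported dma-bufs: the pages are obtained by mapping
 * the attachment bidirectionally and released by unmapping it again.
 */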
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;
        unsigned int sg_page_sizes;

        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        sg_page_sizes = i915_sg_page_sizes(pages->sgl);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        dma_buf_unmap_attachment(obj->base.import_attach, pages,
                                 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

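/*
 * Import a dma-buf as a GEM object. A buffer we exported ourselves is
 * short-circuited back to the original object; anything else is attached
 * and wrapped in a new object that fetches its pages through the
 * attachment on demand.
 */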
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dmabuf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc();
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;
        obj->base.resv = dma_buf->resv;

        /*
         * We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif