linux/drivers/gpu/drm/i915/i915_gem_dmabuf.c
/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

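/*
 * Exporter callback: called when an attached device asks to map the buffer
 * for DMA. Builds an independent copy of the object's scatterlist, maps it
 * for the attaching device and pins the backing pages so they cannot be
 * reaped while the mapping exists.
 */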
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_mutex_lock_interruptible(obj->base.dev);
        if (ret)
                return ERR_PTR(ret);

        ret = i915_gem_object_get_pages(obj);
        if (ret) {
                st = ERR_PTR(ret);
                goto out;
        }

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                st = ERR_PTR(-ENOMEM);
                goto out;
        }

        ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
        if (ret) {
                kfree(st);
                st = ERR_PTR(ret);
                goto out;
        }

        src = obj->pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->pages->nents; i++) {
                sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
                sg_free_table(st);
                kfree(st);
                st = ERR_PTR(-ENOMEM);
                goto out;
        }

        i915_gem_object_pin_pages(obj);

out:
        mutex_unlock(&obj->base.dev->struct_mutex);
        return st;
}

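/*
 * Exporter callback: tear down a mapping created by i915_gem_map_dma_buf()
 * and release the page pin it took.
 */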
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = attachment->dmabuf->priv;

        mutex_lock(&obj->base.dev->struct_mutex);

        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);

        /* drop the page pin taken in i915_gem_map_dma_buf() */
        i915_gem_object_unpin_pages(obj);

        mutex_unlock(&obj->base.dev->struct_mutex);
}

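/* Called when the last reference to the exported dma-buf file is dropped. */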
static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;

        if (obj->base.export_dma_buf == dma_buf) {
                /* drop the reference the export fd holds */
                obj->base.export_dma_buf = NULL;
                drm_gem_object_unreference_unlocked(&obj->base);
        }
}

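/*
 * Exporter callback: build (or reuse) a contiguous kernel virtual mapping
 * of the object's pages. The mapping is refcounted via vmapping_count, so
 * nested vmap/vunmap calls from importers share a single vmap().
 */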
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
        struct scatterlist *sg;
        struct page **pages;
        int ret, i;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        if (obj->dma_buf_vmapping) {
                obj->vmapping_count++;
                goto out_unlock;
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto error;

        ret = -ENOMEM;

        pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
        if (pages == NULL)
                goto error;

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
                pages[i] = sg_page(sg);

        obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
        drm_free_large(pages);

        if (!obj->dma_buf_vmapping)
                goto error;

        obj->vmapping_count = 1;
        i915_gem_object_pin_pages(obj);
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return obj->dma_buf_vmapping;

error:
        mutex_unlock(&dev->struct_mutex);
        return ERR_PTR(ret);
}

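/* Exporter callback: drop one vmap reference; unmap on the last one. */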
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;

        /*
         * vunmap has no way to report failure, so take the lock
         * uninterruptibly rather than silently leak a vmap reference
         * when the wait is interrupted.
         */
        mutex_lock(&dev->struct_mutex);
        if (--obj->vmapping_count == 0) {
                vunmap(obj->dma_buf_vmapping);
                obj->dma_buf_vmapping = NULL;

                i915_gem_object_unpin_pages(obj);
        }
        mutex_unlock(&dev->struct_mutex);
}

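/*
 * Page-at-a-time CPU access through the dma-buf interface is not
 * supported: the kmap/kmap_atomic callbacks report failure by returning
 * NULL, so importers must use vmap instead.
 */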
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

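/* Direct mmap of the dma-buf fd is not supported; importers get -EINVAL. */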
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

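/*
 * Called at the start of importer CPU access: move the object to the CPU
 * domain so pending GPU writes are flushed and visible, treating
 * DMA_TO_DEVICE and DMA_BIDIRECTIONAL access as CPU writes.
 */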
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
        int ret;
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ret = i915_gem_object_set_to_cpu_domain(obj, write);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

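/* Exporter vtable wiring the callbacks above into the dma-buf core. */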
static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = i915_gem_dmabuf_release,
        .kmap = i915_gem_dmabuf_kmap,
        .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
        .kunmap = i915_gem_dmabuf_kunmap,
        .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
};

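/*
 * Export a GEM object as a dma-buf. This is reached through the generic
 * DRM PRIME path rather than called directly; a minimal userspace sketch
 * (hedged; see struct drm_prime_handle in <drm/drm.h>) looks like:
 *
 *      struct drm_prime_handle args = { .handle = handle, .flags = 0 };
 *      ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *      // args.fd now holds a dma-buf fd another driver can import
 */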
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

        return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}

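/*
 * Importer-side get_pages: instead of shmem-backed pages, the object's
 * scatterlist comes from mapping the attached dma-buf, which is why the
 * object is flagged as already having a DMA mapping.
 */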
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *sg;

        sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg))
                return PTR_ERR(sg);

        obj->pages = sg;
        obj->has_dma_mapping = true;
        return 0;
}

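/* Importer-side put_pages: release the attachment's scatterlist mapping. */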
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        dma_buf_unmap_attachment(obj->base.import_attach,
                                 obj->pages, DMA_BIDIRECTIONAL);
        obj->has_dma_mapping = false;
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

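/*
 * Import a dma-buf as a GEM object. If the buffer is one of our own
 * exports on the same device, we just take a GEM reference; otherwise we
 * attach to the foreign buffer and wrap it in a new GEM object whose
 * pages are provided by i915_gem_object_dmabuf_ops.
 */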
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf->priv;
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(&obj->base);
                        dma_buf_put(dma_buf);
                        return &obj->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        if (ret) {
                i915_gem_object_free(obj);
                goto fail_detach;
        }

        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}