linux/drivers/gpu/drm/i915/i915_gem_dmabuf.c
/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>
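/* An i915-exported dma-buf keeps its backing GEM object in the private data. */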
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}
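/*
 * Map the object's backing store for DMA by an importing device. The
 * object's sg_table is copied so that every attachment gets an independent
 * mapping, and the pages stay pinned until i915_gem_unmap_dma_buf.
 */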
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_mutex_lock_interruptible(obj->base.dev);
        if (ret)
                goto err;

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto err_unlock;

        i915_gem_object_pin_pages(obj);

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }

        ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
                ret = -ENOMEM;
                goto err_free_sg;
        }

        mutex_unlock(&obj->base.dev->struct_mutex);
        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        mutex_unlock(&obj->base.dev->struct_mutex);
err:
        return ERR_PTR(ret);
}
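/* Reverse of i915_gem_map_dma_buf: unmap the copied sg_table and unpin the pages. */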
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

        mutex_lock(&obj->base.dev->struct_mutex);

        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);

        i915_gem_object_unpin_pages(obj);

        mutex_unlock(&obj->base.dev->struct_mutex);
}
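/*
 * Build a contiguous kernel virtual mapping of the whole object. The mapping
 * is created on first use and refcounted via vmapping_count, so concurrent
 * importers share a single vmap.
 */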
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        struct sg_page_iter sg_iter;
        struct page **pages;
        int ret, i;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        if (obj->dma_buf_vmapping) {
                obj->vmapping_count++;
                goto out_unlock;
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto err;

        i915_gem_object_pin_pages(obj);

        ret = -ENOMEM;

        pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
        if (pages == NULL)
                goto err_unpin;

        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
                pages[i++] = sg_page_iter_page(&sg_iter);

        obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
        drm_free_large(pages);

        if (!obj->dma_buf_vmapping)
                goto err_unpin;

        obj->vmapping_count = 1;
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return obj->dma_buf_vmapping;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err:
        mutex_unlock(&dev->struct_mutex);
        return ERR_PTR(ret);
}
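/* Drop one vmap reference; the mapping is torn down when the last user goes. */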
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;

        mutex_lock(&dev->struct_mutex);
        if (--obj->vmapping_count == 0) {
                vunmap(obj->dma_buf_vmapping);
                obj->dma_buf_vmapping = NULL;

                i915_gem_object_unpin_pages(obj);
        }
        mutex_unlock(&dev->struct_mutex);
}
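/*
 * Per-page kernel mappings are not supported: kmap returns NULL, which an
 * importer must treat as failure and work around via vmap or mmap instead.
 */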
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
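/*
 * Hand mmap off to the shmem file backing the object, then swap the vma's
 * file reference from the dma-buf file over to that backing file.
 */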
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        fput(vma->vm_file);
        vma->vm_file = get_file(obj->base.filp);

        return 0;
}
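/*
 * Move the object into the CPU domain before the importer touches it with
 * the CPU; DMA_TO_DEVICE and DMA_BIDIRECTIONAL imply the CPU will write.
 */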
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        int ret;
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ret = i915_gem_object_set_to_cpu_domain(obj, write);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
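/* CPU access is finished: flush the object back into the GTT domain. */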
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ret = i915_gem_object_set_to_gtt_domain(obj, false);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .kmap = i915_gem_dmabuf_kmap,
        .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
        .kunmap = i915_gem_dmabuf_kunmap,
        .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};
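/*
 * Export a GEM object as a dma-buf. Backends may provide a dmabuf_export
 * hook to prepare the object for (or veto) export before the buffer is
 * handed to the dma-buf core.
 */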
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);
                if (ret)
                        return ERR_PTR(ret);
        }

        return dma_buf_export(&exp_info);
}
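/*
 * For an imported object the backing pages live with the exporter: mapping
 * the attachment yields the sg_table that serves as obj->pages.
 */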
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *sg;

        sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg))
                return PTR_ERR(sg);

        obj->pages = sg;
        return 0;
}
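/* Release the exporter's sg_table when the object's pages are dropped. */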
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        dma_buf_unmap_attachment(obj->base.import_attach,
                                 obj->pages, DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};
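/*
 * Import a dma-buf as a GEM object. A buffer that this device exported
 * itself is short-circuited by taking a reference on the original object;
 * anything else is attached to and wrapped in a new GEM object whose pages
 * are fetched through the attachment on demand.
 */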
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own GEM
                         * increases the refcount on the GEM object itself
                         * instead of the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(&obj->base);
                        return &obj->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}