linux/drivers/gpu/drm/drm_prime.c
/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <drm/drm_prime.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in the handle_to_fd ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference needs to be released when the
 * final reference to the &dma_buf itself is dropped and its
 * &dma_buf_ops.release function is called. For GEM-based drivers,
 * the dma_buf should be exported using drm_gem_dmabuf_export() and
 * then released by drm_gem_dmabuf_release().
 *
 * On import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to it and stores the attachment
 * in the GEM object. When the imported object is destroyed, we
 * remove the attachment and drop the reference to the dma_buf.
 *
 * When all the references to the &dma_buf are dropped, i.e. when
 * userspace has closed both handles to the imported GEM object (through the
 * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported dma_buf
 * (obtained through the HANDLE_TO_FD IOCTL), and all kernel-internal
 * references are also gone, then the dma_buf gets destroyed.  This can also
 * happen as a part of the clean up procedure in the drm_release() function
 * if userspace fails to properly clean up.  Note that both the kernel and
 * userspace (by keeping the PRIME file descriptors open) can hold references
 * onto a &dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the GEM object
 * from the dma-buf private.  PRIME will do this automatically for drivers
 * that use the drm_gem_prime_{import,export} helpers.
 */

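/*
 * struct drm_prime_member tracks one dma_buf/handle pairing for a DRM file.
 * Each member is linked into two rbtrees of &drm_prime_file_private so the
 * pairing can be looked up in either direction: dmabuf_rb is keyed by the
 * dma_buf pointer, handle_rb by the GEM handle.
 */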
struct drm_prime_member {
        struct dma_buf *dma_buf;
        uint32_t handle;

        struct rb_node dmabuf_rb;
        struct rb_node handle_rb;
};

struct drm_prime_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;
        struct rb_node **p, *rb;

        member = kmalloc(sizeof(*member), GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;

        rb = NULL;
        p = &prime_fpriv->dmabufs.rb_node;
        while (*p) {
                struct drm_prime_member *pos;

                rb = *p;
                pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (dma_buf > pos->dma_buf)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&member->dmabuf_rb, rb, p);
        rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

        rb = NULL;
        p = &prime_fpriv->handles.rb_node;
        while (*p) {
                struct drm_prime_member *pos;

                rb = *p;
                pos = rb_entry(rb, struct drm_prime_member, handle_rb);
                if (handle > pos->handle)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&member->handle_rb, rb, p);
        rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

        return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
                                                      uint32_t handle)
{
        struct rb_node *rb;

        rb = prime_fpriv->handles.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, handle_rb);
                if (member->handle == handle)
                        return member->dma_buf;
                else if (member->handle < handle)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                       struct dma_buf *dma_buf,
                                       uint32_t *handle)
{
        struct rb_node *rb;

        rb = prime_fpriv->dmabufs.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                } else if (member->dma_buf < dma_buf) {
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }

        return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
                              struct device *target_dev,
                              struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
        if (!prime_attach)
                return -ENOMEM;

        prime_attach->dir = DMA_NONE;
        attach->priv = prime_attach;

        if (!dev->driver->gem_prime_pin)
                return 0;

        return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
                               struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;
        struct sg_table *sgt;

        if (dev->driver->gem_prime_unpin)
                dev->driver->gem_prime_unpin(obj);

        if (!prime_attach)
                return;

        sgt = prime_attach->sgt;
        if (sgt) {
                if (prime_attach->dir != DMA_NONE)
                        dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                     prime_attach->dir);
                sg_free_table(sgt);
        }

        kfree(sgt);
        kfree(prime_attach);
        attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
                                        struct dma_buf *dma_buf)
{
        struct rb_node *rb;

        rb = prime_fpriv->dmabufs.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (member->dma_buf == dma_buf) {
                        rb_erase(&member->handle_rb, &prime_fpriv->handles);
                        rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

                        dma_buf_put(dma_buf);
                        kfree(member);
                        return;
                } else if (member->dma_buf < dma_buf) {
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                            enum dma_data_direction dir)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;

        if (WARN_ON(dir == DMA_NONE || !prime_attach))
                return ERR_PTR(-EINVAL);

        /* return the cached mapping when possible */
        if (prime_attach->dir == dir)
                return prime_attach->sgt;

        /*
         * two mappings with different directions for the same attachment are
         * not allowed
         */
        if (WARN_ON(prime_attach->dir != DMA_NONE))
                return ERR_PTR(-EBUSY);

        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

        if (!IS_ERR(sgt)) {
                if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        prime_attach->sgt = sgt;
                        prime_attach->dir = dir;
                }
        }

        return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
{
        /* nothing to be done here */
}

/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
                                      struct dma_buf_export_info *exp_info)
{
        struct dma_buf *dma_buf;

        dma_buf = dma_buf_export(exp_info);
        if (IS_ERR(dma_buf))
                return dma_buf;

        drm_dev_ref(dev);
        drm_gem_object_get(exp_info->priv);

        return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        /* drop the reference the export fd holds */
        drm_gem_object_put_unlocked(obj);

        drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                        unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                         unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
                                 unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
                                  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
                               struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (!dev->driver->gem_prime_mmap)
                return -ENOSYS;

        return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .map = drm_gem_dmabuf_kmap,
        .map_atomic = drm_gem_dmabuf_kmap_atomic,
        .unmap = drm_gem_dmabuf_kunmap,
        .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  * @gem_prime_vmap: vmap a buffer exported by your driver
 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  * @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 *
 * A sketch of a typical &drm_driver wiring follows this comment.
 */
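/*
 * Illustrative sketch (not part of this file): a driver using the PRIME
 * helpers typically wires up its &drm_driver along these lines; the foo_*
 * callbacks are hypothetical driver hooks:
 *
 *      static struct drm_driver foo_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_PRIME,
 *              .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *              .gem_prime_export = drm_gem_prime_export,
 *              .gem_prime_import = drm_gem_prime_import,
 *              .gem_prime_get_sg_table = foo_gem_prime_get_sg_table,
 *              .gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *              .gem_prime_vmap = foo_gem_prime_vmap,
 *              .gem_prime_vunmap = foo_gem_prime_vunmap,
 *              .gem_prime_mmap = foo_gem_prime_mmap,
 *      };
 */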

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj,
                                     int flags)
{
        struct dma_buf_export_info exp_info = {
                .exp_name = KBUILD_MODNAME, /* white lie for debug */
                .owner = dev->driver->fops->owner,
                .ops = &drm_gem_prime_dmabuf_ops,
                .size = obj->size,
                .flags = flags,
                .priv = obj,
        };

        if (dev->driver->gem_prime_res_obj)
                exp_info.resv = dev->driver->gem_prime_res_obj(obj);

        return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
                                                  struct drm_gem_object *obj,
                                                  uint32_t flags)
{
        struct dma_buf *dmabuf;

        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                dmabuf = ERR_PTR(-ENOENT);
                return dmabuf;
        }

        dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* export failed: propagate the error. The caller still owns
                 * its GEM reference and is responsible for dropping it.
                 */
                return dmabuf;
        }

        /*
         * Note that callers do not need to clean up the export cache
         * since the check for obj->handle_count guarantees that someone
         * will clean it up.
         */
        obj->dma_buf = dmabuf;
        get_dma_buf(obj->dma_buf);

        return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the gem_prime_export
 * driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                               struct drm_file *file_priv, uint32_t handle,
                               uint32_t flags,
                               int *prime_fd)
{
        struct drm_gem_object *obj;
        int ret = 0;
        struct dma_buf *dmabuf;

        mutex_lock(&file_priv->prime.lock);
        obj = drm_gem_object_lookup(file_priv, handle);
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
        if (dmabuf) {
                get_dma_buf(dmabuf);
                goto out_have_handle;
        }

        mutex_lock(&dev->object_name_lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
                get_dma_buf(dmabuf);
                goto out_have_obj;
        }

        if (obj->dma_buf) {
                get_dma_buf(obj->dma_buf);
                dmabuf = obj->dma_buf;
                goto out_have_obj;
        }

        dmabuf = export_and_register_object(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                ret = PTR_ERR(dmabuf);
                mutex_unlock(&dev->object_name_lock);
                goto out;
        }
out_have_obj:
        /*
         * If we've exported this buffer then cheat and add it to the import list
         * so we get the correct handle back. We must do this under the
         * protection of dev->object_name_lock to ensure that a racing gem close
         * ioctl doesn't fail to remove this buffer handle from the cache.
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dmabuf, handle);
        mutex_unlock(&dev->object_name_lock);
        if (ret)
                goto fail_put_dmabuf;
out_have_handle:
        ret = dma_buf_fd(dmabuf, flags);
        /*
         * We must _not_ remove the buffer from the handle cache since the newly
         * created dma buf is already linked in the global obj->dma_buf pointer,
         * and that is invariant as long as a userspace gem handle exists.
         * Closing the handle will clean out the cache anyway, so we don't leak.
         */
        if (ret < 0) {
                goto fail_put_dmabuf;
        } else {
                *prime_fd = ret;
                ret = 0;
        }

        goto out;

fail_put_dmabuf:
        dma_buf_put(dmabuf);
out:
        drm_gem_object_put_unlocked(obj);
out_unlock:
        mutex_unlock(&file_priv->prime.lock);

        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
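/*
 * Illustrative userspace sketch of the export path (drm_fd and gem_handle
 * are assumed to be an open DRM device fd and a valid GEM handle):
 *
 *      struct drm_prime_handle args = {
 *              .handle = gem_handle,
 *              .flags = DRM_CLOEXEC | DRM_RDWR,
 *      };
 *
 *      if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) == 0)
 *              dmabuf_fd = args.fd; /* ready to pass to another device */
 */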

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import. It's designed to be called by
 * drivers that want to use a different device structure than dev->dev for
 * attaching via dma_buf.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
                                            struct dma_buf *dma_buf,
                                            struct device *attach_dev)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct drm_gem_object *obj;
        int ret;

        if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own GEM object
                         * increases the refcount on the GEM object itself
                         * instead of the f_count of the dmabuf.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        if (!dev->driver->gem_prime_import_sg_table)
                return ERR_PTR(-EINVAL);

        attach = dma_buf_attach(dma_buf, attach_dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
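/*
 * Illustrative sketch: a driver whose DMA device differs from the DRM
 * device's own struct device (hypothetical foo names) can implement its
 * import callback on top of this function:
 *
 *      static struct drm_gem_object *
 *      foo_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *      {
 *              struct foo_device *foo = to_foo_device(dev);
 *
 *              return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
 *      }
 */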

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                               struct drm_file *file_priv, int prime_fd,
                               uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                        dma_buf, handle);
        if (ret == 0)
                goto out_put;

        /* never seen this one, need to import */
        mutex_lock(&dev->object_name_lock);
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_unlock;
        }

        if (obj->dma_buf) {
                WARN_ON(obj->dma_buf != dma_buf);
        } else {
                obj->dma_buf = dma_buf;
                get_dma_buf(dma_buf);
        }

        /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, handle);
        drm_gem_object_put_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_buf_handle(&file_priv->prime,
                        dma_buf, *handle);
        mutex_unlock(&file_priv->prime.lock);
        if (ret)
                goto fail;

        dma_buf_put(dma_buf);

        return 0;

fail:
        /* hmm, if driver attached, we are relying on the free-object path
         * to detach.. which seems ok..
         */
        drm_gem_handle_delete(file_priv, *handle);
        dma_buf_put(dma_buf);
        return ret;

out_unlock:
        mutex_unlock(&dev->object_name_lock);
out_put:
        mutex_unlock(&file_priv->prime.lock);
        dma_buf_put(dma_buf);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
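/*
 * Illustrative userspace sketch of the import path (dmabuf_fd is assumed to
 * come from another device's HANDLE_TO_FD export):
 *
 *      struct drm_prime_handle args = { .fd = dmabuf_fd };
 *
 *      if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) == 0)
 *              gem_handle = args.handle;
 */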

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
                return -EINVAL;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                        args->handle, args->flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                        args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
        struct sg_table *sg = NULL;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                                nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;

        return sg;
out:
        kfree(sg);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
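/*
 * Illustrative sketch of a driver's gem_prime_get_sg_table callback built on
 * this helper; struct foo_bo, to_foo_bo() and its page array are hypothetical:
 *
 *      static struct sg_table *
 *      foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *      {
 *              struct foo_bo *bo = to_foo_bo(obj);
 *
 *              return drm_prime_pages_to_sg(bo->pages, bo->num_pages);
 *      }
 */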

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_pages)
{
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
        u32 len;
        int pg_index;
        dma_addr_t addr;

        pg_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
                page = sg_page(sg);
                addr = sg_dma_address(sg);

                while (len > 0) {
                        if (WARN_ON(pg_index >= max_pages))
                                return -1;
                        pages[pg_index] = page;
                        if (addrs)
                                addrs[pg_index] = addr;

                        page++;
                        addr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        pg_index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
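/*
 * Illustrative sketch (hypothetical foo names): a TTM-style driver can expand
 * an imported sg table into per-page arrays for fault handling:
 *
 *      int npages = bo->gem.size >> PAGE_SHIFT;
 *
 *      ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages,
 *                                             bo->dma_addrs, npages);
 *      if (ret < 0)
 *              return ret;
 */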

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
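/*
 * Illustrative sketch: a driver's GEM free path (hypothetical foo names)
 * would hand imported objects back to this helper:
 *
 *      static void foo_gem_free_object(struct drm_gem_object *obj)
 *      {
 *              struct foo_bo *bo = to_foo_bo(obj);
 *
 *              if (obj->import_attach)
 *                      drm_prime_gem_destroy(obj, bo->sgt);
 *
 *              drm_gem_object_release(obj);
 *              kfree(bo);
 *      }
 */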

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        mutex_init(&prime_fpriv->lock);
        prime_fpriv->dmabufs = RB_ROOT;
        prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        /* by now drm_gem_release should've made sure the list is empty */
        WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
 900