linux/drivers/dma-buf/dma-buf.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Framework for buffer objects that can be shared across devices/subsystems.
   4 *
   5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
   6 * Author: Sumit Semwal <sumit.semwal@ti.com>
   7 *
   8 * Many thanks to the linaro-mm-sig list, and especially
   9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
  10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
  11 * refining of this idea.
  12 */
  13
  14#include <linux/fs.h>
  15#include <linux/slab.h>
  16#include <linux/dma-buf.h>
  17#include <linux/dma-fence.h>
  18#include <linux/anon_inodes.h>
  19#include <linux/export.h>
  20#include <linux/debugfs.h>
  21#include <linux/module.h>
  22#include <linux/seq_file.h>
  23#include <linux/poll.h>
  24#include <linux/dma-resv.h>
  25#include <linux/mm.h>
  26#include <linux/mount.h>
  27
  28#include <uapi/linux/dma-buf.h>
  29#include <uapi/linux/magic.h>
  30
  31static inline int is_dma_buf_file(struct file *);
  32
  33struct dma_buf_list {
  34        struct list_head head;
  35        struct mutex lock;
  36};
  37
  38static struct dma_buf_list db_list;
  39
  40static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
  41{
  42        struct dma_buf *dmabuf;
  43        char name[DMA_BUF_NAME_LEN];
  44        size_t ret = 0;
  45
  46        dmabuf = dentry->d_fsdata;
  47        dma_resv_lock(dmabuf->resv, NULL);
  48        if (dmabuf->name)
  49                ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
  50        dma_resv_unlock(dmabuf->resv);
  51
  52        return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
  53                             dentry->d_name.name, ret > 0 ? name : "");
  54}
  55
  56static const struct dentry_operations dma_buf_dentry_ops = {
  57        .d_dname = dmabuffs_dname,
  58};
  59
  60static struct vfsmount *dma_buf_mnt;
  61
  62static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type,
  63                                       int flags, const char *name, void *data)
  64{
  65        return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops,
  66                            DMA_BUF_MAGIC);
  67}
  68
  69static struct file_system_type dma_buf_fs_type = {
  70        .name = "dmabuf",
  71        .mount = dma_buf_fs_mount,
  72        .kill_sb = kill_anon_super,
  73};
  74
  75static int dma_buf_release(struct inode *inode, struct file *file)
  76{
  77        struct dma_buf *dmabuf;
  78
  79        if (!is_dma_buf_file(file))
  80                return -EINVAL;
  81
  82        dmabuf = file->private_data;
  83
  84        BUG_ON(dmabuf->vmapping_counter);
  85
  86        /*
  87         * Any fences that a dma-buf poll can wait on should be signaled
  88         * before releasing dma-buf. This is the responsibility of each
  89         * driver that uses the reservation objects.
  90         *
  91         * If you hit this BUG() it means someone dropped their ref to the
  92         * dma-buf while still having pending operations on the buffer.
  93         */
  94        BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
  95
  96        dmabuf->ops->release(dmabuf);
  97
  98        mutex_lock(&db_list.lock);
  99        list_del(&dmabuf->list_node);
 100        mutex_unlock(&db_list.lock);
 101
 102        if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
 103                dma_resv_fini(dmabuf->resv);
 104
 105        module_put(dmabuf->owner);
 106        kfree(dmabuf->name);
 107        kfree(dmabuf);
 108        return 0;
 109}
 110
 111static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 112{
 113        struct dma_buf *dmabuf;
 114
 115        if (!is_dma_buf_file(file))
 116                return -EINVAL;
 117
 118        dmabuf = file->private_data;
 119
 120        /* check if buffer supports mmap */
 121        if (!dmabuf->ops->mmap)
 122                return -EINVAL;
 123
 124        /* check for overflowing the buffer's size */
 125        if (vma->vm_pgoff + vma_pages(vma) >
 126            dmabuf->size >> PAGE_SHIFT)
 127                return -EINVAL;
 128
 129        return dmabuf->ops->mmap(dmabuf, vma);
 130}
 131
 132static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 133{
 134        struct dma_buf *dmabuf;
 135        loff_t base;
 136
 137        if (!is_dma_buf_file(file))
 138                return -EBADF;
 139
 140        dmabuf = file->private_data;
 141
 142        /* only support discovering the end of the buffer, but also
 143         * allow SEEK_SET to maintain the idiomatic SEEK_END(0),
 144         * SEEK_SET(0) pattern */
 145        if (whence == SEEK_END)
 146                base = dmabuf->size;
 147        else if (whence == SEEK_SET)
 148                base = 0;
 149        else
 150                return -EINVAL;
 151
 152        if (offset != 0)
 153                return -EINVAL;
 154
 155        return base + offset;
 156}
 157
 158/**
 159 * DOC: fence polling
 160 *
 161 * To support cross-device and cross-driver synchronization of buffer access,
 162 * implicit fences (represented internally in the kernel with &struct dma_fence) can
 163 * be attached to a &dma_buf. The glue for that and a few related things are
 164 * provided in the &dma_resv structure.
 165 *
 166 * Userspace can query the state of these implicitly tracked fences using poll()
 167 * and related system calls:
 168 *
 169 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
 170 *   most recent write or exclusive fence.
 171 *
 172 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 173 *   all attached fences, shared and exclusive ones.
 174 *
 175 * Note that this only signals the completion of the respective fences, i.e. the
 176 * DMA transfers are complete. Cache flushing and any other necessary
 177 * preparations before CPU access can begin still need to happen.
 178 */
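
/*
 * Example (minimal sketch): waiting for the implicit fences of a dma-buf from
 * userspace with poll(), as described above. The fd is assumed to have been
 * obtained from an exporting driver, and the helper name is made up for
 * illustration; error handling is omitted.
 *
 *     #include <poll.h>
 *
 *     static int wait_for_implicit_fences(int dmabuf_fd, int wait_for_all)
 *     {
 *             struct pollfd pfd = {
 *                     .fd = dmabuf_fd,
 *                     // POLLOUT waits for all fences, POLLIN only for the
 *                     // most recent write/exclusive fence.
 *                     .events = wait_for_all ? POLLOUT : POLLIN,
 *             };
 *
 *             // Blocks until the requested fences have signalled; cache
 *             // maintenance still needs DMA_BUF_IOCTL_SYNC around CPU access.
 *             return poll(&pfd, 1, -1);
 *     }
 */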
 179
 180static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 181{
 182        struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
 183        unsigned long flags;
 184
 185        spin_lock_irqsave(&dcb->poll->lock, flags);
 186        wake_up_locked_poll(dcb->poll, dcb->active);
 187        dcb->active = 0;
 188        spin_unlock_irqrestore(&dcb->poll->lock, flags);
 189}
 190
 191static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 192{
 193        struct dma_buf *dmabuf;
 194        struct dma_resv *resv;
 195        struct dma_resv_list *fobj;
 196        struct dma_fence *fence_excl;
 197        __poll_t events;
 198        unsigned shared_count, seq;
 199
 200        dmabuf = file->private_data;
 201        if (!dmabuf || !dmabuf->resv)
 202                return EPOLLERR;
 203
 204        resv = dmabuf->resv;
 205
 206        poll_wait(file, &dmabuf->poll, poll);
 207
 208        events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
 209        if (!events)
 210                return 0;
 211
 212retry:
 213        seq = read_seqcount_begin(&resv->seq);
 214        rcu_read_lock();
 215
 216        fobj = rcu_dereference(resv->fence);
 217        if (fobj)
 218                shared_count = fobj->shared_count;
 219        else
 220                shared_count = 0;
 221        fence_excl = rcu_dereference(resv->fence_excl);
 222        if (read_seqcount_retry(&resv->seq, seq)) {
 223                rcu_read_unlock();
 224                goto retry;
 225        }
 226
 227        if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 228                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
 229                __poll_t pevents = EPOLLIN;
 230
 231                if (shared_count == 0)
 232                        pevents |= EPOLLOUT;
 233
 234                spin_lock_irq(&dmabuf->poll.lock);
 235                if (dcb->active) {
 236                        dcb->active |= pevents;
 237                        events &= ~pevents;
 238                } else
 239                        dcb->active = pevents;
 240                spin_unlock_irq(&dmabuf->poll.lock);
 241
 242                if (events & pevents) {
 243                        if (!dma_fence_get_rcu(fence_excl)) {
 244                                /* force a recheck */
 245                                events &= ~pevents;
 246                                dma_buf_poll_cb(NULL, &dcb->cb);
 247                        } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
 248                                                           dma_buf_poll_cb)) {
 249                                events &= ~pevents;
 250                                dma_fence_put(fence_excl);
 251                        } else {
 252                                /*
 253                                 * No callback queued, wake up any additional
 254                                 * waiters.
 255                                 */
 256                                dma_fence_put(fence_excl);
 257                                dma_buf_poll_cb(NULL, &dcb->cb);
 258                        }
 259                }
 260        }
 261
 262        if ((events & EPOLLOUT) && shared_count > 0) {
 263                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
 264                int i;
 265
 266                /* Only queue a new callback if no event has fired yet */
 267                spin_lock_irq(&dmabuf->poll.lock);
 268                if (dcb->active)
 269                        events &= ~EPOLLOUT;
 270                else
 271                        dcb->active = EPOLLOUT;
 272                spin_unlock_irq(&dmabuf->poll.lock);
 273
 274                if (!(events & EPOLLOUT))
 275                        goto out;
 276
 277                for (i = 0; i < shared_count; ++i) {
 278                        struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 279
 280                        if (!dma_fence_get_rcu(fence)) {
 281                                /*
 282                                 * fence refcount dropped to zero, this means
 283                                 * that fobj has been freed
 284                                 *
 285                                 * call dma_buf_poll_cb and force a recheck!
 286                                 */
 287                                events &= ~EPOLLOUT;
 288                                dma_buf_poll_cb(NULL, &dcb->cb);
 289                                break;
 290                        }
 291                        if (!dma_fence_add_callback(fence, &dcb->cb,
 292                                                    dma_buf_poll_cb)) {
 293                                dma_fence_put(fence);
 294                                events &= ~EPOLLOUT;
 295                                break;
 296                        }
 297                        dma_fence_put(fence);
 298                }
 299
 300                /* No callback queued, wake up any additional waiters. */
 301                if (i == shared_count)
 302                        dma_buf_poll_cb(NULL, &dcb->cb);
 303        }
 304
 305out:
 306        rcu_read_unlock();
 307        return events;
 308}
 309
 310/**
 311 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 312 * The name of the dma-buf buffer can only be set when the dma-buf is not
 313 * attached to any device. It could theoretically support changing the
 314 * name of the dma-buf if the same piece of memory is used for multiple
 315 * purposes between different devices.
 316 *
 317 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 318 * @buf:    [in]     A piece of userspace memory that contains the name of
 319 *                   the dma-buf.
 320 *
 321 * Returns 0 on success. If the dma-buf buffer is already attached to
 322 * devices, returns -EBUSY.
 323 *
 324 */
 325static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
 326{
 327        char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
 328        long ret = 0;
 329
 330        if (IS_ERR(name))
 331                return PTR_ERR(name);
 332
 333        dma_resv_lock(dmabuf->resv, NULL);
 334        if (!list_empty(&dmabuf->attachments)) {
 335                ret = -EBUSY;
 336                kfree(name);
 337                goto out_unlock;
 338        }
 339        kfree(dmabuf->name);
 340        dmabuf->name = name;
 341
 342out_unlock:
 343        dma_resv_unlock(dmabuf->resv);
 344        return ret;
 345}
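
/*
 * Example (minimal sketch): labelling a dma-buf from userspace via the ioctl
 * handled below, which makes the name show up in debugfs and fdinfo. The
 * helper name is made up for illustration; error handling is omitted.
 *
 *     #include <sys/ioctl.h>
 *     #include <linux/dma-buf.h>
 *
 *     static int label_dmabuf(int dmabuf_fd, const char *name)
 *     {
 *             // Fails with EBUSY once any device is attached to the buffer.
 *             return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
 *     }
 */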
 346
 347static long dma_buf_ioctl(struct file *file,
 348                          unsigned int cmd, unsigned long arg)
 349{
 350        struct dma_buf *dmabuf;
 351        struct dma_buf_sync sync;
 352        enum dma_data_direction direction;
 353        int ret;
 354
 355        dmabuf = file->private_data;
 356
 357        switch (cmd) {
 358        case DMA_BUF_IOCTL_SYNC:
 359                if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
 360                        return -EFAULT;
 361
 362                if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
 363                        return -EINVAL;
 364
 365                switch (sync.flags & DMA_BUF_SYNC_RW) {
 366                case DMA_BUF_SYNC_READ:
 367                        direction = DMA_FROM_DEVICE;
 368                        break;
 369                case DMA_BUF_SYNC_WRITE:
 370                        direction = DMA_TO_DEVICE;
 371                        break;
 372                case DMA_BUF_SYNC_RW:
 373                        direction = DMA_BIDIRECTIONAL;
 374                        break;
 375                default:
 376                        return -EINVAL;
 377                }
 378
 379                if (sync.flags & DMA_BUF_SYNC_END)
 380                        ret = dma_buf_end_cpu_access(dmabuf, direction);
 381                else
 382                        ret = dma_buf_begin_cpu_access(dmabuf, direction);
 383
 384                return ret;
 385
 386        case DMA_BUF_SET_NAME_A:
 387        case DMA_BUF_SET_NAME_B:
 388                return dma_buf_set_name(dmabuf, (const char __user *)arg);
 389
 390        default:
 391                return -ENOTTY;
 392        }
 393}
 394
 395static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
 396{
 397        struct dma_buf *dmabuf = file->private_data;
 398
 399        seq_printf(m, "size:\t%zu\n", dmabuf->size);
 400        /* Don't count the temporary reference taken inside procfs seq_show */
 401        seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
 402        seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
 403        dma_resv_lock(dmabuf->resv, NULL);
 404        if (dmabuf->name)
 405                seq_printf(m, "name:\t%s\n", dmabuf->name);
 406        dma_resv_unlock(dmabuf->resv);
 407}
 408
 409static const struct file_operations dma_buf_fops = {
 410        .release        = dma_buf_release,
 411        .mmap           = dma_buf_mmap_internal,
 412        .llseek         = dma_buf_llseek,
 413        .poll           = dma_buf_poll,
 414        .unlocked_ioctl = dma_buf_ioctl,
 415        .compat_ioctl   = compat_ptr_ioctl,
 416        .show_fdinfo    = dma_buf_show_fdinfo,
 417};
 418
 419/*
 420 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 421 */
 422static inline int is_dma_buf_file(struct file *file)
 423{
 424        return file->f_op == &dma_buf_fops;
 425}
 426
 427static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 428{
 429        struct file *file;
 430        struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
 431
 432        if (IS_ERR(inode))
 433                return ERR_CAST(inode);
 434
 435        inode->i_size = dmabuf->size;
 436        inode_set_bytes(inode, dmabuf->size);
 437
 438        file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
 439                                 flags, &dma_buf_fops);
 440        if (IS_ERR(file))
 441                goto err_alloc_file;
 442        file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
 443        file->private_data = dmabuf;
 444        file->f_path.dentry->d_fsdata = dmabuf;
 445
 446        return file;
 447
 448err_alloc_file:
 449        iput(inode);
 450        return file;
 451}
 452
 453/**
 454 * DOC: dma buf device access
 455 *
 456 * For device DMA access to a shared DMA buffer the usual sequence of operations
 457 * is fairly simple:
 458 *
 459 * 1. The exporter defines its exporter instance using
 460 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 461 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 462 *    as a file descriptor by calling dma_buf_fd().
 463 *
 464 * 2. Userspace passes this file descriptor to all drivers it wants to share
 465 *    this buffer with: first the file descriptor is converted to a &dma_buf using
 466 *    dma_buf_get(). Then the buffer is attached to the device using
 467 *    dma_buf_attach().
 468 *
 469 *    Up to this stage the exporter is still free to migrate or reallocate the
 470 *    backing storage.
 471 *
 472 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 473 *    access to the shared buffer. In the kernel this is done by calling
 474 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 475 *
 476 * 4. Once a driver is done with a shared buffer it needs to call
 477 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 478 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 479 *
 480 * For the detailed semantics exporters are expected to implement see
 481 * &dma_buf_ops.
 482 */
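
/*
 * Example (minimal sketch): the importer-side sequence described above, for a
 * driver that uses the static mapping convention. The helper name is made up
 * for illustration and error handling is trimmed to the essentials.
 *
 *     static struct sg_table *import_buffer(struct device *dev, int fd,
 *                                           struct dma_buf **dmabuf_out,
 *                                           struct dma_buf_attachment **attach_out)
 *     {
 *             struct dma_buf *dmabuf;
 *             struct dma_buf_attachment *attach;
 *             struct sg_table *sgt;
 *
 *             dmabuf = dma_buf_get(fd);               // takes a file reference
 *             if (IS_ERR(dmabuf))
 *                     return ERR_CAST(dmabuf);
 *
 *             attach = dma_buf_attach(dmabuf, dev);   // device-specific attach, if any
 *             if (IS_ERR(attach)) {
 *                     dma_buf_put(dmabuf);
 *                     return ERR_CAST(attach);
 *             }
 *
 *             sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *             if (IS_ERR(sgt)) {
 *                     dma_buf_detach(dmabuf, attach);
 *                     dma_buf_put(dmabuf);
 *                     return sgt;
 *             }
 *
 *             *dmabuf_out = dmabuf;
 *             *attach_out = attach;
 *             return sgt;     // later: unmap_attachment(), detach(), put()
 *     }
 */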
 483
 484/**
 485 * dma_buf_export - Creates a new dma_buf and associates an anon file
 486 * with this buffer, so it can be exported.
 487 * Also connects the allocator-specific data and ops to the buffer.
 488 * Additionally, provides a name string for the exporter; useful in debugging.
 489 *
 490 * @exp_info:   [in]    holds all the export-related information provided
 491 *                      by the exporter. See &struct dma_buf_export_info
 492 *                      for further details.
 493 *
 494 * Returns, on success, a newly created dma_buf object, which wraps the
 495 * supplied private data and operations for dma_buf_ops. On missing ops or
 496 * an error allocating the struct dma_buf, returns a negative error pointer.
 497 *
 498 * For most cases the easiest way to create @exp_info is through the
 499 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 500 */
 501struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 502{
 503        struct dma_buf *dmabuf;
 504        struct dma_resv *resv = exp_info->resv;
 505        struct file *file;
 506        size_t alloc_size = sizeof(struct dma_buf);
 507        int ret;
 508
 509        if (!exp_info->resv)
 510                alloc_size += sizeof(struct dma_resv);
 511        else
 512                /* prevent &dma_buf[1] == dma_buf->resv */
 513                alloc_size += 1;
 514
 515        if (WARN_ON(!exp_info->priv
 516                          || !exp_info->ops
 517                          || !exp_info->ops->map_dma_buf
 518                          || !exp_info->ops->unmap_dma_buf
 519                          || !exp_info->ops->release)) {
 520                return ERR_PTR(-EINVAL);
 521        }
 522
 523        if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
 524                    exp_info->ops->dynamic_mapping))
 525                return ERR_PTR(-EINVAL);
 526
 527        if (!try_module_get(exp_info->owner))
 528                return ERR_PTR(-ENOENT);
 529
 530        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 531        if (!dmabuf) {
 532                ret = -ENOMEM;
 533                goto err_module;
 534        }
 535
 536        dmabuf->priv = exp_info->priv;
 537        dmabuf->ops = exp_info->ops;
 538        dmabuf->size = exp_info->size;
 539        dmabuf->exp_name = exp_info->exp_name;
 540        dmabuf->owner = exp_info->owner;
 541        init_waitqueue_head(&dmabuf->poll);
 542        dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 543        dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 544
 545        if (!resv) {
 546                resv = (struct dma_resv *)&dmabuf[1];
 547                dma_resv_init(resv);
 548        }
 549        dmabuf->resv = resv;
 550
 551        file = dma_buf_getfile(dmabuf, exp_info->flags);
 552        if (IS_ERR(file)) {
 553                ret = PTR_ERR(file);
 554                goto err_dmabuf;
 555        }
 556
 557        file->f_mode |= FMODE_LSEEK;
 558        dmabuf->file = file;
 559
 560        mutex_init(&dmabuf->lock);
 561        INIT_LIST_HEAD(&dmabuf->attachments);
 562
 563        mutex_lock(&db_list.lock);
 564        list_add(&dmabuf->list_node, &db_list.head);
 565        mutex_unlock(&db_list.lock);
 566
 567        return dmabuf;
 568
 569err_dmabuf:
 570        kfree(dmabuf);
 571err_module:
 572        module_put(exp_info->owner);
 573        return ERR_PTR(ret);
 574}
 575EXPORT_SYMBOL_GPL(dma_buf_export);
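
/*
 * Example (minimal sketch): exporting a driver-private buffer object with the
 * function above and handing it to userspace as an fd. "struct my_buf" and
 * "my_dmabuf_ops" are hypothetical names standing in for the exporter's own
 * object and its &dma_buf_ops implementation.
 *
 *     static int my_export(struct my_buf *buf, int fd_flags)
 *     {
 *             DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *             struct dma_buf *dmabuf;
 *             int fd;
 *
 *             exp_info.ops = &my_dmabuf_ops;  // must provide map/unmap/release
 *             exp_info.size = buf->size;
 *             exp_info.flags = O_RDWR;
 *             exp_info.priv = buf;
 *
 *             dmabuf = dma_buf_export(&exp_info);
 *             if (IS_ERR(dmabuf))
 *                     return PTR_ERR(dmabuf);
 *
 *             fd = dma_buf_fd(dmabuf, fd_flags);
 *             if (fd < 0)
 *                     dma_buf_put(dmabuf);    // drops the only reference
 *             return fd;
 *     }
 */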
 576
 577/**
 578 * dma_buf_fd - returns a file descriptor for the given dma_buf
 579 * @dmabuf:     [in]    pointer to dma_buf for which fd is required.
 580 * @flags:      [in]    flags to give to fd
 581 *
 582 * On success, returns an associated 'fd'. Else, returns error.
 583 */
 584int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 585{
 586        int fd;
 587
 588        if (!dmabuf || !dmabuf->file)
 589                return -EINVAL;
 590
 591        fd = get_unused_fd_flags(flags);
 592        if (fd < 0)
 593                return fd;
 594
 595        fd_install(fd, dmabuf->file);
 596
 597        return fd;
 598}
 599EXPORT_SYMBOL_GPL(dma_buf_fd);
 600
 601/**
 602 * dma_buf_get - returns the dma_buf structure related to an fd
 603 * @fd: [in]    fd associated with the dma_buf to be returned
 604 *
 605 * On success, returns the dma_buf structure associated with an fd; uses
 606 * file's refcounting done by fget() to increase refcount. Returns ERR_PTR
 607 * otherwise.
 608 */
 609struct dma_buf *dma_buf_get(int fd)
 610{
 611        struct file *file;
 612
 613        file = fget(fd);
 614
 615        if (!file)
 616                return ERR_PTR(-EBADF);
 617
 618        if (!is_dma_buf_file(file)) {
 619                fput(file);
 620                return ERR_PTR(-EINVAL);
 621        }
 622
 623        return file->private_data;
 624}
 625EXPORT_SYMBOL_GPL(dma_buf_get);
 626
 627/**
 628 * dma_buf_put - decreases refcount of the buffer
 629 * @dmabuf:     [in]    buffer to reduce refcount of
 630 *
 631 * Uses file's refcounting done implicitly by fput().
 632 *
 633 * If, as a result of this call, the refcount becomes 0, the 'release' file
 634 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 635 * in turn, and frees the memory allocated for dmabuf when exported.
 636 */
 637void dma_buf_put(struct dma_buf *dmabuf)
 638{
 639        if (WARN_ON(!dmabuf || !dmabuf->file))
 640                return;
 641
 642        fput(dmabuf->file);
 643}
 644EXPORT_SYMBOL_GPL(dma_buf_put);
 645
 646/**
 647 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
 648 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 649 * @dmabuf:             [in]    buffer to attach device to.
 650 * @dev:                [in]    device to be attached.
 651 * @dynamic_mapping:    [in]    calling convention for map/unmap
 652 *
 653 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 654 * must be cleaned up by calling dma_buf_detach().
 655 *
 656 * Returns:
 657 *
 658 * A pointer to the newly created &dma_buf_attachment on success, or a negative
 659 * error code wrapped into a pointer on failure.
 660 *
 661 * Note that this can fail if the backing storage of @dmabuf is in a place not
 662 * accessible to @dev, and cannot be moved to a more suitable place. This is
 663 * indicated with the error code -EBUSY.
 664 */
 665struct dma_buf_attachment *
 666dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 667                       bool dynamic_mapping)
 668{
 669        struct dma_buf_attachment *attach;
 670        int ret;
 671
 672        if (WARN_ON(!dmabuf || !dev))
 673                return ERR_PTR(-EINVAL);
 674
 675        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 676        if (!attach)
 677                return ERR_PTR(-ENOMEM);
 678
 679        attach->dev = dev;
 680        attach->dmabuf = dmabuf;
 681        attach->dynamic_mapping = dynamic_mapping;
 682
 683        if (dmabuf->ops->attach) {
 684                ret = dmabuf->ops->attach(dmabuf, attach);
 685                if (ret)
 686                        goto err_attach;
 687        }
 688        dma_resv_lock(dmabuf->resv, NULL);
 689        list_add(&attach->node, &dmabuf->attachments);
 690        dma_resv_unlock(dmabuf->resv);
 691
 692        /* When either the importer or the exporter can't handle dynamic
 693         * mappings we cache the mapping here to avoid issues with the
 694         * reservation object lock.
 695         */
 696        if (dma_buf_attachment_is_dynamic(attach) !=
 697            dma_buf_is_dynamic(dmabuf)) {
 698                struct sg_table *sgt;
 699
 700                if (dma_buf_is_dynamic(attach->dmabuf))
 701                        dma_resv_lock(attach->dmabuf->resv, NULL);
 702
 703                sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
 704                if (!sgt)
 705                        sgt = ERR_PTR(-ENOMEM);
 706                if (IS_ERR(sgt)) {
 707                        ret = PTR_ERR(sgt);
 708                        goto err_unlock;
 709                }
 710                if (dma_buf_is_dynamic(attach->dmabuf))
 711                        dma_resv_unlock(attach->dmabuf->resv);
 712                attach->sgt = sgt;
 713                attach->dir = DMA_BIDIRECTIONAL;
 714        }
 715
 716        return attach;
 717
 718err_attach:
 719        kfree(attach);
 720        return ERR_PTR(ret);
 721
 722err_unlock:
 723        if (dma_buf_is_dynamic(attach->dmabuf))
 724                dma_resv_unlock(attach->dmabuf->resv);
 725
 726        dma_buf_detach(dmabuf, attach);
 727        return ERR_PTR(ret);
 728}
 729EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
 730
 731/**
 732 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 733 * @dmabuf:     [in]    buffer to attach device to.
 734 * @dev:        [in]    device to be attached.
 735 *
 736 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 737 * mapping.
 738 */
 739struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 740                                          struct device *dev)
 741{
 742        return dma_buf_dynamic_attach(dmabuf, dev, false);
 743}
 744EXPORT_SYMBOL_GPL(dma_buf_attach);
 745
 746/**
 747 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 748 * optionally calls detach() of dma_buf_ops for device-specific detach
 749 * @dmabuf:     [in]    buffer to detach from.
 750 * @attach:     [in]    attachment to be detached; is freed after this call.
 751 *
 752 * Clean up a device attachment obtained by calling dma_buf_attach().
 753 */
 754void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 755{
 756        if (WARN_ON(!dmabuf || !attach))
 757                return;
 758
 759        if (attach->sgt) {
 760                if (dma_buf_is_dynamic(attach->dmabuf))
 761                        dma_resv_lock(attach->dmabuf->resv, NULL);
 762
 763                dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
 764
 765                if (dma_buf_is_dynamic(attach->dmabuf))
 766                        dma_resv_unlock(attach->dmabuf->resv);
 767        }
 768
 769        dma_resv_lock(dmabuf->resv, NULL);
 770        list_del(&attach->node);
 771        dma_resv_unlock(dmabuf->resv);
 772        if (dmabuf->ops->detach)
 773                dmabuf->ops->detach(dmabuf, attach);
 774
 775        kfree(attach);
 776}
 777EXPORT_SYMBOL_GPL(dma_buf_detach);
 778
 779/**
 780 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 781 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 782 * dma_buf_ops.
 783 * @attach:     [in]    attachment whose scatterlist is to be returned
 784 * @direction:  [in]    direction of DMA transfer
 785 *
 786 * Returns an sg_table containing the scatterlist of the mapping; returns ERR_PTR
 787 * on error. May return -EINTR if it is interrupted by a signal.
 788 *
 789 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 790 * the underlying backing storage is pinned for as long as a mapping exists,
 791 * therefore users/importers should not hold onto a mapping for undue amounts of
 792 * time.
 793 */
 794struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 795                                        enum dma_data_direction direction)
 796{
 797        struct sg_table *sg_table;
 798
 799        might_sleep();
 800
 801        if (WARN_ON(!attach || !attach->dmabuf))
 802                return ERR_PTR(-EINVAL);
 803
 804        if (dma_buf_attachment_is_dynamic(attach))
 805                dma_resv_assert_held(attach->dmabuf->resv);
 806
 807        if (attach->sgt) {
 808                /*
 809                 * Two mappings with different directions for the same
 810                 * attachment are not allowed.
 811                 */
 812                if (attach->dir != direction &&
 813                    attach->dir != DMA_BIDIRECTIONAL)
 814                        return ERR_PTR(-EBUSY);
 815
 816                return attach->sgt;
 817        }
 818
 819        if (dma_buf_is_dynamic(attach->dmabuf))
 820                dma_resv_assert_held(attach->dmabuf->resv);
 821
 822        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 823        if (!sg_table)
 824                sg_table = ERR_PTR(-ENOMEM);
 825
 826        if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
 827                attach->sgt = sg_table;
 828                attach->dir = direction;
 829        }
 830
 831        return sg_table;
 832}
 833EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
 834
 835/**
 836 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 837 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 838 * dma_buf_ops.
 839 * @attach:     [in]    attachment to unmap buffer from
 840 * @sg_table:   [in]    scatterlist info of the buffer to unmap
 841 * @direction:  [in]    direction of DMA transfer
 842 *
 843 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 844 */
 845void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 846                                struct sg_table *sg_table,
 847                                enum dma_data_direction direction)
 848{
 849        might_sleep();
 850
 851        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 852                return;
 853
 854        if (dma_buf_attachment_is_dynamic(attach))
 855                dma_resv_assert_held(attach->dmabuf->resv);
 856
 857        if (attach->sgt == sg_table)
 858                return;
 859
 860        if (dma_buf_is_dynamic(attach->dmabuf))
 861                dma_resv_assert_held(attach->dmabuf->resv);
 862
 863        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 864}
 865EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
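
/*
 * Example (minimal sketch): tearing down a mapping obtained with
 * dma_buf_map_attachment(), in the reverse order of the import sequence. The
 * helper name is made up for illustration.
 *
 *     static void release_buffer(struct dma_buf *dmabuf,
 *                                struct dma_buf_attachment *attach,
 *                                struct sg_table *sgt)
 *     {
 *             dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *             dma_buf_detach(dmabuf, attach);
 *             dma_buf_put(dmabuf);    // drops the reference from dma_buf_get()
 *     }
 */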
 866
 867/**
 868 * DOC: cpu access
 869 *
 870 * There are multiple reasons for supporting CPU access to a dma buffer object:
 871 *
 872 * - Fallback operations in the kernel, for example when a device is connected
 873 *   over USB and the kernel needs to shuffle the data around first before
 874 *   sending it away. Cache coherency is handled by bracketing any CPU
 875 *   transactions with calls to dma_buf_begin_cpu_access() and
 876 *   dma_buf_end_cpu_access().
 877 *
 878 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 879 *   vmap interface is introduced. Note that on very old 32-bit architectures
 880 *   vmalloc space might be limited and result in vmap calls failing.
 881 *
 882 *   Interfaces::
 883 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 884 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 885 *
 886 *   The vmap call can fail if there is no vmap support in the exporter, or if
 887 *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
 888 *   that the dma-buf layer keeps a reference count for all vmap access and
 889 *   calls down into the exporter's vmap function only when no vmapping exists,
 890 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 891 *   provided by taking the dma_buf->lock mutex.
 892 *
 893 * - For full compatibility on the importer side with existing userspace
 894 *   interfaces, which might already support mmap'ing buffers. This is needed in
 895 *   many processing pipelines (e.g. feeding a software rendered image into a
 896 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 897 *   framework already supported this, and mmap support was needed for DMA
 898 *   buffer file descriptors to replace ION buffers.
 899 *
 900 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
 901 *   fd. But as with CPU access, there's a need to bracket the actual access,
 902 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 903 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 904 *   be restarted.
 905 *
 906 *   Some systems might need some sort of cache coherency management, e.g. when
 907 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 908 *   To circumvent this problem there are begin/end coherency markers, that
 909 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 910 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 911 *   sequence would be used like following:
 912 *
 913 *     - mmap dma-buf fd
 914 *     - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
 915 *       to the mmap area, 3. SYNC_END ioctl. This can be repeated as often as you
 916 *       want (with the new data being consumed by, say, the GPU or the scanout
 917 *       device)
 918 *     - munmap once you don't need the buffer any more
 919 *
 920 *   For correctness and optimal performance, it is always required to use
 921 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 922 *   mapped address. Userspace cannot rely on coherent access, even when there
 923 *   are systems where it just works without calling these ioctls.
 924 *
 925 * - And as a CPU fallback in userspace processing pipelines.
 926 *
 927 *   Similar to the motivation for kernel cpu access it is again important that
 928 *   the userspace code of a given importing subsystem can use the same
 929 *   interfaces with an imported dma-buf buffer object as with a native buffer
 930 *   object. This is especially important for drm where the userspace part of
 931 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 932 *   use a different way to mmap a buffer would be rather invasive.
 933 *
 934 *   The assumption in the current dma-buf interfaces is that redirecting the
 935 *   initial mmap is all that's needed. A survey of some of the existing
 936 *   subsystems shows that no driver seems to do any nefarious thing like
 937 *   syncing up with outstanding asynchronous processing on the device or
 938 *   allocating special resources at fault time. So hopefully this is good
 939 *   enough, since adding interfaces to intercept pagefaults and allow pte
 940 *   shootdowns would increase the complexity quite a bit.
 941 *
 942 *   Interface::
 943 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 944 *                     unsigned long);
 945 *
 946 *   If the importing subsystem simply provides a special-purpose mmap call to
 947 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 948 *   equally achieve that for a dma-buf object.
 949 */
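
/*
 * Example (minimal sketch): the userspace mmap plus DMA_BUF_IOCTL_SYNC
 * bracketing described above. buf_size is assumed to be known from the
 * exporting API, the helper name is made up, and error/EINTR handling is
 * omitted for brevity.
 *
 *     #include <string.h>
 *     #include <sys/mman.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/dma-buf.h>
 *
 *     static void cpu_fill(int dmabuf_fd, size_t buf_size)
 *     {
 *             struct dma_buf_sync sync = { 0 };
 *             void *map = mmap(NULL, buf_size, PROT_READ | PROT_WRITE,
 *                              MAP_SHARED, dmabuf_fd, 0);
 *
 *             sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *             ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *             memset(map, 0, buf_size);       // CPU access to the mapping
 *
 *             sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *             ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *             munmap(map, buf_size);
 *     }
 */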
 950
 951static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 952                                      enum dma_data_direction direction)
 953{
 954        bool write = (direction == DMA_BIDIRECTIONAL ||
 955                      direction == DMA_TO_DEVICE);
 956        struct dma_resv *resv = dmabuf->resv;
 957        long ret;
 958
 959        /* Wait on any implicit rendering fences */
 960        ret = dma_resv_wait_timeout_rcu(resv, write, true,
 961                                                  MAX_SCHEDULE_TIMEOUT);
 962        if (ret < 0)
 963                return ret;
 964
 965        return 0;
 966}
 967
 968/**
 969 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 970 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 971 * preparations. Coherency is only guaranteed for the specified access
 972 * direction.
 973 * @dmabuf:     [in]    buffer to prepare cpu access for.
 974 * @direction:  [in]    direction of the cpu access.
 975 *
 976 * After the cpu access is complete the caller should call
 977 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 978 * it guaranteed to be coherent with other DMA access.
 979 *
 980 * Can return negative error values, returns 0 on success.
 981 */
 982int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 983                             enum dma_data_direction direction)
 984{
 985        int ret = 0;
 986
 987        if (WARN_ON(!dmabuf))
 988                return -EINVAL;
 989
 990        if (dmabuf->ops->begin_cpu_access)
 991                ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 992
 993        /* Ensure that all fences are waited upon - but we first allow
 994         * the native handler the chance to do so more efficiently if it
 995         * chooses. A double invocation here will be a reasonably cheap no-op.
 996         */
 997        if (ret == 0)
 998                ret = __dma_buf_begin_cpu_access(dmabuf, direction);
 999
1000        return ret;
1001}
1002EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
1003
1004/**
1005 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1006 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1007 * actions. Coherency is only guaranteed for the specified access
1008 * direction.
1009 * @dmabuf:     [in]    buffer to complete cpu access for.
1010 * @direction:  [in]    direction of the cpu access.
1011 *
1012 * This terminates CPU access started with dma_buf_begin_cpu_access().
1013 *
1014 * Can return negative error values, returns 0 on success.
1015 */
1016int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1017                           enum dma_data_direction direction)
1018{
1019        int ret = 0;
1020
1021        WARN_ON(!dmabuf);
1022
1023        if (dmabuf->ops->end_cpu_access)
1024                ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1025
1026        return ret;
1027}
1028EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
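
/*
 * Example (minimal sketch): bracketing kernel CPU access with the two calls
 * above, combined with dma_buf_vmap()/dma_buf_vunmap() from further below.
 * The helper name is made up; the dmabuf pointer is assumed to come from
 * dma_buf_get() or from the exporter itself.
 *
 *     static int cpu_clear_buffer(struct dma_buf *dmabuf)
 *     {
 *             void *vaddr;
 *             int ret;
 *
 *             // CPU writes data the device will read -> DMA_TO_DEVICE
 *             ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
 *             if (ret)
 *                     return ret;
 *
 *             vaddr = dma_buf_vmap(dmabuf);
 *             if (vaddr) {
 *                     memset(vaddr, 0, dmabuf->size);
 *                     dma_buf_vunmap(dmabuf, vaddr);
 *             }
 *
 *             // Always pair begin with end, even if vmap was unavailable.
 *             return dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
 *     }
 */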
1029
1030
1031/**
1032 * dma_buf_mmap - Set up a userspace mmap with the given vma
1033 * @dmabuf:     [in]    buffer that should back the vma
1034 * @vma:        [in]    vma for the mmap
1035 * @pgoff:      [in]    offset in pages where this mmap should start within the
1036 *                      dma-buf buffer.
1037 *
1038 * This function adjusts the passed-in vma so that it points at the file of the
1039 * dma_buf operation. It also adjusts the starting pgoff and does bounds
1040 * checking on the size of the vma. Then it calls the exporter's mmap function to
1041 * set up the mapping.
1042 *
1043 * Can return negative error values, returns 0 on success.
1044 */
1045int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1046                 unsigned long pgoff)
1047{
1048        struct file *oldfile;
1049        int ret;
1050
1051        if (WARN_ON(!dmabuf || !vma))
1052                return -EINVAL;
1053
1054        /* check if buffer supports mmap */
1055        if (!dmabuf->ops->mmap)
1056                return -EINVAL;
1057
1058        /* check for offset overflow */
1059        if (pgoff + vma_pages(vma) < pgoff)
1060                return -EOVERFLOW;
1061
1062        /* check for overflowing the buffer's size */
1063        if (pgoff + vma_pages(vma) >
1064            dmabuf->size >> PAGE_SHIFT)
1065                return -EINVAL;
1066
1067        /* readjust the vma */
1068        get_file(dmabuf->file);
1069        oldfile = vma->vm_file;
1070        vma->vm_file = dmabuf->file;
1071        vma->vm_pgoff = pgoff;
1072
1073        ret = dmabuf->ops->mmap(dmabuf, vma);
1074        if (ret) {
1075                /* restore old parameters on failure */
1076                vma->vm_file = oldfile;
1077                fput(dmabuf->file);
1078        } else {
1079                if (oldfile)
1080                        fput(oldfile);
1081        }
1082        return ret;
1083
1084}
1085EXPORT_SYMBOL_GPL(dma_buf_mmap);
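
/*
 * Example (minimal sketch): an importing driver forwarding a userspace mmap
 * request on one of its own objects to the dma-buf it imported, using the
 * function above. "struct my_obj" is a hypothetical structure that holds the
 * imported dma_buf pointer.
 *
 *     static int my_obj_mmap(struct my_obj *obj, struct vm_area_struct *vma)
 *     {
 *             // Re-points vma->vm_file at the dma-buf file, applies the page
 *             // offset, does the bounds check, and calls the exporter's mmap.
 *             return dma_buf_mmap(obj->dmabuf, vma, 0);
 *     }
 */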
1086
1087/**
1088 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1089 * address space. Same restrictions as for vmap and friends apply.
1090 * @dmabuf:     [in]    buffer to vmap
1091 *
1092 * This call may fail due to lack of virtual mapping address space.
1093 * These calls are optional in drivers. The intended use for them
1094 * is for mapping objects linearly in kernel space for high-use objects.
1095 * Please attempt to use kmap/kunmap before thinking about these interfaces.
1096 *
1097 * Returns NULL on error.
1098 */
1099void *dma_buf_vmap(struct dma_buf *dmabuf)
1100{
1101        void *ptr;
1102
1103        if (WARN_ON(!dmabuf))
1104                return NULL;
1105
1106        if (!dmabuf->ops->vmap)
1107                return NULL;
1108
1109        mutex_lock(&dmabuf->lock);
1110        if (dmabuf->vmapping_counter) {
1111                dmabuf->vmapping_counter++;
1112                BUG_ON(!dmabuf->vmap_ptr);
1113                ptr = dmabuf->vmap_ptr;
1114                goto out_unlock;
1115        }
1116
1117        BUG_ON(dmabuf->vmap_ptr);
1118
1119        ptr = dmabuf->ops->vmap(dmabuf);
1120        if (WARN_ON_ONCE(IS_ERR(ptr)))
1121                ptr = NULL;
1122        if (!ptr)
1123                goto out_unlock;
1124
1125        dmabuf->vmap_ptr = ptr;
1126        dmabuf->vmapping_counter = 1;
1127
1128out_unlock:
1129        mutex_unlock(&dmabuf->lock);
1130        return ptr;
1131}
1132EXPORT_SYMBOL_GPL(dma_buf_vmap);
1133
1134/**
1135 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1136 * @dmabuf:     [in]    buffer to vunmap
1137 * @vaddr:      [in]    vmap to vunmap
1138 */
1139void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
1140{
1141        if (WARN_ON(!dmabuf))
1142                return;
1143
1144        BUG_ON(!dmabuf->vmap_ptr);
1145        BUG_ON(dmabuf->vmapping_counter == 0);
1146        BUG_ON(dmabuf->vmap_ptr != vaddr);
1147
1148        mutex_lock(&dmabuf->lock);
1149        if (--dmabuf->vmapping_counter == 0) {
1150                if (dmabuf->ops->vunmap)
1151                        dmabuf->ops->vunmap(dmabuf, vaddr);
1152                dmabuf->vmap_ptr = NULL;
1153        }
1154        mutex_unlock(&dmabuf->lock);
1155}
1156EXPORT_SYMBOL_GPL(dma_buf_vunmap);
1157
1158#ifdef CONFIG_DEBUG_FS
1159static int dma_buf_debug_show(struct seq_file *s, void *unused)
1160{
1161        int ret;
1162        struct dma_buf *buf_obj;
1163        struct dma_buf_attachment *attach_obj;
1164        struct dma_resv *robj;
1165        struct dma_resv_list *fobj;
1166        struct dma_fence *fence;
1167        unsigned seq;
1168        int count = 0, attach_count, shared_count, i;
1169        size_t size = 0;
1170
1171        ret = mutex_lock_interruptible(&db_list.lock);
1172
1173        if (ret)
1174                return ret;
1175
1176        seq_puts(s, "\nDma-buf Objects:\n");
1177        seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
1178                   "size", "flags", "mode", "count", "ino");
1179
1180        list_for_each_entry(buf_obj, &db_list.head, list_node) {
1181
1182                ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1183                if (ret)
1184                        goto error_unlock;
1185
1186                seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1187                                buf_obj->size,
1188                                buf_obj->file->f_flags, buf_obj->file->f_mode,
1189                                file_count(buf_obj->file),
1190                                buf_obj->exp_name,
1191                                file_inode(buf_obj->file)->i_ino,
1192                                buf_obj->name ?: "");
1193
1194                robj = buf_obj->resv;
1195                while (true) {
1196                        seq = read_seqcount_begin(&robj->seq);
1197                        rcu_read_lock();
1198                        fobj = rcu_dereference(robj->fence);
1199                        shared_count = fobj ? fobj->shared_count : 0;
1200                        fence = rcu_dereference(robj->fence_excl);
1201                        if (!read_seqcount_retry(&robj->seq, seq))
1202                                break;
1203                        rcu_read_unlock();
1204                }
1205
1206                if (fence)
1207                        seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1208                                   fence->ops->get_driver_name(fence),
1209                                   fence->ops->get_timeline_name(fence),
1210                                   dma_fence_is_signaled(fence) ? "" : "un");
1211                for (i = 0; i < shared_count; i++) {
1212                        fence = rcu_dereference(fobj->shared[i]);
1213                        if (!dma_fence_get_rcu(fence))
1214                                continue;
1215                        seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1216                                   fence->ops->get_driver_name(fence),
1217                                   fence->ops->get_timeline_name(fence),
1218                                   dma_fence_is_signaled(fence) ? "" : "un");
1219                        dma_fence_put(fence);
1220                }
1221                rcu_read_unlock();
1222
1223                seq_puts(s, "\tAttached Devices:\n");
1224                attach_count = 0;
1225
1226                list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1227                        seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1228                        attach_count++;
1229                }
1230                dma_resv_unlock(buf_obj->resv);
1231
1232                seq_printf(s, "Total %d devices attached\n\n",
1233                                attach_count);
1234
1235                count++;
1236                size += buf_obj->size;
1237        }
1238
1239        seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1240
1241        mutex_unlock(&db_list.lock);
1242        return 0;
1243
1244error_unlock:
1245        mutex_unlock(&db_list.lock);
1246        return ret;
1247}
1248
1249DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1250
1251static struct dentry *dma_buf_debugfs_dir;
1252
1253static int dma_buf_init_debugfs(void)
1254{
1255        struct dentry *d;
1256        int err = 0;
1257
1258        d = debugfs_create_dir("dma_buf", NULL);
1259        if (IS_ERR(d))
1260                return PTR_ERR(d);
1261
1262        dma_buf_debugfs_dir = d;
1263
1264        d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1265                                NULL, &dma_buf_debug_fops);
1266        if (IS_ERR(d)) {
1267                pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1268                debugfs_remove_recursive(dma_buf_debugfs_dir);
1269                dma_buf_debugfs_dir = NULL;
1270                err = PTR_ERR(d);
1271        }
1272
1273        return err;
1274}
1275
1276static void dma_buf_uninit_debugfs(void)
1277{
1278        debugfs_remove_recursive(dma_buf_debugfs_dir);
1279}
1280#else
1281static inline int dma_buf_init_debugfs(void)
1282{
1283        return 0;
1284}
1285static inline void dma_buf_uninit_debugfs(void)
1286{
1287}
1288#endif
1289
1290static int __init dma_buf_init(void)
1291{
1292        dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1293        if (IS_ERR(dma_buf_mnt))
1294                return PTR_ERR(dma_buf_mnt);
1295
1296        mutex_init(&db_list.lock);
1297        INIT_LIST_HEAD(&db_list.head);
1298        dma_buf_init_debugfs();
1299        return 0;
1300}
1301subsys_initcall(dma_buf_init);
1302
1303static void __exit dma_buf_deinit(void)
1304{
1305        dma_buf_uninit_debugfs();
1306        kern_unmount(dma_buf_mnt);
1307}
1308__exitcall(dma_buf_deinit);
1309