linux/drivers/dma-buf/dma-buf.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Framework for buffer objects that can be shared across devices/subsystems.
   4 *
   5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
   6 * Author: Sumit Semwal <sumit.semwal@ti.com>
   7 *
   8 * Many thanks to linaro-mm-sig list, and especially
   9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
  10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
  11 * refining of this idea.
  12 */
  13
  14#include <linux/fs.h>
  15#include <linux/slab.h>
  16#include <linux/dma-buf.h>
  17#include <linux/dma-fence.h>
  18#include <linux/anon_inodes.h>
  19#include <linux/export.h>
  20#include <linux/debugfs.h>
  21#include <linux/module.h>
  22#include <linux/seq_file.h>
  23#include <linux/poll.h>
  24#include <linux/dma-resv.h>
  25#include <linux/mm.h>
  26#include <linux/mount.h>
  27#include <linux/pseudo_fs.h>
  28
  29#include <uapi/linux/dma-buf.h>
  30#include <uapi/linux/magic.h>
  31
  32#include "dma-buf-sysfs-stats.h"
  33
  34static inline int is_dma_buf_file(struct file *);
  35
  36struct dma_buf_list {
  37        struct list_head head;
  38        struct mutex lock;
  39};
  40
  41static struct dma_buf_list db_list;
  42
  43static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
  44{
  45        struct dma_buf *dmabuf;
  46        char name[DMA_BUF_NAME_LEN];
  47        size_t ret = 0;
  48
  49        dmabuf = dentry->d_fsdata;
  50        spin_lock(&dmabuf->name_lock);
  51        if (dmabuf->name)
  52                ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
  53        spin_unlock(&dmabuf->name_lock);
  54
  55        return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
  56                             dentry->d_name.name, ret > 0 ? name : "");
  57}
  58
  59static void dma_buf_release(struct dentry *dentry)
  60{
  61        struct dma_buf *dmabuf;
  62
  63        dmabuf = dentry->d_fsdata;
  64        if (unlikely(!dmabuf))
  65                return;
  66
  67        BUG_ON(dmabuf->vmapping_counter);
  68
  69        /*
  70         * Any fences that a dma-buf poll can wait on should be signaled
  71         * before releasing dma-buf. This is the responsibility of each
  72         * driver that uses the reservation objects.
  73         *
  74         * If you hit this BUG() it means someone dropped their ref to the
  75         * dma-buf while still having pending operation to the buffer.
  76         */
  77        BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
  78
  79        dma_buf_stats_teardown(dmabuf);
  80        dmabuf->ops->release(dmabuf);
  81
  82        if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
  83                dma_resv_fini(dmabuf->resv);
  84
  85        module_put(dmabuf->owner);
  86        kfree(dmabuf->name);
  87        kfree(dmabuf);
  88}
  89
  90static int dma_buf_file_release(struct inode *inode, struct file *file)
  91{
  92        struct dma_buf *dmabuf;
  93
  94        if (!is_dma_buf_file(file))
  95                return -EINVAL;
  96
  97        dmabuf = file->private_data;
  98
  99        mutex_lock(&db_list.lock);
 100        list_del(&dmabuf->list_node);
 101        mutex_unlock(&db_list.lock);
 102
 103        return 0;
 104}
 105
 106static const struct dentry_operations dma_buf_dentry_ops = {
 107        .d_dname = dmabuffs_dname,
 108        .d_release = dma_buf_release,
 109};
 110
 111static struct vfsmount *dma_buf_mnt;
 112
 113static int dma_buf_fs_init_context(struct fs_context *fc)
 114{
 115        struct pseudo_fs_context *ctx;
 116
 117        ctx = init_pseudo(fc, DMA_BUF_MAGIC);
 118        if (!ctx)
 119                return -ENOMEM;
 120        ctx->dops = &dma_buf_dentry_ops;
 121        return 0;
 122}
 123
 124static struct file_system_type dma_buf_fs_type = {
 125        .name = "dmabuf",
 126        .init_fs_context = dma_buf_fs_init_context,
 127        .kill_sb = kill_anon_super,
 128};
 129
 130static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 131{
 132        struct dma_buf *dmabuf;
 133
 134        if (!is_dma_buf_file(file))
 135                return -EINVAL;
 136
 137        dmabuf = file->private_data;
 138
 139        /* check if buffer supports mmap */
 140        if (!dmabuf->ops->mmap)
 141                return -EINVAL;
 142
 143        /* check for overflowing the buffer's size */
 144        if (vma->vm_pgoff + vma_pages(vma) >
 145            dmabuf->size >> PAGE_SHIFT)
 146                return -EINVAL;
 147
 148        return dmabuf->ops->mmap(dmabuf, vma);
 149}
 150
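/*
 * Userspace typically discovers the size of a dma-buf with the llseek idiom
 * sketched below (illustrative only; dmabuf_fd is assumed to be a valid
 * dma-buf file descriptor):
 *
 *      off_t size = lseek(dmabuf_fd, 0, SEEK_END);
 *
 *      lseek(dmabuf_fd, 0, SEEK_SET);
 */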
 151static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 152{
 153        struct dma_buf *dmabuf;
 154        loff_t base;
 155
 156        if (!is_dma_buf_file(file))
 157                return -EBADF;
 158
 159        dmabuf = file->private_data;
 160
 161        /* only support discovering the end of the buffer,
 162           but also allow SEEK_SET to maintain the idiomatic
 163           SEEK_END(0), SEEK_CUR(0) pattern */
 164        if (whence == SEEK_END)
 165                base = dmabuf->size;
 166        else if (whence == SEEK_SET)
 167                base = 0;
 168        else
 169                return -EINVAL;
 170
 171        if (offset != 0)
 172                return -EINVAL;
 173
 174        return base + offset;
 175}
 176
 177/**
 178 * DOC: implicit fence polling
 179 *
  180 * To support cross-device and cross-driver synchronization of buffer access,
 181 * implicit fences (represented internally in the kernel with &struct dma_fence)
 182 * can be attached to a &dma_buf. The glue for that and a few related things are
 183 * provided in the &dma_resv structure.
 184 *
 185 * Userspace can query the state of these implicitly tracked fences using poll()
 186 * and related system calls:
 187 *
  188 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
 189 *   most recent write or exclusive fence.
 190 *
 191 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 192 *   all attached fences, shared and exclusive ones.
 193 *
 194 * Note that this only signals the completion of the respective fences, i.e. the
 195 * DMA transfers are complete. Cache flushing and any other necessary
 196 * preparations before CPU access can begin still need to happen.
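 *
 * A rough userspace sketch (illustrative only; dmabuf_fd is assumed to be a
 * valid dma-buf file descriptor obtained elsewhere) that blocks until the
 * most recent write/exclusive fence has signaled::
 *
 *      #include <poll.h>
 *
 *      static int wait_for_implicit_read(int dmabuf_fd)
 *      {
 *              struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *              return poll(&pfd, 1, -1) == 1 ? 0 : -1;
 *      }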
 197 */
 198
 199static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 200{
 201        struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
 202        unsigned long flags;
 203
 204        spin_lock_irqsave(&dcb->poll->lock, flags);
 205        wake_up_locked_poll(dcb->poll, dcb->active);
 206        dcb->active = 0;
 207        spin_unlock_irqrestore(&dcb->poll->lock, flags);
 208}
 209
 210static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 211{
 212        struct dma_buf *dmabuf;
 213        struct dma_resv *resv;
 214        struct dma_resv_list *fobj;
 215        struct dma_fence *fence_excl;
 216        __poll_t events;
 217        unsigned shared_count, seq;
 218
 219        dmabuf = file->private_data;
 220        if (!dmabuf || !dmabuf->resv)
 221                return EPOLLERR;
 222
 223        resv = dmabuf->resv;
 224
 225        poll_wait(file, &dmabuf->poll, poll);
 226
 227        events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
 228        if (!events)
 229                return 0;
 230
 231retry:
 232        seq = read_seqcount_begin(&resv->seq);
 233        rcu_read_lock();
 234
 235        fobj = rcu_dereference(resv->fence);
 236        if (fobj)
 237                shared_count = fobj->shared_count;
 238        else
 239                shared_count = 0;
 240        fence_excl = dma_resv_excl_fence(resv);
 241        if (read_seqcount_retry(&resv->seq, seq)) {
 242                rcu_read_unlock();
 243                goto retry;
 244        }
 245
 246        if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 247                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
 248                __poll_t pevents = EPOLLIN;
 249
 250                if (shared_count == 0)
 251                        pevents |= EPOLLOUT;
 252
 253                spin_lock_irq(&dmabuf->poll.lock);
 254                if (dcb->active) {
 255                        dcb->active |= pevents;
 256                        events &= ~pevents;
 257                } else
 258                        dcb->active = pevents;
 259                spin_unlock_irq(&dmabuf->poll.lock);
 260
 261                if (events & pevents) {
 262                        if (!dma_fence_get_rcu(fence_excl)) {
 263                                /* force a recheck */
 264                                events &= ~pevents;
 265                                dma_buf_poll_cb(NULL, &dcb->cb);
 266                        } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
 267                                                           dma_buf_poll_cb)) {
 268                                events &= ~pevents;
 269                                dma_fence_put(fence_excl);
 270                        } else {
 271                                /*
 272                                 * No callback queued, wake up any additional
 273                                 * waiters.
 274                                 */
 275                                dma_fence_put(fence_excl);
 276                                dma_buf_poll_cb(NULL, &dcb->cb);
 277                        }
 278                }
 279        }
 280
 281        if ((events & EPOLLOUT) && shared_count > 0) {
 282                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
 283                int i;
 284
 285                /* Only queue a new callback if no event has fired yet */
 286                spin_lock_irq(&dmabuf->poll.lock);
 287                if (dcb->active)
 288                        events &= ~EPOLLOUT;
 289                else
 290                        dcb->active = EPOLLOUT;
 291                spin_unlock_irq(&dmabuf->poll.lock);
 292
 293                if (!(events & EPOLLOUT))
 294                        goto out;
 295
 296                for (i = 0; i < shared_count; ++i) {
 297                        struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 298
 299                        if (!dma_fence_get_rcu(fence)) {
 300                                /*
 301                                 * fence refcount dropped to zero, this means
 302                                 * that fobj has been freed
 303                                 *
 304                                 * call dma_buf_poll_cb and force a recheck!
 305                                 */
 306                                events &= ~EPOLLOUT;
 307                                dma_buf_poll_cb(NULL, &dcb->cb);
 308                                break;
 309                        }
 310                        if (!dma_fence_add_callback(fence, &dcb->cb,
 311                                                    dma_buf_poll_cb)) {
 312                                dma_fence_put(fence);
 313                                events &= ~EPOLLOUT;
 314                                break;
 315                        }
 316                        dma_fence_put(fence);
 317                }
 318
 319                /* No callback queued, wake up any additional waiters. */
 320                if (i == shared_count)
 321                        dma_buf_poll_cb(NULL, &dcb->cb);
 322        }
 323
 324out:
 325        rcu_read_unlock();
 326        return events;
 327}
 328
 329/**
  330 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
  331 * The name of the dma-buf buffer can only be set when the dma-buf is not
  332 * attached to any devices. It could theoretically support changing the
  333 * name of the dma-buf if the same piece of memory is used for multiple
  334 * purposes between different devices.
 335 *
 336 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 337 * @buf:    [in]     A piece of userspace memory that contains the name of
 338 *                   the dma-buf.
 339 *
 340 * Returns 0 on success. If the dma-buf buffer is already attached to
 341 * devices, return -EBUSY.
 342 *
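 * Userspace typically sets the name through the DMA_BUF_SET_NAME ioctl
 * (a sketch only; fd is assumed to be a dma-buf file descriptor)::
 *
 *      ioctl(fd, DMA_BUF_SET_NAME, "my-buffer");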
 343 */
 344static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
 345{
 346        char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
 347        long ret = 0;
 348
 349        if (IS_ERR(name))
 350                return PTR_ERR(name);
 351
 352        dma_resv_lock(dmabuf->resv, NULL);
 353        if (!list_empty(&dmabuf->attachments)) {
 354                ret = -EBUSY;
 355                kfree(name);
 356                goto out_unlock;
 357        }
 358        spin_lock(&dmabuf->name_lock);
 359        kfree(dmabuf->name);
 360        dmabuf->name = name;
 361        spin_unlock(&dmabuf->name_lock);
 362
 363out_unlock:
 364        dma_resv_unlock(dmabuf->resv);
 365        return ret;
 366}
 367
 368static long dma_buf_ioctl(struct file *file,
 369                          unsigned int cmd, unsigned long arg)
 370{
 371        struct dma_buf *dmabuf;
 372        struct dma_buf_sync sync;
 373        enum dma_data_direction direction;
 374        int ret;
 375
 376        dmabuf = file->private_data;
 377
 378        switch (cmd) {
 379        case DMA_BUF_IOCTL_SYNC:
 380                if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
 381                        return -EFAULT;
 382
 383                if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
 384                        return -EINVAL;
 385
 386                switch (sync.flags & DMA_BUF_SYNC_RW) {
 387                case DMA_BUF_SYNC_READ:
 388                        direction = DMA_FROM_DEVICE;
 389                        break;
 390                case DMA_BUF_SYNC_WRITE:
 391                        direction = DMA_TO_DEVICE;
 392                        break;
 393                case DMA_BUF_SYNC_RW:
 394                        direction = DMA_BIDIRECTIONAL;
 395                        break;
 396                default:
 397                        return -EINVAL;
 398                }
 399
 400                if (sync.flags & DMA_BUF_SYNC_END)
 401                        ret = dma_buf_end_cpu_access(dmabuf, direction);
 402                else
 403                        ret = dma_buf_begin_cpu_access(dmabuf, direction);
 404
 405                return ret;
 406
 407        case DMA_BUF_SET_NAME_A:
 408        case DMA_BUF_SET_NAME_B:
 409                return dma_buf_set_name(dmabuf, (const char __user *)arg);
 410
 411        default:
 412                return -ENOTTY;
 413        }
 414}
 415
 416static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
 417{
 418        struct dma_buf *dmabuf = file->private_data;
 419
 420        seq_printf(m, "size:\t%zu\n", dmabuf->size);
 421        /* Don't count the temporary reference taken inside procfs seq_show */
 422        seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
 423        seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
 424        spin_lock(&dmabuf->name_lock);
 425        if (dmabuf->name)
 426                seq_printf(m, "name:\t%s\n", dmabuf->name);
 427        spin_unlock(&dmabuf->name_lock);
 428}
 429
 430static const struct file_operations dma_buf_fops = {
 431        .release        = dma_buf_file_release,
 432        .mmap           = dma_buf_mmap_internal,
 433        .llseek         = dma_buf_llseek,
 434        .poll           = dma_buf_poll,
 435        .unlocked_ioctl = dma_buf_ioctl,
 436        .compat_ioctl   = compat_ptr_ioctl,
 437        .show_fdinfo    = dma_buf_show_fdinfo,
 438};
 439
 440/*
 441 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 442 */
 443static inline int is_dma_buf_file(struct file *file)
 444{
 445        return file->f_op == &dma_buf_fops;
 446}
 447
 448static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 449{
 450        struct file *file;
 451        struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
 452
 453        if (IS_ERR(inode))
 454                return ERR_CAST(inode);
 455
 456        inode->i_size = dmabuf->size;
 457        inode_set_bytes(inode, dmabuf->size);
 458
 459        file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
 460                                 flags, &dma_buf_fops);
 461        if (IS_ERR(file))
 462                goto err_alloc_file;
 463        file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
 464        file->private_data = dmabuf;
 465        file->f_path.dentry->d_fsdata = dmabuf;
 466
 467        return file;
 468
 469err_alloc_file:
 470        iput(inode);
 471        return file;
 472}
 473
 474/**
 475 * DOC: dma buf device access
 476 *
 477 * For device DMA access to a shared DMA buffer the usual sequence of operations
 478 * is fairly simple:
 479 *
  480 * 1. The exporter defines its exporter instance using
 481 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 482 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 483 *    as a file descriptor by calling dma_buf_fd().
 484 *
  485 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
  486 *    to share with: first the file descriptor is converted to a &dma_buf using
 487 *    dma_buf_get(). Then the buffer is attached to the device using
 488 *    dma_buf_attach().
 489 *
 490 *    Up to this stage the exporter is still free to migrate or reallocate the
 491 *    backing storage.
 492 *
  493 * 3. Once the buffer is attached to all devices, userspace can initiate DMA
 494 *    access to the shared buffer. In the kernel this is done by calling
 495 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 496 *
 497 * 4. Once a driver is done with a shared buffer it needs to call
 498 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 499 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 500 *
 501 * For the detailed semantics exporters are expected to implement see
  502 * &dma_buf_ops. A condensed sketch of this sequence is shown below.
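 *
 * Illustrative importer-side sketch of steps 2-4 (error handling trimmed;
 * my_dev stands in for the importer's struct device and is not part of this
 * file)::
 *
 *      struct dma_buf \*dmabuf = dma_buf_get(fd);
 *      struct dma_buf_attachment \*attach;
 *      struct sg_table \*sgt;
 *
 *      attach = dma_buf_attach(dmabuf, my_dev);
 *      sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *      // ... program the device using the addresses/lengths in sgt ...
 *
 *      dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *      dma_buf_detach(dmabuf, attach);
 *      dma_buf_put(dmabuf);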
 503 */
 504
 505/**
 506 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 507 * with this buffer, so it can be exported.
  508 * Also connects the allocator-specific data and ops to the buffer.
  509 * Additionally, provides a name string for the exporter; useful in debugging.
 510 *
 511 * @exp_info:   [in]    holds all the export related information provided
 512 *                      by the exporter. see &struct dma_buf_export_info
 513 *                      for further details.
 514 *
 515 * Returns, on success, a newly created struct dma_buf object, which wraps the
 516 * supplied private data and operations for struct dma_buf_ops. On either
  517 * missing ops or an error allocating the struct dma_buf, a negative error
  518 * (wrapped in an ERR_PTR) is returned.
 519 *
 520 * For most cases the easiest way to create @exp_info is through the
 521 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
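 *
 * A minimal exporter-side sketch (my_exporter_ops and my_buf are illustrative
 * names, not part of this file)::
 *
 *      DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *      struct dma_buf \*dmabuf;
 *      int fd;
 *
 *      exp_info.ops = &my_exporter_ops;
 *      exp_info.size = my_buf->size;
 *      exp_info.flags = O_RDWR;
 *      exp_info.priv = my_buf;
 *
 *      dmabuf = dma_buf_export(&exp_info);
 *      if (IS_ERR(dmabuf))
 *              return PTR_ERR(dmabuf);
 *
 *      fd = dma_buf_fd(dmabuf, O_CLOEXEC);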
 522 */
 523struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 524{
 525        struct dma_buf *dmabuf;
 526        struct dma_resv *resv = exp_info->resv;
 527        struct file *file;
 528        size_t alloc_size = sizeof(struct dma_buf);
 529        int ret;
 530
 531        if (!exp_info->resv)
 532                alloc_size += sizeof(struct dma_resv);
 533        else
 534                /* prevent &dma_buf[1] == dma_buf->resv */
 535                alloc_size += 1;
 536
 537        if (WARN_ON(!exp_info->priv
 538                          || !exp_info->ops
 539                          || !exp_info->ops->map_dma_buf
 540                          || !exp_info->ops->unmap_dma_buf
 541                          || !exp_info->ops->release)) {
 542                return ERR_PTR(-EINVAL);
 543        }
 544
 545        if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
 546                    (exp_info->ops->pin || exp_info->ops->unpin)))
 547                return ERR_PTR(-EINVAL);
 548
 549        if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
 550                return ERR_PTR(-EINVAL);
 551
 552        if (!try_module_get(exp_info->owner))
 553                return ERR_PTR(-ENOENT);
 554
 555        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 556        if (!dmabuf) {
 557                ret = -ENOMEM;
 558                goto err_module;
 559        }
 560
 561        dmabuf->priv = exp_info->priv;
 562        dmabuf->ops = exp_info->ops;
 563        dmabuf->size = exp_info->size;
 564        dmabuf->exp_name = exp_info->exp_name;
 565        dmabuf->owner = exp_info->owner;
 566        spin_lock_init(&dmabuf->name_lock);
 567        init_waitqueue_head(&dmabuf->poll);
 568        dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 569        dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 570
 571        if (!resv) {
 572                resv = (struct dma_resv *)&dmabuf[1];
 573                dma_resv_init(resv);
 574        }
 575        dmabuf->resv = resv;
 576
 577        file = dma_buf_getfile(dmabuf, exp_info->flags);
 578        if (IS_ERR(file)) {
 579                ret = PTR_ERR(file);
 580                goto err_dmabuf;
 581        }
 582
 583        file->f_mode |= FMODE_LSEEK;
 584        dmabuf->file = file;
 585
 586        ret = dma_buf_stats_setup(dmabuf);
 587        if (ret)
 588                goto err_sysfs;
 589
 590        mutex_init(&dmabuf->lock);
 591        INIT_LIST_HEAD(&dmabuf->attachments);
 592
 593        mutex_lock(&db_list.lock);
 594        list_add(&dmabuf->list_node, &db_list.head);
 595        mutex_unlock(&db_list.lock);
 596
 597        return dmabuf;
 598
 599err_sysfs:
 600        /*
 601         * Set file->f_path.dentry->d_fsdata to NULL so that when
 602         * dma_buf_release() gets invoked by dentry_ops, it exits
 603         * early before calling the release() dma_buf op.
 604         */
 605        file->f_path.dentry->d_fsdata = NULL;
 606        fput(file);
 607err_dmabuf:
 608        kfree(dmabuf);
 609err_module:
 610        module_put(exp_info->owner);
 611        return ERR_PTR(ret);
 612}
 613EXPORT_SYMBOL_GPL(dma_buf_export);
 614
 615/**
 616 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 617 * @dmabuf:     [in]    pointer to dma_buf for which fd is required.
 618 * @flags:      [in]    flags to give to fd
 619 *
  620 * On success, returns the associated fd. Otherwise, returns a negative error code.
 621 */
 622int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 623{
 624        int fd;
 625
 626        if (!dmabuf || !dmabuf->file)
 627                return -EINVAL;
 628
 629        fd = get_unused_fd_flags(flags);
 630        if (fd < 0)
 631                return fd;
 632
 633        fd_install(fd, dmabuf->file);
 634
 635        return fd;
 636}
 637EXPORT_SYMBOL_GPL(dma_buf_fd);
 638
 639/**
 640 * dma_buf_get - returns the struct dma_buf related to an fd
 641 * @fd: [in]    fd associated with the struct dma_buf to be returned
 642 *
 643 * On success, returns the struct dma_buf associated with an fd; uses
  644 * the file's refcounting done by fget() to increase the refcount. Returns
  645 * an ERR_PTR otherwise.
 646 */
 647struct dma_buf *dma_buf_get(int fd)
 648{
 649        struct file *file;
 650
 651        file = fget(fd);
 652
 653        if (!file)
 654                return ERR_PTR(-EBADF);
 655
 656        if (!is_dma_buf_file(file)) {
 657                fput(file);
 658                return ERR_PTR(-EINVAL);
 659        }
 660
 661        return file->private_data;
 662}
 663EXPORT_SYMBOL_GPL(dma_buf_get);
 664
 665/**
 666 * dma_buf_put - decreases refcount of the buffer
 667 * @dmabuf:     [in]    buffer to reduce refcount of
 668 *
 669 * Uses file's refcounting done implicitly by fput().
 670 *
 671 * If, as a result of this call, the refcount becomes 0, the 'release' file
 672 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 673 * in turn, and frees the memory allocated for dmabuf when exported.
 674 */
 675void dma_buf_put(struct dma_buf *dmabuf)
 676{
 677        if (WARN_ON(!dmabuf || !dmabuf->file))
 678                return;
 679
 680        fput(dmabuf->file);
 681}
 682EXPORT_SYMBOL_GPL(dma_buf_put);
 683
 684static void mangle_sg_table(struct sg_table *sg_table)
 685{
 686#ifdef CONFIG_DMABUF_DEBUG
 687        int i;
 688        struct scatterlist *sg;
 689
 690        /* To catch abuse of the underlying struct page by importers mix
 691         * up the bits, but take care to preserve the low SG_ bits to
 692         * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
 693         * before passing the sgt back to the exporter. */
 694        for_each_sgtable_sg(sg_table, sg, i)
 695                sg->page_link ^= ~0xffUL;
 696#endif
 697
  698}

  699static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
 700                                       enum dma_data_direction direction)
 701{
 702        struct sg_table *sg_table;
 703
 704        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 705
 706        if (!IS_ERR_OR_NULL(sg_table))
 707                mangle_sg_table(sg_table);
 708
 709        return sg_table;
 710}
 711
 712/**
 713 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 714 * @dmabuf:             [in]    buffer to attach device to.
 715 * @dev:                [in]    device to be attached.
 716 * @importer_ops:       [in]    importer operations for the attachment
 717 * @importer_priv:      [in]    importer private pointer for the attachment
 718 *
 719 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 720 * must be cleaned up by calling dma_buf_detach().
 721 *
 722 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 723 * functionality.
 724 *
 725 * Returns:
 726 *
 727 * A pointer to newly created &dma_buf_attachment on success, or a negative
 728 * error code wrapped into a pointer on failure.
 729 *
 730 * Note that this can fail if the backing storage of @dmabuf is in a place not
 731 * accessible to @dev, and cannot be moved to a more suitable place. This is
 732 * indicated with the error code -EBUSY.
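 *
 * A dynamic importer might wire this up roughly as follows (sketch;
 * my_move_notify and my_importer_priv are illustrative names)::
 *
 *      static const struct dma_buf_attach_ops my_importer_ops = {
 *              .allow_peer2peer = true,
 *              .move_notify = my_move_notify,
 *      };
 *
 *      attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops,
 *                                      my_importer_priv);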
 733 */
 734struct dma_buf_attachment *
 735dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 736                       const struct dma_buf_attach_ops *importer_ops,
 737                       void *importer_priv)
 738{
 739        struct dma_buf_attachment *attach;
 740        int ret;
 741
 742        if (WARN_ON(!dmabuf || !dev))
 743                return ERR_PTR(-EINVAL);
 744
 745        if (WARN_ON(importer_ops && !importer_ops->move_notify))
 746                return ERR_PTR(-EINVAL);
 747
 748        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 749        if (!attach)
 750                return ERR_PTR(-ENOMEM);
 751
 752        attach->dev = dev;
 753        attach->dmabuf = dmabuf;
 754        if (importer_ops)
 755                attach->peer2peer = importer_ops->allow_peer2peer;
 756        attach->importer_ops = importer_ops;
 757        attach->importer_priv = importer_priv;
 758
 759        if (dmabuf->ops->attach) {
 760                ret = dmabuf->ops->attach(dmabuf, attach);
 761                if (ret)
 762                        goto err_attach;
 763        }
 764        dma_resv_lock(dmabuf->resv, NULL);
 765        list_add(&attach->node, &dmabuf->attachments);
 766        dma_resv_unlock(dmabuf->resv);
 767
 768        /* When either the importer or the exporter can't handle dynamic
 769         * mappings we cache the mapping here to avoid issues with the
 770         * reservation object lock.
 771         */
 772        if (dma_buf_attachment_is_dynamic(attach) !=
 773            dma_buf_is_dynamic(dmabuf)) {
 774                struct sg_table *sgt;
 775
 776                if (dma_buf_is_dynamic(attach->dmabuf)) {
 777                        dma_resv_lock(attach->dmabuf->resv, NULL);
 778                        ret = dmabuf->ops->pin(attach);
 779                        if (ret)
 780                                goto err_unlock;
 781                }
 782
 783                sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
 784                if (!sgt)
 785                        sgt = ERR_PTR(-ENOMEM);
 786                if (IS_ERR(sgt)) {
 787                        ret = PTR_ERR(sgt);
 788                        goto err_unpin;
 789                }
 790                if (dma_buf_is_dynamic(attach->dmabuf))
 791                        dma_resv_unlock(attach->dmabuf->resv);
 792                attach->sgt = sgt;
 793                attach->dir = DMA_BIDIRECTIONAL;
 794        }
 795
 796        return attach;
 797
 798err_attach:
 799        kfree(attach);
 800        return ERR_PTR(ret);
 801
 802err_unpin:
 803        if (dma_buf_is_dynamic(attach->dmabuf))
 804                dmabuf->ops->unpin(attach);
 805
 806err_unlock:
 807        if (dma_buf_is_dynamic(attach->dmabuf))
 808                dma_resv_unlock(attach->dmabuf->resv);
 809
 810        dma_buf_detach(dmabuf, attach);
 811        return ERR_PTR(ret);
 812}
 813EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
 814
 815/**
 816 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 817 * @dmabuf:     [in]    buffer to attach device to.
 818 * @dev:        [in]    device to be attached.
 819 *
 820 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 821 * mapping.
 822 */
 823struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 824                                          struct device *dev)
 825{
 826        return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
 827}
 828EXPORT_SYMBOL_GPL(dma_buf_attach);
 829
 830static void __unmap_dma_buf(struct dma_buf_attachment *attach,
 831                            struct sg_table *sg_table,
 832                            enum dma_data_direction direction)
 833{
 834        /* uses XOR, hence this unmangles */
 835        mangle_sg_table(sg_table);
 836
 837        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 838}
 839
 840/**
 841 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 842 * @dmabuf:     [in]    buffer to detach from.
  843 * @attach:     [in]    attachment to be detached; is freed after this call.
 844 *
 845 * Clean up a device attachment obtained by calling dma_buf_attach().
 846 *
 847 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 848 */
 849void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 850{
 851        if (WARN_ON(!dmabuf || !attach))
 852                return;
 853
 854        if (attach->sgt) {
 855                if (dma_buf_is_dynamic(attach->dmabuf))
 856                        dma_resv_lock(attach->dmabuf->resv, NULL);
 857
 858                __unmap_dma_buf(attach, attach->sgt, attach->dir);
 859
 860                if (dma_buf_is_dynamic(attach->dmabuf)) {
 861                        dmabuf->ops->unpin(attach);
 862                        dma_resv_unlock(attach->dmabuf->resv);
 863                }
 864        }
 865
 866        dma_resv_lock(dmabuf->resv, NULL);
 867        list_del(&attach->node);
 868        dma_resv_unlock(dmabuf->resv);
 869        if (dmabuf->ops->detach)
 870                dmabuf->ops->detach(dmabuf, attach);
 871
 872        kfree(attach);
 873}
 874EXPORT_SYMBOL_GPL(dma_buf_detach);
 875
 876/**
 877 * dma_buf_pin - Lock down the DMA-buf
 878 * @attach:     [in]    attachment which should be pinned
 879 *
 880 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 881 * call this, and only for limited use cases like scanout and not for temporary
 882 * pin operations. It is not permitted to allow userspace to pin arbitrary
 883 * amounts of buffers through this interface.
 884 *
 885 * Buffers must be unpinned by calling dma_buf_unpin().
 886 *
 887 * Returns:
 888 * 0 on success, negative error code on failure.
 889 */
 890int dma_buf_pin(struct dma_buf_attachment *attach)
 891{
 892        struct dma_buf *dmabuf = attach->dmabuf;
 893        int ret = 0;
 894
 895        WARN_ON(!dma_buf_attachment_is_dynamic(attach));
 896
 897        dma_resv_assert_held(dmabuf->resv);
 898
 899        if (dmabuf->ops->pin)
 900                ret = dmabuf->ops->pin(attach);
 901
 902        return ret;
 903}
 904EXPORT_SYMBOL_GPL(dma_buf_pin);
 905
 906/**
 907 * dma_buf_unpin - Unpin a DMA-buf
 908 * @attach:     [in]    attachment which should be unpinned
 909 *
 910 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 911 * any mapping of @attach again and inform the importer through
 912 * &dma_buf_attach_ops.move_notify.
 913 */
 914void dma_buf_unpin(struct dma_buf_attachment *attach)
 915{
 916        struct dma_buf *dmabuf = attach->dmabuf;
 917
 918        WARN_ON(!dma_buf_attachment_is_dynamic(attach));
 919
 920        dma_resv_assert_held(dmabuf->resv);
 921
 922        if (dmabuf->ops->unpin)
 923                dmabuf->ops->unpin(attach);
 924}
 925EXPORT_SYMBOL_GPL(dma_buf_unpin);
 926
 927/**
 928 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 929 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 930 * dma_buf_ops.
 931 * @attach:     [in]    attachment whose scatterlist is to be returned
 932 * @direction:  [in]    direction of DMA transfer
 933 *
  934 * Returns the sg_table containing the scatterlist of the buffer, or an ERR_PTR
 935 * on error. May return -EINTR if it is interrupted by a signal.
 936 *
 937 * On success, the DMA addresses and lengths in the returned scatterlist are
 938 * PAGE_SIZE aligned.
 939 *
 940 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 941 * the underlying backing storage is pinned for as long as a mapping exists,
 942 * therefore users/importers should not hold onto a mapping for undue amounts of
 943 * time.
 944 *
 945 * Important: Dynamic importers must wait for the exclusive fence of the struct
 946 * dma_resv attached to the DMA-BUF first.
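 *
 * For a dynamic attachment the reservation lock must be held around the call,
 * roughly like this (a sketch only, error handling omitted)::
 *
 *      dma_resv_lock(attach->dmabuf->resv, NULL);
 *      sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *      dma_resv_unlock(attach->dmabuf->resv);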
 947 */
 948struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 949                                        enum dma_data_direction direction)
 950{
 951        struct sg_table *sg_table;
 952        int r;
 953
 954        might_sleep();
 955
 956        if (WARN_ON(!attach || !attach->dmabuf))
 957                return ERR_PTR(-EINVAL);
 958
 959        if (dma_buf_attachment_is_dynamic(attach))
 960                dma_resv_assert_held(attach->dmabuf->resv);
 961
 962        if (attach->sgt) {
 963                /*
 964                 * Two mappings with different directions for the same
 965                 * attachment are not allowed.
 966                 */
 967                if (attach->dir != direction &&
 968                    attach->dir != DMA_BIDIRECTIONAL)
 969                        return ERR_PTR(-EBUSY);
 970
 971                return attach->sgt;
 972        }
 973
 974        if (dma_buf_is_dynamic(attach->dmabuf)) {
 975                dma_resv_assert_held(attach->dmabuf->resv);
 976                if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
 977                        r = attach->dmabuf->ops->pin(attach);
 978                        if (r)
 979                                return ERR_PTR(r);
 980                }
 981        }
 982
 983        sg_table = __map_dma_buf(attach, direction);
 984        if (!sg_table)
 985                sg_table = ERR_PTR(-ENOMEM);
 986
 987        if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
 988             !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
 989                attach->dmabuf->ops->unpin(attach);
 990
 991        if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
 992                attach->sgt = sg_table;
 993                attach->dir = direction;
 994        }
 995
 996#ifdef CONFIG_DMA_API_DEBUG
 997        if (!IS_ERR(sg_table)) {
 998                struct scatterlist *sg;
 999                u64 addr;
1000                int len;
1001                int i;
1002
1003                for_each_sgtable_dma_sg(sg_table, sg, i) {
1004                        addr = sg_dma_address(sg);
1005                        len = sg_dma_len(sg);
1006                        if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1007                                pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1008                                         __func__, addr, len);
1009                        }
1010                }
1011        }
1012#endif /* CONFIG_DMA_API_DEBUG */
1013        return sg_table;
1014}
1015EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
1016
1017/**
 1018 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 1019 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1020 * dma_buf_ops.
1021 * @attach:     [in]    attachment to unmap buffer from
1022 * @sg_table:   [in]    scatterlist info of the buffer to unmap
1023 * @direction:  [in]    direction of DMA transfer
1024 *
 1025 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1026 */
1027void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1028                                struct sg_table *sg_table,
1029                                enum dma_data_direction direction)
1030{
1031        might_sleep();
1032
1033        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1034                return;
1035
1036        if (dma_buf_attachment_is_dynamic(attach))
1037                dma_resv_assert_held(attach->dmabuf->resv);
1038
1039        if (attach->sgt == sg_table)
1040                return;
1041
1042        if (dma_buf_is_dynamic(attach->dmabuf))
1043                dma_resv_assert_held(attach->dmabuf->resv);
1044
1045        __unmap_dma_buf(attach, sg_table, direction);
1046
1047        if (dma_buf_is_dynamic(attach->dmabuf) &&
1048            !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1049                dma_buf_unpin(attach);
1050}
1051EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
1052
1053/**
1054 * dma_buf_move_notify - notify attachments that DMA-buf is moving
1055 *
1056 * @dmabuf:     [in]    buffer which is moving
1057 *
 1058 * Informs all attachments that they need to destroy and recreate all their
1059 * mappings.
1060 */
1061void dma_buf_move_notify(struct dma_buf *dmabuf)
1062{
1063        struct dma_buf_attachment *attach;
1064
1065        dma_resv_assert_held(dmabuf->resv);
1066
1067        list_for_each_entry(attach, &dmabuf->attachments, node)
1068                if (attach->importer_ops)
1069                        attach->importer_ops->move_notify(attach);
1070}
1071EXPORT_SYMBOL_GPL(dma_buf_move_notify);
1072
1073/**
1074 * DOC: cpu access
1075 *
 1076 * There are multiple reasons for supporting CPU access to a dma buffer object:
1077 *
1078 * - Fallback operations in the kernel, for example when a device is connected
1079 *   over USB and the kernel needs to shuffle the data around first before
 1080 *   sending it away. Cache coherency is handled by bracketing any transactions
 1081 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
1083 *
 1084 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1085 *   vmap interface is introduced. Note that on very old 32-bit architectures
1086 *   vmalloc space might be limited and result in vmap calls failing.
1087 *
1088 *   Interfaces::
1089 *
1090 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
1091 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
1092 *
1093 *   The vmap call can fail if there is no vmap support in the exporter, or if
1094 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1095 *   count for all vmap access and calls down into the exporter's vmap function
1096 *   only when no vmapping exists, and only unmaps it once. Protection against
1097 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1098 *
1099 * - For full compatibility on the importer side with existing userspace
1100 *   interfaces, which might already support mmap'ing buffers. This is needed in
1101 *   many processing pipelines (e.g. feeding a software rendered image into a
1102 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 1103 *   framework already supported this, and for DMA buffer file descriptors to
 1104 *   replace ION buffers, mmap support was needed.
1105 *
 1106 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
 1107 *   fd. But as with kernel CPU access, the actual access needs to be bracketed,
 1108 *   which is handled by the DMA_BUF_IOCTL_SYNC ioctl (a short example is given
 1109 *   at the end of this section). Note that DMA_BUF_IOCTL_SYNC can fail with
 1110 *   -EAGAIN or -EINTR, in which case it must be restarted.
1111 *
1112 *   Some systems might need some sort of cache coherency management e.g. when
1113 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 1114 *   To circumvent this problem there are begin/end coherency markers that
 1115 *   forward directly to the existing dma-buf device drivers' vfunc hooks. Userspace
1116 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 1117 *   sequence is used as follows:
1118 *
1119 *     - mmap dma-buf fd
 1120 *     - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
 1121 *       to the mmap area, 3. SYNC_END ioctl. This can be repeated as often as you
 1122 *       want (with the new data being consumed by, say, the GPU or the scanout
 1123 *       device)
1124 *     - munmap once you don't need the buffer any more
1125 *
1126 *    For correctness and optimal performance, it is always required to use
1127 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1128 *    mapped address. Userspace cannot rely on coherent access, even when there
1129 *    are systems where it just works without calling these ioctls.
1130 *
1131 * - And as a CPU fallback in userspace processing pipelines.
1132 *
 1133 *   Similar to the motivation for kernel cpu access, it is again important that
 1134 *   the userspace code of a given importing subsystem can use the same
 1135 *   interfaces with an imported dma-buf buffer object as with a native buffer
 1136 *   object. This is especially important for drm where the userspace part of
 1137 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 1138 *   use a different way to mmap a buffer would be rather invasive.
1139 *
1140 *   The assumption in the current dma-buf interfaces is that redirecting the
1141 *   initial mmap is all that's needed. A survey of some of the existing
1142 *   subsystems shows that no driver seems to do any nefarious thing like
1143 *   syncing up with outstanding asynchronous processing on the device or
1144 *   allocating special resources at fault time. So hopefully this is good
1145 *   enough, since adding interfaces to intercept pagefaults and allow pte
1146 *   shootdowns would increase the complexity quite a bit.
1147 *
1148 *   Interface::
1149 *
1150 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1151 *                     unsigned long);
1152 *
1153 *   If the importing subsystem simply provides a special-purpose mmap call to
1154 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1155 *   equally achieve that for a dma-buf object.
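 *
 * A minimal userspace sketch of the mmap + SYNC sequence above (illustrative
 * only; error checking omitted, and len is assumed to be the buffer size
 * queried elsewhere)::
 *
 *      #include <sys/ioctl.h>
 *      #include <sys/mman.h>
 *      #include <linux/dma-buf.h>
 *
 *      struct dma_buf_sync sync = { 0 };
 *      void \*ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *      sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *      ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *      // ... read from or write to ptr ...
 *
 *      sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *      ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *      munmap(ptr, len);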
1156 */
1157
1158static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1159                                      enum dma_data_direction direction)
1160{
1161        bool write = (direction == DMA_BIDIRECTIONAL ||
1162                      direction == DMA_TO_DEVICE);
1163        struct dma_resv *resv = dmabuf->resv;
1164        long ret;
1165
1166        /* Wait on any implicit rendering fences */
1167        ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
1168        if (ret < 0)
1169                return ret;
1170
1171        return 0;
1172}
1173
1174/**
1175 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1176 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 1177 * preparations. Coherency is only guaranteed for the specified access
 1178 * direction.
 1179 * @dmabuf:     [in]    buffer to prepare cpu access for.
 1180 * @direction:  [in]    direction of access.
1181 *
1182 * After the cpu access is complete the caller should call
 1183 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1184 * it guaranteed to be coherent with other DMA access.
1185 *
1186 * This function will also wait for any DMA transactions tracked through
1187 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1188 * synchronization this function will only ensure cache coherency, callers must
1189 * ensure synchronization with such DMA transactions on their own.
1190 *
1191 * Can return negative error values, returns 0 on success.
1192 */
1193int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1194                             enum dma_data_direction direction)
1195{
1196        int ret = 0;
1197
1198        if (WARN_ON(!dmabuf))
1199                return -EINVAL;
1200
1201        might_lock(&dmabuf->resv->lock.base);
1202
1203        if (dmabuf->ops->begin_cpu_access)
1204                ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1205
1206        /* Ensure that all fences are waited upon - but we first allow
1207         * the native handler the chance to do so more efficiently if it
 1208         * chooses. A double invocation here will be a reasonably cheap no-op.
1209         */
1210        if (ret == 0)
1211                ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1212
1213        return ret;
1214}
1215EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
1216
1217/**
1218 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1219 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 1220 * actions. Coherency is only guaranteed for the specified access
 1221 * direction.
 1222 * @dmabuf:     [in]    buffer to complete cpu access for.
 1223 * @direction:  [in]    direction of access.
1224 *
1225 * This terminates CPU access started with dma_buf_begin_cpu_access().
1226 *
1227 * Can return negative error values, returns 0 on success.
1228 */
1229int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1230                           enum dma_data_direction direction)
1231{
1232        int ret = 0;
1233
1234        WARN_ON(!dmabuf);
1235
1236        might_lock(&dmabuf->resv->lock.base);
1237
1238        if (dmabuf->ops->end_cpu_access)
1239                ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1240
1241        return ret;
1242}
1243EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
1244
1245
1246/**
 1247 * dma_buf_mmap - Set up a userspace mmap with the given vma
1248 * @dmabuf:     [in]    buffer that should back the vma
1249 * @vma:        [in]    vma for the mmap
1250 * @pgoff:      [in]    offset in pages where this mmap should start within the
1251 *                      dma-buf buffer.
1252 *
1253 * This function adjusts the passed in vma so that it points at the file of the
1254 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 1255 * checking on the size of the vma. Then it calls the exporter's mmap function to
1256 * set up the mapping.
1257 *
1258 * Can return negative error values, returns 0 on success.
1259 */
1260int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1261                 unsigned long pgoff)
1262{
1263        if (WARN_ON(!dmabuf || !vma))
1264                return -EINVAL;
1265
1266        /* check if buffer supports mmap */
1267        if (!dmabuf->ops->mmap)
1268                return -EINVAL;
1269
1270        /* check for offset overflow */
1271        if (pgoff + vma_pages(vma) < pgoff)
1272                return -EOVERFLOW;
1273
1274        /* check for overflowing the buffer's size */
1275        if (pgoff + vma_pages(vma) >
1276            dmabuf->size >> PAGE_SHIFT)
1277                return -EINVAL;
1278
1279        /* readjust the vma */
1280        vma_set_file(vma, dmabuf->file);
1281        vma->vm_pgoff = pgoff;
1282
1283        return dmabuf->ops->mmap(dmabuf, vma);
1284}
1285EXPORT_SYMBOL_GPL(dma_buf_mmap);
1286
1287/**
1288 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1289 * address space. Same restrictions as for vmap and friends apply.
1290 * @dmabuf:     [in]    buffer to vmap
1291 * @map:        [out]   returns the vmap pointer
1292 *
1293 * This call may fail due to lack of virtual mapping address space.
 1294 * These calls are optional in drivers. The intended use for them is
 1295 * mapping objects linearly into kernel address space, for frequently accessed objects.
1296 *
1297 * To ensure coherency users must call dma_buf_begin_cpu_access() and
1298 * dma_buf_end_cpu_access() around any cpu access performed through this
1299 * mapping.
1300 *
1301 * Returns 0 on success, or a negative errno code otherwise.
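 *
 * Typical usage looks roughly like this (a sketch, error handling trimmed)::
 *
 *      struct dma_buf_map map;
 *
 *      dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *      dma_buf_vmap(dmabuf, &map);
 *
 *      // ... access the buffer through map.vaddr (or map.vaddr_iomem) ...
 *
 *      dma_buf_vunmap(dmabuf, &map);
 *      dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);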
1302 */
1303int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
1304{
1305        struct dma_buf_map ptr;
1306        int ret = 0;
1307
1308        dma_buf_map_clear(map);
1309
1310        if (WARN_ON(!dmabuf))
1311                return -EINVAL;
1312
1313        if (!dmabuf->ops->vmap)
1314                return -EINVAL;
1315
1316        mutex_lock(&dmabuf->lock);
1317        if (dmabuf->vmapping_counter) {
1318                dmabuf->vmapping_counter++;
1319                BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
1320                *map = dmabuf->vmap_ptr;
1321                goto out_unlock;
1322        }
1323
1324        BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
1325
1326        ret = dmabuf->ops->vmap(dmabuf, &ptr);
1327        if (WARN_ON_ONCE(ret))
1328                goto out_unlock;
1329
1330        dmabuf->vmap_ptr = ptr;
1331        dmabuf->vmapping_counter = 1;
1332
1333        *map = dmabuf->vmap_ptr;
1334
1335out_unlock:
1336        mutex_unlock(&dmabuf->lock);
1337        return ret;
1338}
1339EXPORT_SYMBOL_GPL(dma_buf_vmap);
1340
1341/**
1342 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1343 * @dmabuf:     [in]    buffer to vunmap
1344 * @map:        [in]    vmap pointer to vunmap
1345 */
1346void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
1347{
1348        if (WARN_ON(!dmabuf))
1349                return;
1350
1351        BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
1352        BUG_ON(dmabuf->vmapping_counter == 0);
1353        BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
1354
1355        mutex_lock(&dmabuf->lock);
1356        if (--dmabuf->vmapping_counter == 0) {
1357                if (dmabuf->ops->vunmap)
1358                        dmabuf->ops->vunmap(dmabuf, map);
1359                dma_buf_map_clear(&dmabuf->vmap_ptr);
1360        }
1361        mutex_unlock(&dmabuf->lock);
1362}
1363EXPORT_SYMBOL_GPL(dma_buf_vunmap);
1364
1365#ifdef CONFIG_DEBUG_FS
1366static int dma_buf_debug_show(struct seq_file *s, void *unused)
1367{
1368        struct dma_buf *buf_obj;
1369        struct dma_buf_attachment *attach_obj;
1370        struct dma_resv *robj;
1371        struct dma_resv_list *fobj;
1372        struct dma_fence *fence;
1373        int count = 0, attach_count, shared_count, i;
1374        size_t size = 0;
1375        int ret;
1376
1377        ret = mutex_lock_interruptible(&db_list.lock);
1378
1379        if (ret)
1380                return ret;
1381
1382        seq_puts(s, "\nDma-buf Objects:\n");
1383        seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
1384                   "size", "flags", "mode", "count", "ino");
1385
1386        list_for_each_entry(buf_obj, &db_list.head, list_node) {
1387
1388                ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1389                if (ret)
1390                        goto error_unlock;
1391
1392                seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1393                                buf_obj->size,
1394                                buf_obj->file->f_flags, buf_obj->file->f_mode,
1395                                file_count(buf_obj->file),
1396                                buf_obj->exp_name,
1397                                file_inode(buf_obj->file)->i_ino,
1398                                buf_obj->name ?: "");
1399
1400                robj = buf_obj->resv;
1401                fence = dma_resv_excl_fence(robj);
1402                if (fence)
1403                        seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1404                                   fence->ops->get_driver_name(fence),
1405                                   fence->ops->get_timeline_name(fence),
1406                                   dma_fence_is_signaled(fence) ? "" : "un");
1407
1408                fobj = rcu_dereference_protected(robj->fence,
1409                                                 dma_resv_held(robj));
1410                shared_count = fobj ? fobj->shared_count : 0;
1411                for (i = 0; i < shared_count; i++) {
1412                        fence = rcu_dereference_protected(fobj->shared[i],
1413                                                          dma_resv_held(robj));
1414                        seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1415                                   fence->ops->get_driver_name(fence),
1416                                   fence->ops->get_timeline_name(fence),
1417                                   dma_fence_is_signaled(fence) ? "" : "un");
1418                }
1419
1420                seq_puts(s, "\tAttached Devices:\n");
1421                attach_count = 0;
1422
1423                list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1424                        seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1425                        attach_count++;
1426                }
1427                dma_resv_unlock(buf_obj->resv);
1428
1429                seq_printf(s, "Total %d devices attached\n\n",
1430                                attach_count);
1431
1432                count++;
1433                size += buf_obj->size;
1434        }
1435
1436        seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1437
1438        mutex_unlock(&db_list.lock);
1439        return 0;
1440
1441error_unlock:
1442        mutex_unlock(&db_list.lock);
1443        return ret;
1444}
1445
1446DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1447
1448static struct dentry *dma_buf_debugfs_dir;
1449
1450static int dma_buf_init_debugfs(void)
1451{
1452        struct dentry *d;
1453        int err = 0;
1454
1455        d = debugfs_create_dir("dma_buf", NULL);
1456        if (IS_ERR(d))
1457                return PTR_ERR(d);
1458
1459        dma_buf_debugfs_dir = d;
1460
1461        d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1462                                NULL, &dma_buf_debug_fops);
1463        if (IS_ERR(d)) {
1464                pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1465                debugfs_remove_recursive(dma_buf_debugfs_dir);
1466                dma_buf_debugfs_dir = NULL;
1467                err = PTR_ERR(d);
1468        }
1469
1470        return err;
1471}
1472
1473static void dma_buf_uninit_debugfs(void)
1474{
1475        debugfs_remove_recursive(dma_buf_debugfs_dir);
1476}
1477#else
1478static inline int dma_buf_init_debugfs(void)
1479{
1480        return 0;
1481}
1482static inline void dma_buf_uninit_debugfs(void)
1483{
1484}
1485#endif
1486
1487static int __init dma_buf_init(void)
1488{
1489        int ret;
1490
1491        ret = dma_buf_init_sysfs_statistics();
1492        if (ret)
1493                return ret;
1494
1495        dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1496        if (IS_ERR(dma_buf_mnt))
1497                return PTR_ERR(dma_buf_mnt);
1498
1499        mutex_init(&db_list.lock);
1500        INIT_LIST_HEAD(&db_list.head);
1501        dma_buf_init_debugfs();
1502        return 0;
1503}
1504subsys_initcall(dma_buf_init);
1505
1506static void __exit dma_buf_deinit(void)
1507{
1508        dma_buf_uninit_debugfs();
1509        kern_unmount(dma_buf_mnt);
1510        dma_buf_uninit_sysfs_statistics();
1511}
1512__exitcall(dma_buf_deinit);
1513