linux/drivers/dma-buf/dma-buf.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
        struct list_head head;
        struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        struct dma_buf *dmabuf;
        char name[DMA_BUF_NAME_LEN];
        size_t ret = 0;

        dmabuf = dentry->d_fsdata;
        spin_lock(&dmabuf->name_lock);
        if (dmabuf->name)
                ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
        spin_unlock(&dmabuf->name_lock);

        return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
                             dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
        struct dma_buf *dmabuf;

        dmabuf = dentry->d_fsdata;
        if (unlikely(!dmabuf))
                return;

        BUG_ON(dmabuf->vmapping_counter);

        /*
         * If you hit this BUG() it could mean:
         * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
         * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
         */
        BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

        dma_buf_stats_teardown(dmabuf);
        dmabuf->ops->release(dmabuf);

        if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
                dma_resv_fini(dmabuf->resv);

        WARN_ON(!list_empty(&dmabuf->attachments));
        module_put(dmabuf->owner);
        kfree(dmabuf->name);
        kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        mutex_lock(&db_list.lock);
        list_del(&dmabuf->list_node);
        mutex_unlock(&db_list.lock);

        return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
        .d_dname = dmabuffs_dname,
        .d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx;

        ctx = init_pseudo(fc, DMA_BUF_MAGIC);
        if (!ctx)
                return -ENOMEM;
        ctx->dops = &dma_buf_dentry_ops;
        return 0;
}

static struct file_system_type dma_buf_fs_type = {
        .name = "dmabuf",
        .init_fs_context = dma_buf_fs_init_context,
        .kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        /* check if buffer supports mmap */
        if (!dmabuf->ops->mmap)
                return -EINVAL;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
        struct dma_buf *dmabuf;
        loff_t base;

        if (!is_dma_buf_file(file))
                return -EBADF;

        dmabuf = file->private_data;

        /* only support discovering the end of the buffer,
         * but also allow SEEK_SET to maintain the idiomatic
         * SEEK_END(0), SEEK_CUR(0) pattern
         */
        if (whence == SEEK_END)
                base = dmabuf->size;
        else if (whence == SEEK_SET)
                base = 0;
        else
                return -EINVAL;

        if (offset != 0)
                return -EINVAL;

        return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */
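
/*
 * Example: waiting for implicit fences from userspace. A minimal sketch,
 * assuming "fd" is a dma-buf file descriptor; POLLIN waits for the most
 * recent write/exclusive fence, POLLOUT waits for all attached fences.
 *
 *      #include <poll.h>
 *
 *      static int wait_for_readable(int fd, int timeout_ms)
 *      {
 *              struct pollfd pfd = {
 *                      .fd = fd,
 *                      .events = POLLIN,       // write/exclusive fence done?
 *              };
 *
 *              return poll(&pfd, 1, timeout_ms);  // >0 once fences signaled
 *      }
 */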

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
        struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
        unsigned long flags;

        spin_lock_irqsave(&dcb->poll->lock, flags);
        wake_up_locked_poll(dcb->poll, dcb->active);
        dcb->active = 0;
        spin_unlock_irqrestore(&dcb->poll->lock, flags);
        dma_fence_put(fence);
        /* Paired with get_file in dma_buf_poll */
        fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
                                struct dma_buf_poll_cb_t *dcb)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        int r;

        dma_resv_for_each_fence(&cursor, resv, write, fence) {
                dma_fence_get(fence);
                r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
                if (!r)
                        return true;
                dma_fence_put(fence);
        }

        return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
        struct dma_buf *dmabuf;
        struct dma_resv *resv;
        __poll_t events;

        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
                return EPOLLERR;

        resv = dmabuf->resv;

        poll_wait(file, &dmabuf->poll, poll);

        events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
        if (!events)
                return 0;

        dma_resv_lock(resv, NULL);

        if (events & EPOLLOUT) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

                /* Check that callback isn't busy */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
                        events &= ~EPOLLOUT;
                else
                        dcb->active = EPOLLOUT;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (events & EPOLLOUT) {
                        /* Paired with fput in dma_buf_poll_cb */
                        get_file(dmabuf->file);

                        if (!dma_buf_poll_add_cb(resv, true, dcb))
                                /* No callback queued, wake up any other waiters */
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        else
                                events &= ~EPOLLOUT;
                }
        }

        if (events & EPOLLIN) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

                /* Check that callback isn't busy */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
                        events &= ~EPOLLIN;
                else
                        dcb->active = EPOLLIN;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (events & EPOLLIN) {
                        /* Paired with fput in dma_buf_poll_cb */
                        get_file(dmabuf->file);

                        if (!dma_buf_poll_add_cb(resv, false, dcb))
                                /* No callback queued, wake up any other waiters */
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        else
                                events &= ~EPOLLIN;
                }
        }

        dma_resv_unlock(resv);
        return events;
}
/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track the usage.
 * It supports changing the name of the dma-buf if the same
 * piece of memory is used for multiple purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success, or a negative error code from strndup_user()
 * otherwise.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
        char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

        if (IS_ERR(name))
                return PTR_ERR(name);

        spin_lock(&dmabuf->name_lock);
        kfree(dmabuf->name);
        dmabuf->name = name;
        spin_unlock(&dmabuf->name_lock);

        return 0;
}

static long dma_buf_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct dma_buf *dmabuf;
        struct dma_buf_sync sync;
        enum dma_data_direction direction;
        int ret;

        dmabuf = file->private_data;

        switch (cmd) {
        case DMA_BUF_IOCTL_SYNC:
                if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
                        return -EFAULT;

                if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
                        return -EINVAL;

                switch (sync.flags & DMA_BUF_SYNC_RW) {
                case DMA_BUF_SYNC_READ:
                        direction = DMA_FROM_DEVICE;
                        break;
                case DMA_BUF_SYNC_WRITE:
                        direction = DMA_TO_DEVICE;
                        break;
                case DMA_BUF_SYNC_RW:
                        direction = DMA_BIDIRECTIONAL;
                        break;
                default:
                        return -EINVAL;
                }

                if (sync.flags & DMA_BUF_SYNC_END)
                        ret = dma_buf_end_cpu_access(dmabuf, direction);
                else
                        ret = dma_buf_begin_cpu_access(dmabuf, direction);

                return ret;

        case DMA_BUF_SET_NAME_A:
        case DMA_BUF_SET_NAME_B:
                return dma_buf_set_name(dmabuf, (const char __user *)arg);

        default:
                return -ENOTTY;
        }
}
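
/*
 * Example: naming a buffer from userspace. A minimal sketch, assuming "fd"
 * is a dma-buf file descriptor; DMA_BUF_SET_NAME comes from the uapi header
 * <linux/dma-buf.h> and names are capped at DMA_BUF_NAME_LEN.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/dma-buf.h>
 *
 *      static int name_buffer(int fd, const char *name)
 *      {
 *              // the name then shows up in fdinfo and debugfs for tracking
 *              return ioctl(fd, DMA_BUF_SET_NAME, name);
 *      }
 */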

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
        struct dma_buf *dmabuf = file->private_data;

        seq_printf(m, "size:\t%zu\n", dmabuf->size);
        /* Don't count the temporary reference taken inside procfs seq_show */
        seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
        seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
        spin_lock(&dmabuf->name_lock);
        if (dmabuf->name)
                seq_printf(m, "name:\t%s\n", dmabuf->name);
        spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_file_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
        .unlocked_ioctl = dma_buf_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .show_fdinfo    = dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
        return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
        static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
        struct file *file;
        struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

        if (IS_ERR(inode))
                return ERR_CAST(inode);

        inode->i_size = dmabuf->size;
        inode_set_bytes(inode, dmabuf->size);

        /*
         * The ->i_ino acquired from get_next_ino() is not unique thus
         * not suitable for using it as dentry name by dmabuf stats.
         * Override ->i_ino with the unique and dmabuffs specific
         * value.
         */
        inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
        file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
                                 flags, &dma_buf_fops);
        if (IS_ERR(file))
                goto err_alloc_file;
        file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
        file->private_data = dmabuf;
        file->f_path.dentry->d_fsdata = dmabuf;

        return file;

err_alloc_file:
        iput(inode);
        return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. A sketch of the importer side of this sequence follows below.
 */
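
/*
 * Example: the importer side of the sequence above. A minimal sketch,
 * assuming a hypothetical driver with a device "dev" that received a dma-buf
 * fd from userspace; the cleanup labels double as the success path.
 *
 *      static int import_buffer(struct device *dev, int fd)
 *      {
 *              struct dma_buf *dmabuf;
 *              struct dma_buf_attachment *attach;
 *              struct sg_table *sgt;
 *              int ret = 0;
 *
 *              dmabuf = dma_buf_get(fd);               // step 2: fd -> dma_buf
 *              if (IS_ERR(dmabuf))
 *                      return PTR_ERR(dmabuf);
 *
 *              attach = dma_buf_attach(dmabuf, dev);   // step 2: attach device
 *              if (IS_ERR(attach)) {
 *                      ret = PTR_ERR(attach);
 *                      goto err_put;
 *              }
 *
 *              sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *              if (IS_ERR(sgt)) {                      // step 3: map for DMA
 *                      ret = PTR_ERR(sgt);
 *                      goto err_detach;
 *              }
 *
 *              // ... program the device using sg_dma_address()/sg_dma_len() ...
 *
 *              dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *      err_detach:
 *              dma_buf_detach(dmabuf, attach);         // step 4: clean up
 *      err_put:
 *              dma_buf_put(dmabuf);
 *              return ret;
 *      }
 */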

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:   [in]    holds all the export related information provided
 *                      by the exporter. see &struct dma_buf_export_info
 *                      for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On failure,
 * for example when mandatory ops are missing or allocating the struct dma_buf
 * fails, returns an ERR_PTR() encoded negative error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
        struct dma_buf *dmabuf;
        struct dma_resv *resv = exp_info->resv;
        struct file *file;
        size_t alloc_size = sizeof(struct dma_buf);
        int ret;

        if (!exp_info->resv)
                alloc_size += sizeof(struct dma_resv);
        else
                /* prevent &dma_buf[1] == dma_buf->resv */
                alloc_size += 1;

        if (WARN_ON(!exp_info->priv
                          || !exp_info->ops
                          || !exp_info->ops->map_dma_buf
                          || !exp_info->ops->unmap_dma_buf
                          || !exp_info->ops->release)) {
                return ERR_PTR(-EINVAL);
        }

        if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
                    (exp_info->ops->pin || exp_info->ops->unpin)))
                return ERR_PTR(-EINVAL);

        if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
                return ERR_PTR(-EINVAL);

        if (!try_module_get(exp_info->owner))
                return ERR_PTR(-ENOENT);

        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (!dmabuf) {
                ret = -ENOMEM;
                goto err_module;
        }

        dmabuf->priv = exp_info->priv;
        dmabuf->ops = exp_info->ops;
        dmabuf->size = exp_info->size;
        dmabuf->exp_name = exp_info->exp_name;
        dmabuf->owner = exp_info->owner;
        spin_lock_init(&dmabuf->name_lock);
        init_waitqueue_head(&dmabuf->poll);
        dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
        dmabuf->cb_in.active = dmabuf->cb_out.active = 0;

        if (!resv) {
                resv = (struct dma_resv *)&dmabuf[1];
                dma_resv_init(resv);
        }
        dmabuf->resv = resv;

        file = dma_buf_getfile(dmabuf, exp_info->flags);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto err_dmabuf;
        }

        file->f_mode |= FMODE_LSEEK;
        dmabuf->file = file;

        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);

        mutex_lock(&db_list.lock);
        list_add(&dmabuf->list_node, &db_list.head);
        mutex_unlock(&db_list.lock);

        ret = dma_buf_stats_setup(dmabuf);
        if (ret)
                goto err_sysfs;

        return dmabuf;

err_sysfs:
        /*
         * Set file->f_path.dentry->d_fsdata to NULL so that when
         * dma_buf_release() gets invoked by dentry_ops, it exits
         * early before calling the release() dma_buf op.
         */
        file->f_path.dentry->d_fsdata = NULL;
        fput(file);
err_dmabuf:
        kfree(dmabuf);
err_module:
        module_put(exp_info->owner);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
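
/*
 * Example: the exporter side. A minimal sketch of a hypothetical driver
 * exporting a private buffer "priv" with its own "my_dmabuf_ops" (assumed to
 * implement at least map_dma_buf, unmap_dma_buf and release) and handing the
 * resulting fd back to userspace. "struct my_buffer" is illustrative only.
 *
 *      static int export_buffer(struct my_buffer *priv, size_t size)
 *      {
 *              DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *              struct dma_buf *dmabuf;
 *              int fd;
 *
 *              exp_info.ops = &my_dmabuf_ops;
 *              exp_info.size = size;
 *              exp_info.flags = O_RDWR | O_CLOEXEC;
 *              exp_info.priv = priv;
 *
 *              dmabuf = dma_buf_export(&exp_info);
 *              if (IS_ERR(dmabuf))
 *                      return PTR_ERR(dmabuf);
 *
 *              fd = dma_buf_fd(dmabuf, O_CLOEXEC);     // install an fd
 *              if (fd < 0)
 *                      dma_buf_put(dmabuf);            // drop the export ref
 *              return fd;
 *      }
 */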

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:     [in]    pointer to dma_buf for which fd is required.
 * @flags:      [in]    flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
        int fd;

        if (!dmabuf || !dmabuf->file)
                return -EINVAL;

        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                return fd;

        fd_install(fd, dmabuf->file);

        return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd: [in]    fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase the refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
        struct file *file;

        file = fget(fd);

        if (!file)
                return ERR_PTR(-EBADF);

        if (!is_dma_buf_file(file)) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:     [in]    buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf || !dmabuf->file))
                return;

        fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
        int i;
        struct scatterlist *sg;

        /* To catch abuse of the underlying struct page by importers mix
         * up the bits, but take care to preserve the low SG_ bits to
         * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
         * before passing the sgt back to the exporter.
         */
        for_each_sgtable_sg(sg_table, sg, i)
                sg->page_link ^= ~0xffUL;
#endif
}

static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
                                      enum dma_data_direction direction)
{
        struct sg_table *sg_table;

        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

        if (!IS_ERR_OR_NULL(sg_table))
                mangle_sg_table(sg_table);

        return sg_table;
}

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:             [in]    buffer to attach device to.
 * @dev:                [in]    device to be attached.
 * @importer_ops:       [in]    importer operations for the attachment
 * @importer_priv:      [in]    importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to the newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
                       const struct dma_buf_attach_ops *importer_ops,
                       void *importer_priv)
{
        struct dma_buf_attachment *attach;
        int ret;

        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);

        if (WARN_ON(importer_ops && !importer_ops->move_notify))
                return ERR_PTR(-EINVAL);

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return ERR_PTR(-ENOMEM);

        attach->dev = dev;
        attach->dmabuf = dmabuf;
        if (importer_ops)
                attach->peer2peer = importer_ops->allow_peer2peer;
        attach->importer_ops = importer_ops;
        attach->importer_priv = importer_priv;

        if (dmabuf->ops->attach) {
                ret = dmabuf->ops->attach(dmabuf, attach);
                if (ret)
                        goto err_attach;
        }
        dma_resv_lock(dmabuf->resv, NULL);
        list_add(&attach->node, &dmabuf->attachments);
        dma_resv_unlock(dmabuf->resv);

        /* When either the importer or the exporter can't handle dynamic
         * mappings we cache the mapping here to avoid issues with the
         * reservation object lock.
         */
        if (dma_buf_attachment_is_dynamic(attach) !=
            dma_buf_is_dynamic(dmabuf)) {
                struct sg_table *sgt;

                if (dma_buf_is_dynamic(attach->dmabuf)) {
                        dma_resv_lock(attach->dmabuf->resv, NULL);
                        ret = dmabuf->ops->pin(attach);
                        if (ret)
                                goto err_unlock;
                }

                sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
                if (!sgt)
                        sgt = ERR_PTR(-ENOMEM);
                if (IS_ERR(sgt)) {
                        ret = PTR_ERR(sgt);
                        goto err_unpin;
                }
                if (dma_buf_is_dynamic(attach->dmabuf))
                        dma_resv_unlock(attach->dmabuf->resv);
                attach->sgt = sgt;
                attach->dir = DMA_BIDIRECTIONAL;
        }

        return attach;

err_attach:
        kfree(attach);
        return ERR_PTR(ret);

err_unpin:
        if (dma_buf_is_dynamic(attach->dmabuf))
                dmabuf->ops->unpin(attach);

err_unlock:
        if (dma_buf_is_dynamic(attach->dmabuf))
                dma_resv_unlock(attach->dmabuf->resv);

        dma_buf_detach(dmabuf, attach);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
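
/*
 * Example: a dynamic importer. A minimal sketch of hypothetical importer ops
 * ("my_move_notify", "struct my_importer" and "my_importer_invalidate" are
 * illustrative only); move_notify is mandatory for a dynamic attachment and
 * is invoked with the reservation lock held.
 *
 *      static void my_move_notify(struct dma_buf_attachment *attach)
 *      {
 *              struct my_importer *imp = attach->importer_priv;
 *
 *              // backing storage is moving: invalidate cached mappings,
 *              // the next dma_buf_map_attachment() picks up the new place
 *              my_importer_invalidate(imp);
 *      }
 *
 *      static const struct dma_buf_attach_ops my_attach_ops = {
 *              .allow_peer2peer = true,
 *              .move_notify = my_move_notify,
 *      };
 *
 *      static struct dma_buf_attachment *
 *      my_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 *                        struct my_importer *imp)
 *      {
 *              return dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);
 *      }
 */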

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:     [in]    buffer to attach device to.
 * @dev:        [in]    device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev)
{
        return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);

static void __unmap_dma_buf(struct dma_buf_attachment *attach,
                            struct sg_table *sg_table,
                            enum dma_data_direction direction)
{
        /* uses XOR, hence this unmangles */
        mangle_sg_table(sg_table);

        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:     [in]    buffer to detach from.
 * @attach:     [in]    attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        if (WARN_ON(!dmabuf || !attach))
                return;

        if (attach->sgt) {
                if (dma_buf_is_dynamic(attach->dmabuf))
                        dma_resv_lock(attach->dmabuf->resv, NULL);

                __unmap_dma_buf(attach, attach->sgt, attach->dir);

                if (dma_buf_is_dynamic(attach->dmabuf)) {
                        dmabuf->ops->unpin(attach);
                        dma_resv_unlock(attach->dmabuf->resv);
                }
        }

        dma_resv_lock(dmabuf->resv, NULL);
        list_del(&attach->node);
        dma_resv_unlock(dmabuf->resv);
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);

        kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:     [in]    attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
        struct dma_buf *dmabuf = attach->dmabuf;
        int ret = 0;

        WARN_ON(!dma_buf_attachment_is_dynamic(attach));

        dma_resv_assert_held(dmabuf->resv);

        if (dmabuf->ops->pin)
                ret = dmabuf->ops->pin(attach);

        return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:     [in]    attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
        struct dma_buf *dmabuf = attach->dmabuf;

        WARN_ON(!dma_buf_attachment_is_dynamic(attach));

        dma_resv_assert_held(dmabuf->resv);

        if (dmabuf->ops->unpin)
                dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:     [in]    attachment whose scatterlist is to be returned
 * @direction:  [in]    direction of DMA transfer
 *
 * Returns an sg_table containing the scatterlist of the backing storage;
 * returns ERR_PTR on error. May return -EINTR if it is interrupted by a
 * signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                        enum dma_data_direction direction)
{
        struct sg_table *sg_table;
        int r;

        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);

        if (dma_buf_attachment_is_dynamic(attach))
                dma_resv_assert_held(attach->dmabuf->resv);

        if (attach->sgt) {
                /*
                 * Two mappings with different directions for the same
                 * attachment are not allowed.
                 */
                if (attach->dir != direction &&
                    attach->dir != DMA_BIDIRECTIONAL)
                        return ERR_PTR(-EBUSY);

                return attach->sgt;
        }

        if (dma_buf_is_dynamic(attach->dmabuf)) {
                dma_resv_assert_held(attach->dmabuf->resv);
                if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
                        r = attach->dmabuf->ops->pin(attach);
                        if (r)
                                return ERR_PTR(r);
                }
        }

        sg_table = __map_dma_buf(attach, direction);
        if (!sg_table)
                sg_table = ERR_PTR(-ENOMEM);

        if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
             !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
                attach->dmabuf->ops->unpin(attach);

        if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
                attach->sgt = sg_table;
                attach->dir = direction;
        }

#ifdef CONFIG_DMA_API_DEBUG
        if (!IS_ERR(sg_table)) {
                struct scatterlist *sg;
                u64 addr;
                int len;
                int i;

                for_each_sgtable_dma_sg(sg_table, sg, i) {
                        addr = sg_dma_address(sg);
                        len = sg_dma_len(sg);
                        if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
                                pr_debug("%s: addr %llx or len %x is not page aligned!\n",
                                         __func__, addr, len);
                        }
                }
        }
#endif /* CONFIG_DMA_API_DEBUG */
        return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:     [in]    attachment to unmap buffer from
 * @sg_table:   [in]    scatterlist info of the buffer to unmap
 * @direction:  [in]    direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
                                struct sg_table *sg_table,
                                enum dma_data_direction direction)
{
        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;

        if (dma_buf_attachment_is_dynamic(attach))
                dma_resv_assert_held(attach->dmabuf->resv);

        if (attach->sgt == sg_table)
                return;

        if (dma_buf_is_dynamic(attach->dmabuf))
                dma_resv_assert_held(attach->dmabuf->resv);

        __unmap_dma_buf(attach, sg_table, direction);

        if (dma_buf_is_dynamic(attach->dmabuf) &&
            !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
                dma_buf_unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:     [in]    buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
        struct dma_buf_attachment *attach;

        dma_resv_assert_held(dmabuf->resv);

        list_for_each_entry(attach, &dmabuf->attachments, node)
                if (attach->importer_ops)
                        attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
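
/*
 * Example: an exporter announcing a move. A minimal sketch, assuming a
 * hypothetical eviction path ("struct my_buffer", "my_buffer_relocate" are
 * illustrative only); the reservation lock must be held around the call, as
 * asserted above.
 *
 *      static int my_evict_buffer(struct my_buffer *priv)
 *      {
 *              struct dma_buf *dmabuf = priv->dmabuf;
 *              int ret;
 *
 *              ret = dma_resv_lock(dmabuf->resv, NULL);
 *              if (ret)
 *                      return ret;
 *
 *              dma_buf_move_notify(dmabuf);    // importers drop their mappings
 *              ret = my_buffer_relocate(priv); // actually move the storage
 *
 *              dma_resv_unlock(dmabuf->resv);
 *              return ret;
 *      }
 */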

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following (see the sketch after this
 *   comment):
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *                     unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
 */
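
/*
 * Example: bracketed CPU access from userspace. A minimal sketch, assuming
 * "fd" is a dma-buf fd of "len" bytes; the SYNC ioctl is restarted on
 * -EAGAIN/-EINTR as required above.
 *
 *      #include <errno.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/mman.h>
 *      #include <linux/dma-buf.h>
 *
 *      static int sync_ioctl(int fd, __u64 flags)
 *      {
 *              struct dma_buf_sync sync = { .flags = flags };
 *              int ret;
 *
 *              do {
 *                      ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *              } while (ret == -1 && (errno == EAGAIN || errno == EINTR));
 *              return ret;
 *      }
 *
 *      static int fill_buffer(int fd, size_t len, const void *data)
 *      {
 *              void *ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                               MAP_SHARED, fd, 0);
 *
 *              if (ptr == MAP_FAILED)
 *                      return -1;
 *              sync_ioctl(fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE);
 *              memcpy(ptr, data, len);         // CPU access inside the bracket
 *              sync_ioctl(fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE);
 *              return munmap(ptr, len);
 *      }
 */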

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction)
{
        bool write = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_TO_DEVICE);
        struct dma_resv *resv = dmabuf->resv;
        long ret;

        /* Wait on any implicit rendering fences */
        ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;

        return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:     [in]    buffer to prepare cpu access for.
 * @direction:  [in]    direction of access for the cpu.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                             enum dma_data_direction direction)
{
        int ret = 0;

        if (WARN_ON(!dmabuf))
                return -EINVAL;

        might_lock(&dmabuf->resv->lock.base);

        if (dmabuf->ops->begin_cpu_access)
                ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

        /* Ensure that all fences are waited upon - but we first allow
         * the native handler the chance to do so more efficiently if it
         * chooses. A double invocation here will be reasonably cheap no-op.
         */
        if (ret == 0)
                ret = __dma_buf_begin_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:     [in]    buffer to complete cpu access for.
 * @direction:  [in]    direction of access for the cpu.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                           enum dma_data_direction direction)
{
        int ret = 0;

        WARN_ON(!dmabuf);

        might_lock(&dmabuf->resv->lock.base);

        if (dmabuf->ops->end_cpu_access)
                ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
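
/*
 * Example: bracketed CPU access from the kernel. A minimal sketch of a
 * hypothetical fallback path that patches a buffer through a vmap; the
 * begin/end calls handle implicit fences and cache maintenance.
 *
 *      static int patch_buffer(struct dma_buf *dmabuf, size_t off, u32 val)
 *      {
 *              struct iosys_map map;
 *              int ret;
 *
 *              ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
 *              if (ret)
 *                      return ret;
 *
 *              ret = dma_buf_vmap(dmabuf, &map);
 *              if (!ret) {
 *                      if (!map.is_iomem)      // plain kernel pointer
 *                              memcpy(map.vaddr + off, &val, sizeof(val));
 *                      dma_buf_vunmap(dmabuf, &map);
 *              }
 *
 *              return dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE) ?: ret;
 *      }
 */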

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:     [in]    buffer that should back the vma
 * @vma:        [in]    vma for the mmap
 * @pgoff:      [in]    offset in pages where this mmap should start within the
 *                      dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
{
        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;

        /* check if buffer supports mmap */
        if (!dmabuf->ops->mmap)
                return -EINVAL;

        /* check for offset overflow */
        if (pgoff + vma_pages(vma) < pgoff)
                return -EOVERFLOW;

        /* check for overflowing the buffer's size */
        if (pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        /* readjust the vma */
        vma_set_file(vma, dmabuf->file);
        vma->vm_pgoff = pgoff;

        return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
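
/*
 * Example: an importer forwarding its own mmap to the dma-buf. A minimal
 * sketch of a hypothetical importer file_operations mmap handler ("struct
 * my_importer" is illustrative only) that maps an imported buffer starting
 * at page 0.
 *
 *      static int my_fops_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct my_importer *imp = file->private_data;
 *
 *              // redirects the vma to dmabuf->file and invokes the
 *              // exporter's mmap handler
 *              return dma_buf_mmap(imp->dmabuf, vma, 0);
 *      }
 */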

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:     [in]    buffer to vmap
 * @map:        [out]   returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use is mapping objects
 * linearly into kernel address space for frequently used objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
        struct iosys_map ptr;
        int ret = 0;

        iosys_map_clear(map);

        if (WARN_ON(!dmabuf))
                return -EINVAL;

        if (!dmabuf->ops->vmap)
                return -EINVAL;

        mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                dmabuf->vmapping_counter++;
                BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
                *map = dmabuf->vmap_ptr;
                goto out_unlock;
        }

        BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));

        ret = dmabuf->ops->vmap(dmabuf, &ptr);
        if (WARN_ON_ONCE(ret))
                goto out_unlock;

        dmabuf->vmap_ptr = ptr;
        dmabuf->vmapping_counter = 1;

        *map = dmabuf->vmap_ptr;

out_unlock:
        mutex_unlock(&dmabuf->lock);
        return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:     [in]    buffer to vunmap
 * @map:        [in]    vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
        if (WARN_ON(!dmabuf))
                return;

        BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
        BUG_ON(dmabuf->vmapping_counter == 0);
        BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));

        mutex_lock(&dmabuf->lock);
        if (--dmabuf->vmapping_counter == 0) {
                if (dmabuf->ops->vunmap)
                        dmabuf->ops->vunmap(dmabuf, map);
                iosys_map_clear(&dmabuf->vmap_ptr);
        }
        mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
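
/*
 * Example: the vmap reference counting above in action. A minimal sketch;
 * both calls return the same mapping, and the exporter's vmap/vunmap hooks
 * run only for the first map and the last unmap.
 *
 *      static int map_twice(struct dma_buf *dmabuf)
 *      {
 *              struct iosys_map map_a, map_b;
 *              int ret;
 *
 *              ret = dma_buf_vmap(dmabuf, &map_a);  // ops->vmap, counter = 1
 *              if (ret)
 *                      return ret;
 *              ret = dma_buf_vmap(dmabuf, &map_b);  // reuses map_a, counter = 2
 *              if (ret) {
 *                      dma_buf_vunmap(dmabuf, &map_a);
 *                      return ret;
 *              }
 *
 *              // ... use the mapping through the iosys_map accessors ...
 *
 *              dma_buf_vunmap(dmabuf, &map_b);  // counter = 1, stays mapped
 *              dma_buf_vunmap(dmabuf, &map_a);  // counter = 0, ops->vunmap runs
 *              return 0;
 *      }
 */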

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
        struct dma_buf *buf_obj;
        struct dma_buf_attachment *attach_obj;
        int count = 0, attach_count;
        size_t size = 0;
        int ret;

        ret = mutex_lock_interruptible(&db_list.lock);
        if (ret)
                return ret;

        seq_puts(s, "\nDma-buf Objects:\n");
        seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
                   "size", "flags", "mode", "count", "ino");

        list_for_each_entry(buf_obj, &db_list.head, list_node) {
                ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
                if (ret)
                        goto error_unlock;

                spin_lock(&buf_obj->name_lock);
                seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
                                buf_obj->size,
                                buf_obj->file->f_flags, buf_obj->file->f_mode,
                                file_count(buf_obj->file),
                                buf_obj->exp_name,
                                file_inode(buf_obj->file)->i_ino,
                                buf_obj->name ?: "");
                spin_unlock(&buf_obj->name_lock);

                dma_resv_describe(buf_obj->resv, s);

                seq_puts(s, "\tAttached Devices:\n");
                attach_count = 0;

                list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
                        seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
                        attach_count++;
                }
                dma_resv_unlock(buf_obj->resv);

                seq_printf(s, "Total %d devices attached\n\n",
                                attach_count);

                count++;
                size += buf_obj->size;
        }

        seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

        mutex_unlock(&db_list.lock);
        return 0;

error_unlock:
        mutex_unlock(&db_list.lock);
        return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
        struct dentry *d;
        int err = 0;

        d = debugfs_create_dir("dma_buf", NULL);
        if (IS_ERR(d))
                return PTR_ERR(d);

        dma_buf_debugfs_dir = d;

        d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
                                NULL, &dma_buf_debug_fops);
        if (IS_ERR(d)) {
                pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
                debugfs_remove_recursive(dma_buf_debugfs_dir);
                dma_buf_debugfs_dir = NULL;
                err = PTR_ERR(d);
        }

        return err;
}

static void dma_buf_uninit_debugfs(void)
{
        debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
        return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
        int ret;

        ret = dma_buf_init_sysfs_statistics();
        if (ret)
                return ret;

        dma_buf_mnt = kern_mount(&dma_buf_fs_type);
        if (IS_ERR(dma_buf_mnt))
                return PTR_ERR(dma_buf_mnt);

        mutex_init(&db_list.lock);
        INIT_LIST_HEAD(&db_list.head);
        dma_buf_init_debugfs();
        return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
        dma_buf_uninit_debugfs();
        kern_unmount(dma_buf_mnt);
        dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);