linux/drivers/base/dma-buf.c
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export_named - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator-specific data and ops to the buffer.
 * Additionally, provide a name string for the exporter; useful in debugging.
 *
 * @priv:	[in]	Attach private data of allocator to this buffer
 * @ops:	[in]	Attach allocator-defined dma buf ops to the new buffer.
 * @size:	[in]	Size of the buffer
 * @flags:	[in]	mode flags for the file.
 * @exp_name:	[in]	name of the exporting module - useful for debugging.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. If required ops are
 * missing, or if allocation of the struct dma_buf fails, an ERR_PTR wrapping
 * a negative error code is returned instead.
 */
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
				size_t size, int flags, const char *exp_name)
{
	struct dma_buf *dmabuf;
	struct file *file;

	if (WARN_ON(!priv || !ops
			  || !ops->map_dma_buf
			  || !ops->unmap_dma_buf
			  || !ops->release
			  || !ops->kmap_atomic
			  || !ops->kmap
			  || !ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
	if (dmabuf == NULL)
		return ERR_PTR(-ENOMEM);

	dmabuf->priv = priv;
	dmabuf->ops = ops;
	dmabuf->size = size;
	dmabuf->exp_name = exp_name;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
	if (IS_ERR(file)) {
		/* don't leak the dma_buf if the anon file can't be created */
		kfree(dmabuf);
		return ERR_CAST(file);
	}

	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export_named);
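
/*
 * Example (illustrative sketch, not part of the original file): an exporter
 * typically fills out a dma_buf_ops table once and then wraps each buffer it
 * wants to share; "my_dmabuf_ops" and "buffer" are hypothetical driver
 * objects.
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = dma_buf_export_named(buffer, &my_dmabuf_ops, buffer->size,
 *				      O_RDWR, KBUILD_MODNAME);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 */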

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an fd associated with the buffer's file. Otherwise,
 * returns a negative error code.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
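
/*
 * Example (illustrative sketch, not part of the original file): handing the
 * buffer to userspace from a hypothetical exporter ioctl; "args" is the
 * hypothetical ioctl argument struct. O_CLOEXEC is the usual flag so the fd
 * is not leaked across exec().
 *
 *	int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 *	if (fd < 0) {
 *		dma_buf_put(dmabuf);
 *		return fd;
 *	}
 *	args->fd = fd;
 *	return 0;
 */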

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; the
 * underlying file's refcount is raised via fget(). Returns an ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput()
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
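
/*
 * Example (illustrative sketch, not part of the original file): an importer
 * that receives an fd from userspace pairs dma_buf_get() with dma_buf_put()
 * once it no longer needs the buffer.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	... attach, map and use the buffer ...
 *	dma_buf_put(dmabuf);
 */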

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; may return negative
 * error codes.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (attach == NULL)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
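
/*
 * Example (illustrative sketch, not part of the original file): an importing
 * driver attaches its device before mapping and detaches after the last
 * unmap; "dev" is the importer's struct device.
 *
 *	struct dma_buf_attachment *attach;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *	... dma_buf_map_attachment() / dma_buf_unmap_attachment() ...
 *	dma_buf_detach(dmabuf, attach);
 */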

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns the sg_table containing the scatterlist for the buffer; may return
 * NULL or an ERR_PTR.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table = ERR_PTR(-EINVAL);

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; may
 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
						direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
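
/*
 * Example (illustrative sketch, not part of the original file): mapping an
 * attachment for DMA and releasing it again; DMA_BIDIRECTIONAL is only a
 * placeholder direction here.
 *
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR_OR_NULL(sgt))
 *		return sgt ? PTR_ERR(sgt) : -ENOMEM;
 *	... program the device from the entries of sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 */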

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of the cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of the cpu access.
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
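
/*
 * Example (illustrative sketch, not part of the original file): bracketing a
 * CPU read of the first 4 KiB of the buffer from kernel context.
 *
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, 0, 4096, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	... kmap and read the pages of interest ...
 *	dma_buf_end_cpu_access(dmabuf, 0, 4096, DMA_FROM_DEVICE);
 */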

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
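
/*
 * Example (illustrative sketch, not part of the original file): copying one
 * page out of the buffer inside a begin/end_cpu_access bracket; "i" is the
 * page index and "bounce" a hypothetical destination buffer.
 *
 *	void *vaddr = dma_buf_kmap(dmabuf, i);
 *
 *	memcpy(bounce, vaddr, PAGE_SIZE);
 *	dma_buf_kunmap(dmabuf, i, vaddr);
 */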

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
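
/*
 * Example (illustrative sketch, not part of the original file): a driver that
 * exposes an imported buffer through its own mmap file operation can forward
 * to dma_buf_mmap(); "struct my_drv_object" is hypothetical and the mapping
 * starts at offset 0.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_drv_object *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */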

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use is to map
 * frequently used objects linearly into kernel address space.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (IS_ERR_OR_NULL(ptr))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
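
/*
 * Example (illustrative sketch, not part of the original file): obtaining a
 * linear kernel mapping of the whole buffer and dropping it again.
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... access the buffer contiguously through vaddr ...
 *	dma_buf_vunmap(dmabuf, vaddr);
 */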

#ifdef CONFIG_DEBUG_FS
static int dma_buf_describe(struct seq_file *s)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_printf(s, "\nDma-buf Objects:\n");
	seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

		if (ret) {
			seq_printf(s,
				  "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "\t");

		seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
				buf_obj->exp_name, buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				(long)(buf_obj->file->f_count.counter));

		seq_printf(s, "\t\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t\t");

			/* use dev_name(): init_name is usually NULL once the
			 * device has been registered */
			seq_printf(s, "%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "\n\t\tTotal %d devices attached\n",
				attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

static int dma_buf_show(struct seq_file *s, void *unused)
{
	/* the debugfs file's private data is the describe callback */
	int (*func)(struct seq_file *) = s->private;

	return func(s);
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_buf_show, inode->i_private);
}

static const struct file_operations dma_buf_debug_fops = {
	.open		= dma_buf_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	int err = 0;

	dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(dma_buf_debugfs_dir)) {
		err = PTR_ERR(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		return err;
	}

	err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);

	if (err)
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	if (dma_buf_debugfs_dir)
		debugfs_remove_recursive(dma_buf_debugfs_dir);
}

int dma_buf_debugfs_create_file(const char *name,
				int (*write)(struct seq_file *))
{
	struct dentry *d;

	d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
			write, &dma_buf_debug_fops);

	return PTR_RET(d);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);