linux/drivers/media/common/videobuf2/videobuf2-dma-contig.c
/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

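/*
 * vb2_dc_get_contiguous_size() - length of the leading contiguous chunk
 * @sgt: DMA-mapped scatterlist to inspect
 *
 * Walk the mapped entries and sum their lengths for as long as each entry
 * starts exactly where the previous one ended in DMA address space. Callers
 * compare the result against the requested buffer size to decide whether
 * the mapping can be used as a single contiguous region.
 */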
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

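/*
 * struct vb2_dc_attachment - per-attachment state kept by the exporter
 * @sgt:     importer-private copy of the exporter's base scatterlist
 * @dma_dir: direction the copy is currently mapped in, or DMA_NONE while
 *           it is not mapped
 */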
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

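/*
 * vb2_dc_get_base_sgt() - describe the MMAP allocation as a scatterlist
 *
 * Builds an sg_table for the coherent allocation via
 * dma_get_sgtable_attrs(). The caller caches the result in buf->sgt_base,
 * which then serves as the template copied by vb2_dc_dmabuf_ops_attach()
 * for every DMA-BUF attachment.
 */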
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

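/*
 * vb2_dc_get_userptr() - pin a userspace buffer for DMA
 *
 * Pins the user pages with a frame vector and maps them with
 * dma_map_sg_attrs(); the mapping is only accepted if it comes out
 * contiguous and at least @size bytes long. If the range has no struct
 * pages behind it (for example a PFNMAP mapping), it is mapped with
 * dma_map_resource() instead, provided the PFNs are physically contiguous.
 */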
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* check that the dmabuf is big enough to store a contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
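
/*
 * Example usage (an illustrative sketch, not part of this file): a driver
 * on an IOMMU-backed platform would typically raise the segment size limit
 * in probe(), before the vb2 queue is initialized, and release the
 * allocated dma_parms again in remove(). The foo_probe()/foo_remove()
 * names are hypothetical placeholders; DMA_BIT_MASK(32) is just a common
 * choice of a large enough limit.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *						      DMA_BIT_MASK(32));
 *		if (ret)
 *			return ret;
 *
 *		... set up and register the vb2 queue here ...
 *
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		... unregister the queue and release buffers first ...
 *
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *		return 0;
 *	}
 */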

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");