/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
        void                            *vaddr;
        struct frame_vector             *vec;
        enum dma_data_direction         dma_dir;
        unsigned long                   size;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
        struct dma_buf                  *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

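/*
 * MMAP mode: back the buffer with vmalloc_user() memory, which is zeroed
 * and suitable for mapping into userspace. The refcount starts at one;
 * vb2_vmalloc_put() frees everything once it drops back to zero.
 */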
static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
                               enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
        if (!buf)
                return NULL;

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        buf->dma_dir = dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        if (!buf->vaddr) {
                pr_debug("vmalloc of size %lu failed\n", buf->size);
                kfree(buf);
                return NULL;
        }

        atomic_inc(&buf->refcount);
        return buf;
}

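/*
 * Drop one reference; the last user frees both the vmalloc area and the
 * bookkeeping structure. Also reached when a userspace mapping created by
 * vb2_vmalloc_mmap() goes away, via the vb2_vmarea_handler.
 */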
static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (atomic_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

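/*
 * USERPTR mode: pin the user pages with vb2_create_framevec(). If the
 * pages cannot be resolved to struct page pointers (e.g. a VM_PFNMAP
 * mapping), the PFNs must be physically contiguous so the range can be
 * ioremapped instead; otherwise the pages are stitched into a kernel
 * mapping with vm_map_ram().
 */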
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                     unsigned long size,
                                     enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;
        struct frame_vector *vec;
        int n_pages, offset, i;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->dma_dir = dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec))
                goto fail_pfnvec_create;
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        if (frame_vector_to_pages(vec) < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * We cannot get page pointers for these pfns. Check that the
                 * memory is physically contiguous and use a direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i - 1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
                                ioremap_nocache(nums[0] << PAGE_SHIFT, size);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
                                        PAGE_KERNEL);
        }

        if (!buf->vaddr)
                goto fail_map;
        buf->vaddr += offset;
        return buf;

fail_map:
        vb2_destroy_framevec(vec);
fail_pfnvec_create:
        kfree(buf);

        return NULL;
}

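/*
 * Undo vb2_vmalloc_get_userptr(): tear down the kernel mapping, mark the
 * pages dirty if the device wrote to them, and unpin the frame vector.
 */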
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;
        struct page **pages;
        unsigned int n_pages;

        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
                pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE)
                        for (i = 0; i < n_pages; i++)
                                set_page_dirty_lock(pages[i]);
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

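/*
 * Return the kernel virtual address of the buffer, if one exists.
 */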
static void *vb2_vmalloc_vaddr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }

        return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

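/*
 * Map the buffer into a userspace VMA with remap_vmalloc_range() and hook
 * up vb2_common_vm_ops so the mapping holds a reference on the buffer for
 * its whole lifetime.
 */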
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory failed, error: %d\n", ret);
                return ret;
        }

        /*
         * Make sure that vm_areas for two buffers won't be merged together.
         */
        vma->vm_flags           |= VM_DONTEXPAND;

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

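/*
 * Build a scatterlist for the attaching device: one sg entry per page of
 * the vmalloc area, resolved with vmalloc_to_page(). The table is mapped
 * lazily in vb2_vmalloc_dmabuf_ops_map(), so dma_dir starts as DMA_NONE.
 */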
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

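/*
 * Map the cached scatterlist for the importer. A mapping in the same
 * direction is reused as-is; a direction change unmaps the old mapping
 * first. The dmabuf mutex serializes concurrent map/unmap calls.
 */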
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* unmapping is deferred to detach, so nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .kmap = vb2_vmalloc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

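/*
 * Export the buffer as a dma-buf. The dma-buf holds its own reference on
 * the vb2 buffer, dropped again from the dma-buf release op.
 */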
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_vmalloc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

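/*
 * Importer side: when a DMABUF buffer is queued, map it into the kernel
 * with dma_buf_vmap() so the driver can access the data through a plain
 * virtual address, and drop the mapping again on unmap/detach.
 */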
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        buf->vaddr = dma_buf_vmap(buf->dbuf);

        return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        dma_buf_vunmap(buf->dbuf, buf->vaddr);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        if (buf->vaddr)
                dma_buf_vunmap(buf->dbuf, buf->vaddr);

        kfree(buf);
}

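/*
 * Attach an imported dma-buf: only sanity-check the size and remember the
 * dma-buf; the actual vmap happens later in vb2_vmalloc_map_dmabuf().
 */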
static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = dma_dir;
        buf->size = size;

        return buf;
}

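/*
 * The allocator is selected by pointing a driver's vb2_queue at this ops
 * table. A minimal usage sketch (illustrative, not part of this file):
 *
 *      q->mem_ops = &vb2_vmalloc_memops;
 *      ret = vb2_queue_init(q);
 */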
const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc          = vb2_vmalloc_alloc,
        .put            = vb2_vmalloc_put,
        .get_userptr    = vb2_vmalloc_get_userptr,
        .put_userptr    = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
        .get_dmabuf     = vb2_vmalloc_get_dmabuf,
#endif
        .map_dmabuf     = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf   = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf  = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf  = vb2_vmalloc_detach_dmabuf,
        .vaddr          = vb2_vmalloc_vaddr,
        .mmap           = vb2_vmalloc_mmap,
        .num_users      = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");