linux/drivers/media/common/videobuf2/videobuf2-vmalloc.c
/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
        void                            *vaddr;
        struct frame_vector             *vec;
        enum dma_data_direction         dma_dir;
        unsigned long                   size;
        refcount_t                      refcount;
        struct vb2_vmarea_handler       handler;
        struct dma_buf                  *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

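/*
 * Allocate an MMAP buffer: kzalloc the bookkeeping struct, grab the payload
 * with vmalloc_user() so it can later be mapped into userspace, and set up
 * the vm_area handler so mmap()ed references keep the buffer refcounted.
 */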
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
                               unsigned long size, enum dma_data_direction dma_dir,
                               gfp_t gfp_flags)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        buf->dma_dir = dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        if (!buf->vaddr) {
                pr_debug("vmalloc of size %ld failed\n", buf->size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        refcount_set(&buf->refcount, 1);
        return buf;
}

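/*
 * Drop one reference to the buffer; the vmalloc area and the bookkeeping
 * struct are freed only when the last user (allocation, mmap mappings,
 * exported dma-bufs) goes away.
 */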
static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (refcount_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

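/*
 * Wrap a userspace buffer (USERPTR mode): pin the pages with a frame vector,
 * then either vm_map_ram() them into a contiguous kernel mapping or, when the
 * PFNs carry no struct page (VM_PFNMAP-style mappings), require them to be
 * physically contiguous and ioremap the range instead.
 */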
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
                                     unsigned long size,
                                     enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;
        struct frame_vector *vec;
        int n_pages, offset, i;
        int ret = -ENOMEM;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dma_dir = dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
                                               dma_dir == DMA_BIDIRECTIONAL);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_pfnvec_create;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        if (frame_vector_to_pages(vec) < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * We cannot get page pointers for these pfns. Check memory is
                 * physically contiguous and use direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
                        ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
                                        PAGE_KERNEL);
        }

        if (!buf->vaddr)
                goto fail_map;
        buf->vaddr += offset;
        return buf;

fail_map:
        vb2_destroy_framevec(vec);
fail_pfnvec_create:
        kfree(buf);

        return ERR_PTR(ret);
}

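/*
 * Undo vb2_vmalloc_get_userptr(): tear down the kernel mapping, mark the
 * pages dirty if the device may have written to them, and release the
 * pinned frame vector.
 */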
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;
        struct page **pages;
        unsigned int n_pages;

        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
                pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE ||
                    buf->dma_dir == DMA_BIDIRECTIONAL)
                        for (i = 0; i < n_pages; i++)
                                set_page_dirty_lock(pages[i]);
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

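/* Return the kernel virtual address of the buffer, if one was set up. */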
static void *vb2_vmalloc_vaddr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }

        return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        return refcount_read(&buf->refcount);
}

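/*
 * Map an MMAP buffer into userspace. remap_vmalloc_range() inserts the
 * vmalloc pages into the vma; the common vb2 vm_area operations then keep
 * the buffer alive for as long as the mapping exists.
 */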
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory, error: %d\n", ret);
                return ret;
        }

        /*
         * Make sure that vm_areas for 2 buffers won't be merged together
         */
        vma->vm_flags           |= VM_DONTEXPAND;

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

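/*
 * Exporter attach: build a scatterlist with one entry per vmalloc page so
 * that an importing device can later DMA-map the buffer.
 */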
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

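/*
 * Exporter map: DMA-map the cached scatterlist for the importing device,
 * reusing a previous mapping when the direction has not changed. The
 * dma-buf lock serializes concurrent map/unmap calls.
 */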
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .map = vb2_vmalloc_dmabuf_ops_kmap,
        .map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

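/*
 * Export an MMAP buffer as a dma-buf. The exported dma-buf holds its own
 * reference to the vb2 buffer, dropped again in the ops' release callback.
 */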
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_vmalloc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

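/*
 * Importer side: obtain a kernel mapping of an attached dma-buf so the
 * buffer can be accessed through the usual vaddr() callback.
 */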
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        buf->vaddr = dma_buf_vmap(buf->dbuf);

        return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        dma_buf_vunmap(buf->dbuf, buf->vaddr);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        if (buf->vaddr)
                dma_buf_vunmap(buf->dbuf, buf->vaddr);

        kfree(buf);
}

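/*
 * Importer side: remember the dma-buf to import; the actual vmap happens
 * later in vb2_vmalloc_map_dmabuf().
 */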
static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = dma_dir;
        buf->size = size;

        return buf;
}


const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc          = vb2_vmalloc_alloc,
        .put            = vb2_vmalloc_put,
        .get_userptr    = vb2_vmalloc_get_userptr,
        .put_userptr    = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
        .get_dmabuf     = vb2_vmalloc_get_dmabuf,
#endif
        .map_dmabuf     = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf   = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf  = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf  = vb2_vmalloc_detach_dmabuf,
        .vaddr          = vb2_vmalloc_vaddr,
        .mmap           = vb2_vmalloc_mmap,
        .num_users      = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
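
/*
 * Illustrative use (not part of this file): a driver whose hardware does not
 * need physically contiguous memory would typically point its vb2 queue at
 * these ops before calling vb2_queue_init(), e.g.:
 *
 *      q->mem_ops = &vb2_vmalloc_memops;
 */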

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");