/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

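/*
 * Per-buffer state for this allocator. Depending on how the buffer was
 * created, @vaddr comes from vmalloc_user() (MMAP buffers), from
 * vm_map_ram()/ioremap() (USERPTR buffers) or from dma_buf_vmap()
 * (imported DMABUF buffers).
 */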
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

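/*
 * MMAP buffers: allocate a virtually contiguous, user-mappable buffer with
 * vmalloc_user(). The handler ties the buffer refcount to the vm_area
 * lifetime so userspace mappings keep the buffer alive (see
 * vb2_vmalloc_mmap()).
 */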
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

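/*
 * USERPTR buffers: pin the user pages behind @vaddr in a frame vector and
 * map them into the kernel with vm_map_ram(). Ranges without struct pages
 * (e.g. a PFN-mapped framebuffer) are accepted only if they are physically
 * contiguous, in which case they are ioremap()ed instead.
 */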
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

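/*
 * Undo vb2_vmalloc_get_userptr(): unmap, mark the pages dirty if the
 * device may have written to them, and unpin the frame vector.
 */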
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

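/*
 * Map an MMAP buffer into a userspace vm_area. vb2_common_vm_ops takes a
 * reference per mapping through the handler, so the buffer is not freed
 * before the last mapping is torn down.
 */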
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

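/*
 * Exporter side: describe the vmalloc pages with a per-attachment
 * sg_table; the table is only DMA-mapped later, in the map_dma_buf
 * callback.
 */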
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

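/*
 * Importer side: an attached dma-buf is made CPU-addressable on demand
 * via dma_buf_vmap() and released again via dma_buf_vunmap().
 */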
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

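/*
 * The ops table handed to videobuf2. A driver typically selects this
 * allocator by setting q->mem_ops = &vb2_vmalloc_memops on its
 * struct vb2_queue before calling vb2_queue_init().
 */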
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");