/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

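/*
 * Per-buffer state for this allocator. "vaddr" is the kernel-side mapping,
 * "vec" holds the pinned user pages for USERPTR buffers, and "dbuf" is set
 * when the buffer was imported from another exporter's dma-buf.
 */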
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

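/*
 * MMAP: back the buffer with vmalloc_user() memory, which is zeroed and
 * page-aligned, and therefore safe to hand to user space with
 * remap_vmalloc_range() in vb2_vmalloc_mmap() below.
 */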
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	atomic_inc(&buf->refcount);
	return buf;
}

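/* Drop one reference; free the buffer once the last user is gone. */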
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

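/*
 * USERPTR: pin the pages backing the user-space range and build a kernel
 * mapping for them. For VM_PFNMAP ranges, where no struct pages exist,
 * the PFNs must be physically contiguous so the range can be ioremap()ed
 * instead.
 */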
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check that the
		 * memory is physically contiguous and use a direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

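/*
 * Undo get_userptr: unmap the kernel mapping, mark the pages dirty if the
 * device may have written to them (DMA_FROM_DEVICE) and unpin them.
 */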
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

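/*
 * mmap the vmalloc()ed buffer into a user-space vma; the vma holds a
 * reference on the buffer via the common refcounting vm_ops until the
 * mapping is torn down.
 */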
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

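/*
 * Per-attachment state: a scatterlist covering the vmalloc pages plus the
 * direction it is currently mapped in (DMA_NONE while unmapped).
 */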
struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
					 struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
					  struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
					 struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				       struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

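/*
 * Export the buffer as a dma-buf. The dma-buf takes its own reference on
 * the vb2 buffer, dropped again in vb2_vmalloc_dmabuf_ops_release().
 */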
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
				       unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

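/*
 * The vb2 core invokes all of the above through this ops table; drivers
 * are not expected to call these functions directly.
 */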
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
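
/*
 * Usage example (an illustrative sketch, not code from this file): a
 * driver selects this allocator by pointing its queue at these ops
 * before initializing it. "q" here is assumed to be the driver's
 * struct vb2_queue:
 *
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */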

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");