// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->dev = dev;
	buffer->size = len;

	ret = heap->ops->allocate(heap, buffer, len, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	spin_lock(&heap->stat_lock);
	heap->num_of_buffers++;
	heap->num_of_alloc_bytes += len;
	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
	spin_unlock(&heap->stat_lock);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

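/*
 * Free the heap memory backing @buffer and then the buffer metadata itself.
 * Called directly for heaps without deferred free, otherwise from the heap's
 * deferred-free worker once the buffer has been drained from the freelist.
 */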
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	spin_lock(&buffer->heap->stat_lock);
	buffer->heap->num_of_buffers--;
	buffer->heap->num_of_alloc_bytes -= buffer->size;
	spin_unlock(&buffer->heap->stat_lock);

	kfree(buffer);
}

static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

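/*
 * Map the buffer into the kernel, creating the mapping on first use and
 * reference counting it afterwards.  Callers must hold buffer->lock.
 */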
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

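/*
 * Duplicate the buffer's scatter list for a new attachment so that each
 * importing device gets its own copy to store DMA addresses in; the pages
 * themselves are shared with the original table.
 */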
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

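/*
 * Per-attachment state: the importing device, its private copy of the
 * scatter list, and the link into the owning buffer's attachment list.
 */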
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}

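/*
 * Map/unmap the attachment's private sg_table for DMA by the importing
 * device.  Ownership of the table stays with the attachment.
 */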
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = a->table;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

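/*
 * mmap() handler for the exported dma-buf: delegate to the heap's map_user
 * op; buffers allocated without ION_FLAG_CACHED are mapped write-combined.
 */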
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);

	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}

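/*
 * CPU access bracket: create (and later drop) an optional kernel mapping and
 * sync every attached device's mapping so CPU reads/writes see coherent data.
 */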
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;
	int ret = 0;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			goto unlock;
		}
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list)
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);

unlock:
	mutex_unlock(&buffer->lock);
	return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list)
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detach,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
};

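/*
 * Allocate a buffer from the highest-priority heap that matches
 * @heap_id_mask and hand it to userspace as a new dma-buf fd.
 *
 * Userspace usage is roughly the following (a sketch assuming the exported
 * ION UAPI header; error handling omitted):
 *
 *	int ion = open("/dev/ion", O_RDONLY | O_CLOEXEC);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.heap_id_mask = 1 << id,	// id from ION_IOC_HEAP_QUERY
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	ioctl(ion, ION_IOC_ALLOC, &alloc);
 *	// alloc.fd now holds a dma-buf fd: mmap() it or pass it to a driver
 */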
static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;

	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

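/*
 * ION_IOC_HEAP_QUERY backend: with a NULL user buffer only the heap count is
 * reported; otherwise up to query->cnt heap descriptors are copied out.
 */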
static int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

union ion_ioctl_arg {
	struct ion_allocation_data allocation;
	struct ion_heap_query query;
};

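/* Reject ioctl arguments that set reserved fields; userspace must zero them. */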
static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
	switch (cmd) {
	case ION_IOC_HEAP_QUERY:
		if (arg->query.reserved0 ||
		    arg->query.reserved1 ||
		    arg->query.reserved2)
			return -EINVAL;
		break;
	default:
		break;
	}

	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	union ion_ioctl_arg data;

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	/*
	 * The copy_from_user is unconditional, even for read-only ioctls, so
	 * that the argument can be validated; if the ioctl does not write,
	 * the copied data is cleared below before it is used.
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = validate_ioctl_arg(cmd, &data);
	if (ret) {
		pr_warn_once("%s: ioctl validate failed\n", __func__);
		return ret;
	}

	if (!(_IOC_DIR(cmd) & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		int fd;

		fd = ion_alloc(data.allocation.len,
			       data.allocation.heap_id_mask,
			       data.allocation.flags);
		if (fd < 0)
			return fd;

		data.allocation.fd = fd;

		break;
	}
	case ION_IOC_HEAP_QUERY:
		ret = ion_query_heaps(&data.query);
		break;
	default:
		return -ENOTTY;
	}

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return ret;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

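/*
 * debugfs shrinker hooks: writing N to <heap>_shrink scans N objects
 * (0 means "scan everything currently counted"); reading returns the
 * shrinker's current object count.
 */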
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

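/*
 * Register a heap with the ION core: set up its locks, deferred-free thread
 * and shrinker where requested, create its debugfs entries, and add it to
 * the device's priority-ordered heap list.
 */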
void ion_device_add_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;
	int ret;
	struct dentry *heap_root;
	char debug_name[64];

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	spin_lock_init(&heap->stat_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret)
			pr_err("%s: Failed to register shrinker\n", __func__);
	}

	heap->dev = dev;
	heap->num_of_buffers = 0;
	heap->num_of_alloc_bytes = 0;
	heap->alloc_bytes_wm = 0;

	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
	debugfs_create_u64("num_of_buffers",
			   0444, heap_root,
			   &heap->num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes",
			   0444,
			   heap_root,
			   &heap->num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm",
			   0444,
			   heap_root,
			   &heap->alloc_bytes_wm);

	if (heap->shrinker.count_objects &&
	    heap->shrinker.scan_objects) {
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name,
				    0644,
				    heap_root,
				    heap,
				    &debug_shrink_fops);
	}

	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later, attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

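/* Create the single /dev/ion misc device and the ion debugfs root. */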
static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);