// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->dev = dev;
	buffer->size = len;

	ret = heap->ops->allocate(heap, buffer, len, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		/* the allocation may succeed once deferred frees are drained */
		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	spin_lock(&heap->stat_lock);
	heap->num_of_buffers++;
	heap->num_of_alloc_bytes += len;
	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
	spin_unlock(&heap->stat_lock);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	spin_lock(&buffer->heap->stat_lock);
	buffer->heap->num_of_buffers--;
	buffer->heap->num_of_alloc_bytes -= buffer->size;
	spin_unlock(&buffer->heap->stat_lock);

	kfree(buffer);
}

static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

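/*
 * Take (or create) a refcounted kernel mapping of the buffer.  Both call
 * sites hold buffer->lock around this; pairs with ion_buffer_kmap_put().
 */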
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

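/*
 * Each attachment works on a private copy of the heap's sg_table so that
 * it has its own dma_address slots: the same buffer can then be mapped
 * for several devices at once without the mappings clobbering each other.
 */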
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

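/*
 * struct ion_dma_buf_attachment - per-device attachment state
 * @dev:	the device attached to the buffer
 * @table:	this attachment's private copy of the buffer's sg_table
 * @list:	node in the owning buffer's attachments list
 */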
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return PTR_ERR(table);
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}

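/*
 * dma-buf kmap relies on buffer->vaddr having been populated by a prior
 * begin_cpu_access() call (which takes the kernel mapping); the offset is
 * given in pages, per the dma-buf map/unmap contract.
 */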
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;
	int ret = 0;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			goto unlock;
		}
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}

unlock:
	mutex_unlock(&buffer->lock);
	return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detach,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.map = ion_dma_buf_kmap,
	.unmap = ion_dma_buf_kunmap,
};

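/*
 * Allocate a buffer from the first heap in the mask that succeeds and
 * hand it back as a dma-buf fd.  Userspace reaches this through
 * ION_IOC_ALLOC on /dev/ion; roughly (illustrative sketch only):
 *
 *	struct ion_allocation_data data = {
 *		.len = 4096,
 *		.heap_id_mask = 1 << some_heap_id,	// from heap query
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	ioctl(ion_fd, ION_IOC_ALLOC, &data);
 *	// data.fd now holds a dma-buf fd usable with mmap() or other drivers
 */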
static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;

	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

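/*
 * ION_IOC_HEAP_QUERY is a two-step protocol: a first call with
 * query->heaps left at 0 only reports the heap count in query->cnt; a
 * second call with a buffer of that many ion_heap_data entries fills in
 * their names, types and ids.  Roughly (illustrative sketch only):
 *
 *	struct ion_heap_query query = { 0 };
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);	// query.cnt = heap count
 *	query.heaps = (__u64)(uintptr_t)calloc(query.cnt,
 *					       sizeof(struct ion_heap_data));
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);	// fills the array
 */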
static int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

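/*
 * All ioctl argument structs are funneled through this union so that one
 * stack buffer, sized by the largest member, can back the _IOC_SIZE()
 * bounds check and the user copies in ion_ioctl() below.
 */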
union ion_ioctl_arg {
	struct ion_allocation_data allocation;
	struct ion_heap_query query;
};

static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
	switch (cmd) {
	case ION_IOC_HEAP_QUERY:
		if (arg->query.reserved0 ||
		    arg->query.reserved1 ||
		    arg->query.reserved2)
			return -EINVAL;
		break;
	default:
		break;
	}

	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	union ion_ioctl_arg data;

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	/*
	 * The copy_from_user is unconditional here for both read and write
	 * to do the validate. If there is no write for the ioctl, the
	 * buffer is cleared
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = validate_ioctl_arg(cmd, &data);
	if (ret) {
		pr_warn_once("%s: ioctl validate failed\n", __func__);
		return ret;
	}

	if (!(_IOC_DIR(cmd) & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		int fd;

		fd = ion_alloc(data.allocation.len,
			       data.allocation.heap_id_mask,
			       data.allocation.flags);
		if (fd < 0)
			return fd;

		data.allocation.fd = fd;

		break;
	}
	case ION_IOC_HEAP_QUERY:
		ret = ion_query_heaps(&data.query);
		break;
	default:
		return -ENOTTY;
	}

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return ret;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	/* the ioctl structs use only fixed-size fields, so no translation */
	.compat_ioctl = ion_ioctl,
#endif
};

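/*
 * debugfs "<heap>_shrink" hook: writing a count asks the heap's shrinker
 * to scan that many objects (writing 0 scans everything currently
 * freeable); reading back reports how many objects remain freeable.
 */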
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

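/*
 * Register a heap with the ion device: set up its locks, optional
 * deferred-free thread and shrinker, create its debugfs stat nodes, and
 * add it to the device's priority-ordered heap list.
 */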
void ion_device_add_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;
	int ret;
	struct dentry *heap_root;
	char debug_name[64];

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	spin_lock_init(&heap->stat_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret)
			pr_err("%s: Failed to register shrinker\n", __func__);
	}

	heap->dev = dev;
	heap->num_of_buffers = 0;
	heap->num_of_alloc_bytes = 0;
	heap->alloc_bytes_wm = 0;

	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
	debugfs_create_u64("num_of_buffers",
			   0444, heap_root,
			   &heap->num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes",
			   0444,
			   heap_root,
			   &heap->num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm",
			   0444,
			   heap_root,
			   &heap->alloc_bytes_wm);

	if (heap->shrinker.count_objects &&
	    heap->shrinker.scan_objects) {
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name,
				    0644,
				    heap_root,
				    heap,
				    &debug_shrink_fops);
	}

	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);