1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/export.h>
30#include <linux/dma-buf.h>
31#include <drm/drmP.h>
32#include <drm/drm_gem.h>
33
34#include "drm_internal.h"
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
/*
 * Per-file cache entry pairing an exported/imported dma-buf with the GEM
 * handle it maps to in that file's handle space.  Entries live on
 * drm_prime_file_private.head and hold a dma-buf reference (taken in
 * drm_prime_add_buf_handle(), dropped in
 * drm_prime_remove_buf_handle_locked()).
 */
struct drm_prime_member {
	struct list_head entry;		/* link in drm_prime_file_private.head */
	struct dma_buf *dma_buf;	/* buffer; this entry owns one reference */
	uint32_t handle;		/* GEM handle in the owning drm_file */
};
68
/*
 * Private data hung off a dma_buf_attachment by drm_gem_map_attach().
 * Caches the single sg-table mapping (and its direction) created by
 * drm_gem_map_dma_buf(); dir == DMA_NONE means "not mapped yet".
 */
struct drm_prime_attachment {
	struct sg_table *sgt;		/* cached mapping, freed at detach */
	enum dma_data_direction dir;	/* direction of the cached mapping */
};
73
74static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
75 struct dma_buf *dma_buf, uint32_t handle)
76{
77 struct drm_prime_member *member;
78
79 member = kmalloc(sizeof(*member), GFP_KERNEL);
80 if (!member)
81 return -ENOMEM;
82
83 get_dma_buf(dma_buf);
84 member->dma_buf = dma_buf;
85 member->handle = handle;
86 list_add(&member->entry, &prime_fpriv->head);
87 return 0;
88}
89
90static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
91 uint32_t handle)
92{
93 struct drm_prime_member *member;
94
95 list_for_each_entry(member, &prime_fpriv->head, entry) {
96 if (member->handle == handle)
97 return member->dma_buf;
98 }
99
100 return NULL;
101}
102
103static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
104 struct dma_buf *dma_buf,
105 uint32_t *handle)
106{
107 struct drm_prime_member *member;
108
109 list_for_each_entry(member, &prime_fpriv->head, entry) {
110 if (member->dma_buf == dma_buf) {
111 *handle = member->handle;
112 return 0;
113 }
114 }
115 return -ENOENT;
116}
117
118static int drm_gem_map_attach(struct dma_buf *dma_buf,
119 struct device *target_dev,
120 struct dma_buf_attachment *attach)
121{
122 struct drm_prime_attachment *prime_attach;
123 struct drm_gem_object *obj = dma_buf->priv;
124 struct drm_device *dev = obj->dev;
125
126 prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
127 if (!prime_attach)
128 return -ENOMEM;
129
130 prime_attach->dir = DMA_NONE;
131 attach->priv = prime_attach;
132
133 if (!dev->driver->gem_prime_pin)
134 return 0;
135
136 return dev->driver->gem_prime_pin(obj);
137}
138
139static void drm_gem_map_detach(struct dma_buf *dma_buf,
140 struct dma_buf_attachment *attach)
141{
142 struct drm_prime_attachment *prime_attach = attach->priv;
143 struct drm_gem_object *obj = dma_buf->priv;
144 struct drm_device *dev = obj->dev;
145 struct sg_table *sgt;
146
147 if (dev->driver->gem_prime_unpin)
148 dev->driver->gem_prime_unpin(obj);
149
150 if (!prime_attach)
151 return;
152
153 sgt = prime_attach->sgt;
154 if (sgt) {
155 if (prime_attach->dir != DMA_NONE)
156 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
157 prime_attach->dir);
158 sg_free_table(sgt);
159 }
160
161 kfree(sgt);
162 kfree(prime_attach);
163 attach->priv = NULL;
164}
165
166void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
167 struct dma_buf *dma_buf)
168{
169 struct drm_prime_member *member, *safe;
170
171 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
172 if (member->dma_buf == dma_buf) {
173 dma_buf_put(dma_buf);
174 list_del(&member->entry);
175 kfree(member);
176 }
177 }
178}
179
/*
 * dma_buf map callback: map the exporter's pages for DMA by the attached
 * device.  The resulting sg table and its direction are cached in the
 * attachment's private data, so repeat maps with the same direction are
 * free; the mapping is only torn down in drm_gem_map_detach().
 * Returns the sg table or an ERR_PTR on failure.
 */
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * Only one mapping per attachment is supported: a second request
	 * with a different direction cannot reuse the cached table, so
	 * refuse it rather than silently hand back a wrongly-mapped one.
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			/* Cache for later calls and detach-time cleanup. */
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}
216
/*
 * dma_buf unmap callback.  Intentionally empty: the mapping created in
 * drm_gem_map_dma_buf() is cached in the attachment and released in
 * drm_gem_map_detach(), so repeated map/unmap cycles stay cheap.
 */
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt,
		enum dma_data_direction dir)
{
	/* nothing to be done here */
}
223
224
225
226
227
228
229
230
/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers.  GEM
 * drivers must use this in their dma_buf ops structure as the release
 * callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference on the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
239
240static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
241{
242 struct drm_gem_object *obj = dma_buf->priv;
243 struct drm_device *dev = obj->dev;
244
245 return dev->driver->gem_prime_vmap(obj);
246}
247
248static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
249{
250 struct drm_gem_object *obj = dma_buf->priv;
251 struct drm_device *dev = obj->dev;
252
253 dev->driver->gem_prime_vunmap(obj, vaddr);
254}
255
/* Per-page atomic CPU mapping is not supported; importers get NULL. */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}
261
/* No-op: kmap_atomic above never hands out a mapping to undo. */
static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}
/* Per-page CPU mapping is not supported; importers get NULL. */
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}
272
/* No-op: kmap above never hands out a mapping to undo. */
static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}
278
279static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
280 struct vm_area_struct *vma)
281{
282 struct drm_gem_object *obj = dma_buf->priv;
283 struct drm_device *dev = obj->dev;
284
285 if (!dev->driver->gem_prime_mmap)
286 return -ENOSYS;
287
288 return dev->driver->gem_prime_mmap(obj, vma);
289}
290
/* dma_buf ops used by every GEM object exported through the PRIME helpers. */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export functions for GEM
 * drivers using the PRIME helpers.  Returns the new dma-buf or an
 * ERR_PTR from dma_buf_export().
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &drm_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	/* Let the driver share its reservation object for implicit fencing. */
	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
353
/*
 * Export @obj as a dma-buf via the driver callback and cache the result
 * in obj->dma_buf.  Both callers in this file hold dev->object_name_lock
 * across this call, which serializes against concurrent gem close.
 */
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* export failed: nothing was cached, so nothing for the
		 * caller to clean up beyond its own references.
		 */
		return dmabuf;
	}

	/*
	 * Cache the export.  Callers don't need to clean this up on their
	 * error paths: the handle_count check above guarantees somebody
	 * still owns a handle and will clear the cache on final close.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref for the gem object since it is now used by the dma-buf. */
	drm_gem_object_reference(obj);

	return dmabuf;
}
386
387
388
389
390
391
392
393
394
395
396
397
398
399
/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM
 * object.  The actual exporting from GEM object to a dma-buf is done
 * through the gem_prime_export driver callback.  Returns 0 on success or
 * a negative error code.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	/* Re-export a buffer this file has already exported/imported. */
	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* Re-export the original imported object. */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	/* Re-use a dma-buf this object was already exported as. */
	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * Add the buffer to the handle cache so a later fd_to_handle of
	 * the same buffer returns this handle.  This must happen under
	 * dev->object_name_lock so a racing gem close doesn't miss
	 * removing this entry from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache on failure:
	 * the newly created dma-buf is already linked in obj->dma_buf, which
	 * stays valid as long as a userspace gem handle exists.  Closing the
	 * handle cleans out the cache anyway, so nothing leaks.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
486
487
488
489
490
491
492
493
494
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM
 * drivers using the PRIME helpers.  Returns the (new or existing) GEM
 * object or an ERR_PTR.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dma-buf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the dma-buf's f_count.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* The attachment (via obj->import_attach) keeps this reference. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
549
550
551
552
553
554
555
556
557
558
559
560
561
/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM
 * object.  The actual importing of the GEM object from the dma-buf is done
 * through the gem_prime_import driver callback.  Returns 0 on success or a
 * negative error code.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	/* Already imported by this file?  Then reuse the cached handle. */
	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		/* Cache the dma-buf on the object for later re-export. */
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
628
629int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
630 struct drm_file *file_priv)
631{
632 struct drm_prime_handle *args = data;
633
634 if (!drm_core_check_feature(dev, DRIVER_PRIME))
635 return -EINVAL;
636
637 if (!dev->driver->prime_handle_to_fd)
638 return -ENOSYS;
639
640
641 if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
642 return -EINVAL;
643
644 return dev->driver->prime_handle_to_fd(dev, file_priv,
645 args->handle, args->flags, &args->fd);
646}
647
648int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
649 struct drm_file *file_priv)
650{
651 struct drm_prime_handle *args = data;
652
653 if (!drm_core_check_feature(dev, DRIVER_PRIME))
654 return -EINVAL;
655
656 if (!dev->driver->prime_fd_to_handle)
657 return -ENOSYS;
658
659 return dev->driver->prime_fd_to_handle(dev, file_priv,
660 args->fd, &args->handle);
661}
662
663
664
665
666
667
668
669
670
671
672struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
673{
674 struct sg_table *sg = NULL;
675 int ret;
676
677 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
678 if (!sg) {
679 ret = -ENOMEM;
680 goto out;
681 }
682
683 ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
684 nr_pages << PAGE_SHIFT, GFP_KERNEL);
685 if (ret)
686 goto out;
687
688 return sg;
689out:
690 kfree(sg);
691 return ERR_PTR(ret);
692}
693EXPORT_SYMBOL(drm_prime_pages_to_sg);
694
695
696
697
698
699
700
701
702
703
704
/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the pages in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and (optionally) dma
 * addresses, expanding each sg entry into PAGE_SIZE chunks.  Returns 0
 * on success; note the failure return is -1 (not a -errno) when the
 * table holds more pages than @max_pages.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		/* Split each (possibly multi-page) sg entry into pages. */
		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
737
738
739
740
741
742
743
744
745
746void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
747{
748 struct dma_buf_attachment *attach;
749 struct dma_buf *dma_buf;
750 attach = obj->import_attach;
751 if (sg)
752 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
753 dma_buf = attach->dmabuf;
754 dma_buf_detach(attach->dmabuf, attach);
755
756 dma_buf_put(dma_buf);
757}
758EXPORT_SYMBOL(drm_prime_gem_destroy);
759
/* Initialize the per-file PRIME handle cache (empty list + its lock). */
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}
765
/* Tear down the per-file PRIME state on file close. */
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by this point the cache must already have been emptied by
	 * the gem-close path; anything left would leak a dma-buf ref.
	 */
	WARN_ON(!list_empty(&prime_fpriv->head));
}
771