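/*
 * PRIME (dma-buf based buffer sharing) helpers for GEM drivers.
 */
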
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
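
/*
 * DMA-BUF/GEM object references and lifetime overview:
 *
 * On export the dma_buf holds a reference to the exported GEM object:
 * export_and_register_object() grabs it when the dma_buf is first created and
 * stashes the dma_buf in obj->dma_buf.  The reference is dropped again in
 * drm_gem_dmabuf_release() when the dma_buf goes away.
 *
 * On import the importing GEM object holds a reference to the dma_buf (taken
 * in drm_gem_prime_fd_to_handle()/drm_gem_prime_import()) and keeps the
 * attachment in obj->import_attach; drm_prime_gem_destroy() undoes both.
 *
 * References therefore only ever flow in one direction:
 * importing GEM object -> dma_buf -> exporting GEM object.
 *
 * Self-importing (a dma_buf handed back to the device that exported it) is
 * short-circuited in drm_gem_prime_import(): the original GEM object is
 * returned with just an additional GEM reference.
 */

/*
 * A drm_prime_member tracks, per DRM file, which GEM handle a given dma_buf
 * maps to, so that repeated imports/exports hand back the same handle or fd.
 */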
struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->handle == handle)
			return member->dma_buf;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				     prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);
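
	/* return the cached mapping when possible */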
	if (prime_attach->dir == dir)
		return prime_attach->sgt;
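
	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */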
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
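	/* nothing to do here: the cached mapping is released in drm_gem_map_detach() */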
}
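
/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */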
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
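
	/* drop the reference the exported dma_buf holds on the GEM object */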
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
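
/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions drm_gem_prime_export() and
 * drm_gem_prime_import().  These functions implement dma-buf support in terms
 * of a few lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  - @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table: produce a GEM object from another driver's
 *    scatter/gather table
 *
 * A driver using these helpers typically wires them up in its
 * struct drm_driver roughly like below.  This is an illustrative sketch only;
 * the foo_* callbacks stand in for driver-specific implementations:
 *
 *	.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *	.gem_prime_export	= drm_gem_prime_export,
 *	.gem_prime_import	= drm_gem_prime_import,
 *	.gem_prime_pin		= foo_gem_prime_pin,
 *	.gem_prime_get_sg_table	= foo_gem_prime_get_sg_table,
 *	.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *	.gem_prime_vmap		= foo_gem_prime_vmap,
 *	.gem_prime_vunmap	= foo_gem_prime_vunmap,
 *	.gem_prime_mmap		= foo_gem_prime_mmap,
 */

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */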
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;
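
	/* prevent races with concurrent gem_close */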
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
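		/*
		 * No references have been taken yet, so there is nothing to
		 * unwind here; just hand the error back to the caller.
		 */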
		return dmabuf;
	}
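
	/*
	 * Note that callers do not need to clean up the export cache, since
	 * the check for obj->handle_count guarantees that someone will clean
	 * it up when the last handle goes away.
	 */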
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
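	/* Grab a new GEM reference for the one now held by the dma_buf. */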
	drm_gem_object_reference(obj);

	return dmabuf;
}
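
/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */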
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
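	/* re-export the original imported object */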
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
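		/*
		 * The export failed, so no dma-buf holds a reference yet;
		 * drop the lock and release the GEM lookup reference below.
		 */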
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
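	/*
	 * If we've exported this buffer then cheat and add it to the import
	 * list so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem
	 * close ioctl doesn't miss removing this buffer handle from the cache.
	 */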
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
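	/*
	 * We must _not_ remove the buffer from the handle cache, since the
	 * newly created dma-buf is already linked in the global obj->dma_buf
	 * pointer, and that is invariant as long as a userspace gem handle
	 * exists. Closing the handle will clean out the cache anyway, so we
	 * don't leak.
	 */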
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
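
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */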
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
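			/*
			 * Importing a dma_buf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dma_buf.
			 */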
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
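
/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */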
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;
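
	/* never seen this one, need to import */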
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}
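
	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */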
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
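	/*
	 * If the driver attached to the dma-buf during import, we rely on the
	 * normal free-object path to detach again.
	 */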
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;
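
	/* check flags are valid */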
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;
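
	/* we only want to pass DRM_CLOEXEC, which is == O_CLOEXEC */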
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}
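
/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The caller is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 */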
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
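
/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */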
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
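
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */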
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
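	/* by now drm_gem_release should've made sure the list is empty */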
	WARN_ON(!list_empty(&prime_fpriv->head));
}