// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS(DMA_BUF);

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a PV driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif

struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so the grant
	 * device context is not destroyed while there are external
	 * users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

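/*
 * Wait protocol: the caller of dmabuf_exp_wait_released() first looks up the
 * exported buffer by its file descriptor and takes an extra reference on it.
 * dmabuf_exp_wait_obj_new() then registers a wait object and drops that
 * reference again: once the last reference is gone, dmabuf_exp_release()
 * completes the wait object and the waiter is woken up (or it times out).
 */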
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);

	/*
	 * Try to find the DMA buffer: if not found it means that either
	 * the buffer has already been released or the file descriptor
	 * provided is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate a wait object and add it to the wait
	 * list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

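/*
 * Exporter side dma_buf_ops. Each attachment caches at most one mapping:
 * dmabuf_exp_ops_map_dma_buf() builds and DMA-maps an sg_table on first use
 * and returns the cached table on subsequent calls with the same direction;
 * the mapping is only torn down when the importer detaches.
 */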
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

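/*
 * Final put of an exported buffer: wake up anyone waiting in
 * dmabuf_exp_wait_released(), unlink the buffer from the export list and
 * drop the file reference taken at export time. Called with priv->lock held
 * via kref_put().
 */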
static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

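/*
 * Wrap the already granted and mapped pages described by @args into a new
 * dma-buf, install a file descriptor for it and put the buffer on the
 * export list. A reference on the gntdev file is taken so it stays alive
 * for as long as the dma-buf is around.
 */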
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

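/*
 * Export path as triggered by the EXP_FROM_REFS ioctl: allocate backing
 * storage for @count grant references provided by @domid, map the grants
 * into the local domain and hand the resulting pages to
 * dmabuf_exp_from_pages(), which turns them into the dma-buf file
 * descriptor returned in @fd.
 */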
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1;

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

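/*
 * Import path: a dma-buf created by some other driver is attached and mapped
 * for gntdev's DMA device, its backing pages are granted to the foreign
 * domain and the resulting grant references are handed back to user space.
 */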
/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

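/*
 * Import a dma-buf given by @fd: attach and map it for @dev, verify that it
 * starts at a zero offset and really covers @count pages, then grant the
 * foreign domain @domid access to every backing page. On success the buffer
 * is added to the import list and the filled-in grant references can be
 * copied to user space by the caller.
 */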
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
			 attach->dmabuf->size >> PAGE_SHIFT,
			 gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sgtable_page(sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if page is valid: this can happen if we are given
		 * a page from VRAM or other resources which are not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the imported dma-buf with file descriptor @fd and remove it from the
 * import list. The caller then owns the returned gntdev_dmabuf and is
 * responsible for tearing it down.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

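/*
 * Rough user-space usage sketch for the export ioctl (field names follow how
 * they are used below; see include/uapi/xen/gntdev.h for the exact layout):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *
 *	op = calloc(1, sizeof(*op) + count * sizeof(op->refs[0]));
 *	op->domid = domid;		// domain that granted the references
 *	op->count = count;		// number of grant references
 *	op->flags = GNTDEV_DMA_FLAG_WC;	// or GNTDEV_DMA_FLAG_COHERENT
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
 *		use_dmabuf(op->fd);	// op->fd now holds the dma-buf fd
 *
 * IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED then blocks (up to wait_to_ms) until
 * that dma-buf has been released by all of its users.
 */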
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}