#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
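
/*
 * DMA-BUF/GEM object references and lifetime overview:
 *
 * On export, the dma-buf holds a reference to the exporting GEM object:
 * drm_gem_prime_handle_to_fd() keeps the lookup reference when it first
 * creates the dma-buf and stores the GEM object in the dma-buf's priv
 * field; drm_gem_dmabuf_release() drops that reference again when the
 * dma-buf goes away.
 *
 * On import, the importing GEM object holds a reference to the dma-buf:
 * drm_gem_prime_import() attaches to the dma-buf and stores the attachment
 * in the GEM object, and drm_prime_gem_destroy() detaches and drops the
 * reference. References therefore flow in one direction only:
 * importing GEM object -> dma-buf -> exporting GEM object.
 *
 * Self-import (a dma-buf coming back to the device that exported it) is
 * detected in drm_gem_prime_import(), which returns the original GEM
 * object instead of wrapping the dma-buf in a new one.
 *
 * Drivers typically wire these helpers straight into their struct
 * drm_driver, e.g.:
 *
 *	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *	.gem_prime_export = drm_gem_prime_export,
 *	.gem_prime_import = drm_gem_prime_import,
 */

/* tracks a dma-buf/GEM-handle pairing for one drm_file */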
struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	mutex_lock(&obj->dev->struct_mutex);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR_OR_NULL(sgt)) {
		/* dma_map_sg() returns 0 on failure; treat that as -ENOMEM */
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
}

static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	if (obj->export_dma_buf == dma_buf) {
		/* drop the reference the export fd held on the GEM object */
		obj->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(obj);
	}
}

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

/*
 * page-level kmap and userspace mmap are not implemented by these helpers,
 * so the ops below are stubs
 */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
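
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC for the new fd
 *
 * Implements gem_prime_export for GEM drivers using the PRIME helpers: it
 * pins the object via the driver's gem_prime_pin callback (if one is
 * provided) and then wraps it in a dma-buf whose ops map, vmap and release
 * through the driver callbacks above.
 */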
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	if (dev->driver->gem_prime_pin) {
		int ret = dev->driver->gem_prime_pin(obj);
		if (ret)
			return ERR_PTR(ret);
	}
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
			      flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);

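/**
 * drm_gem_prime_handle_to_fd - PRIME export: turn a GEM handle into an fd
 * @dev: drm device the handle belongs to
 * @file_priv: drm file-private structure
 * @handle: GEM handle to export
 * @flags: flags like DRM_CLOEXEC to pass on to the new fd
 * @prime_fd: storage for the resulting file descriptor
 *
 * Re-exports an imported buffer through its original dma-buf, reuses an
 * existing export if there is one, and otherwise asks the driver's
 * gem_prime_export callback for a new dma-buf. The dma-buf/handle pair is
 * recorded so a later fd_to_handle on the same buffer returns this handle.
 */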
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
		int *prime_fd)
{
	struct drm_gem_object *obj;
	struct dma_buf *buf;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	mutex_lock(&file_priv->prime.lock);

	/* re-export the original imported object */
	if (obj->import_attach) {
		get_dma_buf(obj->import_attach->dmabuf);
		*prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
		drm_gem_object_unreference_unlocked(obj);
		mutex_unlock(&file_priv->prime.lock);
		return 0;
	}

	if (obj->export_dma_buf) {
		get_dma_buf(obj->export_dma_buf);
		*prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
		drm_gem_object_unreference_unlocked(obj);
	} else {
		buf = dev->driver->gem_prime_export(dev, obj, flags);
		if (IS_ERR(buf)) {
			/*
			 * normally the created dma-buf takes ownership of
			 * the lookup ref, but if export fails drop it here
			 */
			drm_gem_object_unreference_unlocked(obj);
			mutex_unlock(&file_priv->prime.lock);
			return PTR_ERR(buf);
		}
		obj->export_dma_buf = buf;
		*prime_fd = dma_buf_fd(buf, flags);
	}

	/*
	 * record the dma-buf/handle pairing so that a later fd_to_handle on
	 * this buffer hands back the same handle
	 */
	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
			obj->export_dma_buf, handle);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		mutex_unlock(&file_priv->prime.lock);
		return ret;
	}

	mutex_unlock(&file_priv->prime.lock);
	return 0;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

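/**
 * drm_gem_prime_import - PRIME import: turn a dma-buf into a GEM object
 * @dev: drm device to import into
 * @dma_buf: dma-buf to import
 *
 * Short-circuits self-imports, otherwise attaches to the dma-buf, maps it
 * and hands the resulting sg table to the driver's
 * gem_prime_import_sg_table callback to build a new GEM object.
 */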
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dma-buf exported from our own GEM:
			 * take a reference on the GEM object itself instead
			 * of holding on to the dma-buf's file reference.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		/* a NULL sg table is a failure too, not a 0 errno */
		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

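/**
 * drm_gem_prime_fd_to_handle - PRIME import: turn a dma-buf fd into a handle
 * @dev: drm device to import into
 * @file_priv: drm file-private structure
 * @prime_fd: dma-buf file descriptor to import
 * @handle: storage for the resulting GEM handle
 *
 * Returns the existing handle if this file has already imported (or
 * exported) the dma-buf; otherwise imports it via the driver's
 * gem_prime_import callback and creates a fresh handle for it.
 */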
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (!ret)
		goto out_put;	/* already imported: hand back the old handle */

	/* never seen this one, need to import */
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_put;
	}

	ret = drm_gem_handle_create(file_priv, obj, handle);
	/* drm_gem_handle_create took a reference; drop the import one */
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);
	return 0;

fail:
	/*
	 * destroy the handle we just created; freeing the object on the
	 * last unreference detaches the dma-buf again
	 */
	drm_gem_object_handle_unreference_unlocked(obj);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

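/*
 * ioctl entry points: validate the request, then forward to the driver's
 * prime_handle_to_fd/prime_fd_to_handle hooks (usually the helpers above)
 */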
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check that the flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC, which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}
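
/*
 * drm_prime_pages_to_sg - build an sg table from an array of pages
 *
 * This helper creates an sg table object from a set of pages. The driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma-buf itself.
 */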
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
	struct sg_table *sg = NULL;
	struct scatterlist *iter;
	int i;
	int ret;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto out;

	ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
	if (ret)
		goto out;

	for_each_sg(sg->sgl, iter, nr_pages, i)
		sg_set_page(iter, pages[i], PAGE_SIZE, 0);

	return sg;
out:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
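
/*
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into page/addr arrays
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to fill, one entry per PAGE_SIZE chunk
 * @addrs: optional array of dma bus addresses to fill alongside @pages
 * @max_pages: size of both the passed-in arrays
 *
 * Splits each sg entry back into individual pages (and their dma
 * addresses), which some drivers need, e.g. for fault handling.
 */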
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
		dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, offset;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		offset = sg->offset;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		/* walk each sg entry one PAGE_SIZE chunk at a time */
		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

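/*
 * drm_prime_gem_destroy - undo a PRIME import
 * @obj: GEM object being destroyed
 * @sg: the sg table returned when the attachment was mapped, or NULL
 *
 * Called from a driver's GEM free path for imported objects: unmaps the
 * attachment, detaches from the dma-buf and drops the reference taken at
 * import time.
 */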
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* drop the reference the import took on the dma-buf */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
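
/*
 * per-file PRIME bookkeeping: set up and tear down the list and lock used
 * to track imported and exported dma-bufs for one drm_file
 */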
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		list_del(&member->entry);
		kfree(member);
	}
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);
493
494int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
495{
496 struct drm_prime_member *member;
497
498 member = kmalloc(sizeof(*member), GFP_KERNEL);
499 if (!member)
500 return -ENOMEM;
501
502 member->dma_buf = dma_buf;
503 member->handle = handle;
504 list_add(&member->entry, &prime_fpriv->head);
505 return 0;
506}
507EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
508
509int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
510{
511 struct drm_prime_member *member;
512
513 list_for_each_entry(member, &prime_fpriv->head, entry) {
514 if (member->dma_buf == dma_buf) {
515 *handle = member->handle;
516 return 0;
517 }
518 }
519 return -ENOENT;
520}
521EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
522
523void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
524{
525 struct drm_prime_member *member, *safe;
526
527 mutex_lock(&prime_fpriv->lock);
528 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
529 if (member->dma_buf == dma_buf) {
530 list_del(&member->entry);
531 kfree(member);
532 }
533 }
534 mutex_unlock(&prime_fpriv->lock);
535}
536EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
537