#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
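
/*
 * One drm_prime_member records a dma_buf and the GEM handle it maps to for
 * a given drm_file, so that re-exporting or re-importing the same buffer
 * through that file reuses the existing handle instead of creating a new one.
 */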
struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	mutex_lock(&obj->dev->struct_mutex);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR_OR_NULL(sgt))
		dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	mutex_unlock(&obj->dev->struct_mutex);
	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
}

static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	if (obj->export_dma_buf == dma_buf) {
		/* drop the reference the exported dma-buf held on the object */
		obj->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(obj);
	}
}

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
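
/*
 * PRIME helpers: the exported functions below implement dma-buf export and
 * import on top of GEM, using a few lower-level driver callbacks
 * (gem_prime_pin if needed, gem_prime_get_sg_table, gem_prime_import_sg_table,
 * gem_prime_vmap and gem_prime_vunmap).  As an illustrative sketch, a driver
 * using these helpers would typically wire them up in its struct drm_driver
 * roughly like:
 *
 *	.driver_features     = DRIVER_GEM | DRIVER_PRIME | ...,
 *	.prime_handle_to_fd  = drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle  = drm_gem_prime_fd_to_handle,
 *	.gem_prime_export    = drm_gem_prime_export,
 *	.gem_prime_import    = drm_gem_prime_import,
 *
 * together with implementations of the gem_prime_* callbacks listed above.
 */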
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	if (dev->driver->gem_prime_pin) {
		int ret = dev->driver->gem_prime_pin(obj);
		if (ret)
			return ERR_PTR(ret);
	}
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);
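
/**
 * drm_gem_prime_handle_to_fd - PRIME export: GEM handle to dma-buf fd
 * @dev: DRM device
 * @file_priv: DRM file the handle belongs to
 * @handle: GEM handle to export
 * @flags: dma-buf fd flags (e.g. DRM_CLOEXEC)
 * @prime_fd: returned dma-buf file descriptor
 *
 * Looks up the GEM object, reuses an already imported or exported dma-buf if
 * one exists, otherwise asks the driver's gem_prime_export callback to create
 * one, and returns a file descriptor for it.
 */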
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
		int *prime_fd)
{
	struct drm_gem_object *obj;
	void *buf;
	int ret = 0;
	struct dma_buf *dmabuf;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	mutex_lock(&file_priv->prime.lock);

	/* if the object was imported, reuse its original dma-buf */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		goto out_have_obj;
	}

	/* reuse an existing export of this object */
	if (obj->export_dma_buf) {
		dmabuf = obj->export_dma_buf;
		goto out_have_obj;
	}

	buf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(buf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(buf);
		goto out;
	}
	obj->export_dma_buf = buf;

	/* record the new export in the per-file handle -> dma_buf list */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
			obj->export_dma_buf, handle);
	if (ret)
		goto out;

	*prime_fd = dma_buf_fd(buf, flags);
	mutex_unlock(&file_priv->prime.lock);
	return 0;

out_have_obj:
	get_dma_buf(dmabuf);
	*prime_fd = dma_buf_fd(dmabuf, flags);
out:
	drm_gem_object_unreference_unlocked(obj);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
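
/**
 * drm_gem_prime_import - PRIME import: dma-buf to GEM object
 * @dev: DRM device importing the buffer
 * @dma_buf: dma-buf to import
 *
 * If the dma-buf was exported from a GEM object on the same device, the
 * existing object is reused; otherwise the buffer is attached, its sg table
 * mapped, and the driver's gem_prime_import_sg_table callback wraps it in a
 * new GEM object.
 */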
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dma-buf exported from our own GEM takes
			 * a reference on the GEM object itself instead of the
			 * dma-buf f_count.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
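
/**
 * drm_gem_prime_fd_to_handle - PRIME import: dma-buf fd to GEM handle
 * @dev: DRM device
 * @file_priv: DRM file the handle is created for
 * @prime_fd: dma-buf file descriptor to import
 * @handle: returned GEM handle
 *
 * Reuses the existing handle if this file has already imported the dma-buf,
 * otherwise imports the buffer through the driver's gem_prime_import callback
 * and creates a new handle for it.
 */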
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (!ret) {
		ret = 0;
		goto out_put;
	}

	/* never seen this dma-buf before, need to import it */
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_put;
	}

	ret = drm_gem_handle_create(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* if the driver attached to the dma-buf, rely on the GEM free-object
	 * path to detach it
	 */
	drm_gem_object_handle_unreference_unlocked(obj);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check that the flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC, which is equal to O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}
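
/**
 * drm_prime_pages_to_sg - convert an array of pages into an sg table
 * @pages: array of page pointers
 * @nr_pages: number of entries in @pages
 *
 * Allocates an sg table describing @pages.  Returns NULL on failure; the
 * caller is responsible for mapping the table for DMA.
 */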
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		goto out;

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
			nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
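
/**
 * drm_prime_sg_to_page_addr_arrays - expand an sg table into page/addr arrays
 * @sgt: sg table to walk
 * @pages: array to receive one struct page pointer per page
 * @addrs: optional array to receive the matching DMA addresses
 * @max_pages: capacity of the destination arrays
 *
 * Returns 0 on success, or -1 if the sg table covers more than @max_pages
 * pages.
 */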
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, offset;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		offset = sg->offset;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
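
/* helper to clean up the dma-buf attachment of an imported GEM object */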
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference held on the dma-buf by the import */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
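
/* set up per-file PRIME bookkeeping: the handle/dma-buf list and its lock */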
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* the list of exported/imported buffers should already be empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	/* the list holds its own reference on the dma-buf */
	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}
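
/*
 * Look up the GEM handle that @dma_buf is already mapped to for this file;
 * returns -ENOENT if the buffer has not been seen before.
 */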
int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
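
/*
 * Drop every mapping of @dma_buf from this file's handle list and release
 * the references the list held on it.
 */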
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	mutex_lock(&prime_fpriv->lock);
	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
	mutex_unlock(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_remove_buf_handle);