1
2
3
4
5
6
7
8
9
10
11
12#include <linux/dma-buf.h>
13#include <linux/dma-mapping.h>
14#include <linux/export.h>
15#include <linux/mm.h>
16#include <linux/mutex.h>
17#include <linux/slab.h>
18
19#include <drm/drm.h>
20#include <drm/drm_device.h>
21#include <drm/drm_drv.h>
22#include <drm/drm_gem_cma_helper.h>
23#include <drm/drm_vma_manager.h>
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * Allocates the struct drm_gem_cma_object — through the driver's
 * &drm_driver.gem_create_object hook when one is provided, otherwise with
 * kzalloc() — then initializes the embedded GEM object and reserves a fake
 * mmap offset for it.  The backing buffer memory itself is NOT allocated
 * here; callers do that (or attach imported memory) afterwards.
 *
 * Returns:
 * Pointer to the new object on success, ERR_PTR()-encoded negative errno
 * on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	if (drm->driver->gem_create_object)
		gem_obj = drm->driver->gem_create_object(drm, size);
	else
		gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);
	cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		/* Undo drm_gem_object_init() before freeing the object. */
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
94 size_t size)
95{
96 struct drm_gem_cma_object *cma_obj;
97 int ret;
98
99 size = round_up(size, PAGE_SIZE);
100
101 cma_obj = __drm_gem_cma_create(drm, size);
102 if (IS_ERR(cma_obj))
103 return cma_obj;
104
105 cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
106 GFP_KERNEL | __GFP_NOWARN);
107 if (!cma_obj->vaddr) {
108 drm_dbg(drm, "failed to allocate buffer with size %zu\n",
109 size);
110 ret = -ENOMEM;
111 goto error;
112 }
113
114 return cma_obj;
115
116error:
117 drm_gem_object_put(&cma_obj->base);
118 return ERR_PTR(ret);
119}
120EXPORT_SYMBOL_GPL(drm_gem_cma_create);
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138static struct drm_gem_cma_object *
139drm_gem_cma_create_with_handle(struct drm_file *file_priv,
140 struct drm_device *drm, size_t size,
141 uint32_t *handle)
142{
143 struct drm_gem_cma_object *cma_obj;
144 struct drm_gem_object *gem_obj;
145 int ret;
146
147 cma_obj = drm_gem_cma_create(drm, size);
148 if (IS_ERR(cma_obj))
149 return cma_obj;
150
151 gem_obj = &cma_obj->base;
152
153
154
155
156
157 ret = drm_gem_handle_create(file_priv, gem_obj, handle);
158
159 drm_gem_object_put(gem_obj);
160 if (ret)
161 return ERR_PTR(ret);
162
163 return cma_obj;
164}
165
166
167
168
169
170
171
172
173
174
175
/*
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * Tears down the buffer memory first, then the GEM base object, then the
 * containing allocation.  Imported (PRIME) objects are unmapped with
 * dma_buf_vunmap() and destroyed via drm_prime_gem_destroy(); locally
 * allocated objects release their memory with dma_free_wc(), pairing the
 * dma_alloc_wc() in drm_gem_cma_create().
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	if (gem_obj->import_attach) {
		/* vaddr is only set if the dma-buf was vmapped at import. */
		if (cma_obj->vaddr)
			dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	} else if (cma_obj->vaddr) {
		dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
			    cma_obj->vaddr, cma_obj->paddr);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
212 struct drm_device *drm,
213 struct drm_mode_create_dumb *args)
214{
215 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
216 struct drm_gem_cma_object *cma_obj;
217
218 if (args->pitch < min_pitch)
219 args->pitch = min_pitch;
220
221 if (args->size < args->pitch * args->height)
222 args->size = args->pitch * args->height;
223
224 cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
225 &args->handle);
226 return PTR_ERR_OR_ZERO(cma_obj);
227}
228EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248int drm_gem_cma_dumb_create(struct drm_file *file_priv,
249 struct drm_device *drm,
250 struct drm_mode_create_dumb *args)
251{
252 struct drm_gem_cma_object *cma_obj;
253
254 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
255 args->size = args->pitch * args->height;
256
257 cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
258 &args->handle);
259 return PTR_ERR_OR_ZERO(cma_obj);
260}
261EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
262
/*
 * VM operations for CMA GEM mappings: only GEM reference bookkeeping on
 * open/close.  No fault handler is installed — the whole buffer is mapped
 * up front by dma_mmap_wc() in drm_gem_cma_mmap_obj().
 */
const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
268
/* Map the whole CMA buffer into @vma with write-combined attributes. */
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0, as we want to
	 * map the whole buffer from its start.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
			  cma_obj->paddr, vma->vm_end - vma->vm_start);
	if (ret)
		/* Drop the vma's GEM reference taken by drm_gem_mmap(). */
		drm_gem_vm_close(vma);

	return ret;
}
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
309{
310 struct drm_gem_cma_object *cma_obj;
311 struct drm_gem_object *gem_obj;
312 int ret;
313
314 ret = drm_gem_mmap(filp, vma);
315 if (ret)
316 return ret;
317
318 gem_obj = vma->vm_private_data;
319 cma_obj = to_drm_gem_cma_obj(gem_obj);
320
321 return drm_gem_cma_mmap_obj(cma_obj, vma);
322}
323EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
324
325#ifndef CONFIG_MMU
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
/*
 * drm_gem_cma_get_unmapped_area - propose the mapping address for a CMA GEM
 * object on no-MMU systems
 * @filp: DRM file
 * @addr: hinted address (unused)
 * @len: length of the mapping
 * @pgoff: fake mmap offset identifying the object
 * @flags: mapping flags (unused)
 *
 * On !CONFIG_MMU kernels userspace cannot be given an arbitrary virtual
 * address; the kernel virtual address of the CMA buffer is returned instead
 * so the "mapping" aliases the buffer directly.
 *
 * Returns:
 * The buffer's kernel virtual address, or a negative errno on failure
 * (-ENODEV if the device is unplugged, -EINVAL if no matching object or no
 * vaddr, -EACCES if the caller may not map it).
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * The object may be concurrently on its way to destruction:
		 * its refcount can already be zero while the vma node is
		 * still in the offset manager.  kref_get_unless_zero() only
		 * takes a reference if the object is still alive; otherwise
		 * treat the lookup as a miss.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put(obj);

	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
394#endif
395
396
397
398
399
400
401
402
403
404
405void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
406 const struct drm_gem_object *obj)
407{
408 const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
409
410 drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
411 drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
412}
413EXPORT_SYMBOL(drm_gem_cma_print_info);
414
415
416
417
418
419
420
421
422
423
424
425
426
427struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
428{
429 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
430 struct sg_table *sgt;
431 int ret;
432
433 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
434 if (!sgt)
435 return ERR_PTR(-ENOMEM);
436
437 ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
438 cma_obj->paddr, obj->size);
439 if (ret < 0)
440 goto out;
441
442 return sgt;
443
444out:
445 kfree(sgt);
446 return ERR_PTR(ret);
447}
448EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
/*
 * drm_gem_cma_prime_import_sg_table - import a PRIME buffer into a CMA GEM
 * object
 * @dev: DRM device
 * @attach: dma-buf attachment the sg table came from
 * @sgt: scatter/gather table of the imported pages
 *
 * Only buffers that are DMA-contiguous for at least the dma-buf's size can
 * back a CMA object; anything else is rejected.  No memory is allocated —
 * the object wraps the imported pages.
 *
 * Returns:
 * The new GEM object on success, ERR_PTR()-encoded negative errno on
 * failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	/* The buffer must be contiguous to be usable as a CMA backing. */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* Create the GEM object shell; memory comes from the dma-buf. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
491
492
493
494
495
496
497
498
499
500
501
502
503
504int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
505 struct vm_area_struct *vma)
506{
507 struct drm_gem_cma_object *cma_obj;
508 int ret;
509
510 ret = drm_gem_mmap_obj(obj, obj->size, vma);
511 if (ret < 0)
512 return ret;
513
514 cma_obj = to_drm_gem_cma_obj(obj);
515 return drm_gem_cma_mmap_obj(cma_obj, vma);
516}
517EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
534{
535 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
536
537 return cma_obj->vaddr;
538}
539EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
540
541
542
543
544
545
546
547
548
549
550
551
/*
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from kernel space
 * @obj: GEM object
 * @vaddr: kernel virtual address previously returned by
 *         drm_gem_cma_prime_vmap()
 *
 * Intentionally a no-op: the kernel mapping of a CMA buffer lives for the
 * whole lifetime of the object and is only torn down in
 * drm_gem_cma_free_object().
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
557
/*
 * Default GEM object callbacks used by objects allocated through
 * drm_gem_cma_create_object_default_funcs() when the driver does not
 * install its own &drm_gem_object_funcs.
 */
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
	.free = drm_gem_cma_free_object,
	.print_info = drm_gem_cma_print_info,
	.get_sg_table = drm_gem_cma_prime_get_sg_table,
	.vmap = drm_gem_cma_prime_vmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};
565
566
567
568
569
570
571
572
573
574
575
576
577
578struct drm_gem_object *
579drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size)
580{
581 struct drm_gem_cma_object *cma_obj;
582
583 cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
584 if (!cma_obj)
585 return NULL;
586
587 cma_obj->base.funcs = &drm_gem_cma_default_funcs;
588
589 return &cma_obj->base;
590}
591EXPORT_SYMBOL(drm_gem_cma_create_object_default_funcs);
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
/*
 * drm_gem_cma_prime_import_sg_table_vmap - PRIME import that also maps the
 * buffer into kernel space
 * @dev: DRM device
 * @attach: dma-buf attachment
 * @sgt: scatter/gather table of the imported pages
 *
 * Like drm_gem_cma_prime_import_sg_table(), but additionally vmaps the
 * dma-buf so cma_obj->vaddr is usable immediately.  The vmap is taken
 * first so a failed import can unwind it; on object teardown the mapping
 * is released by the import branch in drm_gem_cma_free_object().
 *
 * Returns:
 * The new GEM object on success, ERR_PTR()-encoded negative errno on
 * failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
				       struct dma_buf_attachment *attach,
				       struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj;
	void *vaddr;

	vaddr = dma_buf_vmap(attach->dmabuf);
	if (!vaddr) {
		DRM_ERROR("Failed to vmap PRIME buffer\n");
		return ERR_PTR(-ENOMEM);
	}

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		/* Import failed: drop the mapping we took above. */
		dma_buf_vunmap(attach->dmabuf, vaddr);
		return obj;
	}

	cma_obj = to_drm_gem_cma_obj(obj);
	cma_obj->vaddr = vaddr;

	return obj;
}
EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
640