1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55static struct drm_gem_cma_object *
56__drm_gem_cma_create(struct drm_device *drm, size_t size)
57{
58 struct drm_gem_cma_object *cma_obj;
59 struct drm_gem_object *gem_obj;
60 int ret;
61
62 if (drm->driver->gem_create_object)
63 gem_obj = drm->driver->gem_create_object(drm, size);
64 else
65 gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
66 if (!gem_obj)
67 return ERR_PTR(-ENOMEM);
68 cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
69
70 ret = drm_gem_object_init(drm, gem_obj, size);
71 if (ret)
72 goto error;
73
74 ret = drm_gem_create_mmap_offset(gem_obj);
75 if (ret) {
76 drm_gem_object_release(gem_obj);
77 goto error;
78 }
79
80 return cma_obj;
81
82error:
83 kfree(cma_obj);
84 return ERR_PTR(ret);
85}
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
101 size_t size)
102{
103 struct drm_gem_cma_object *cma_obj;
104 int ret;
105
106 size = round_up(size, PAGE_SIZE);
107
108 cma_obj = __drm_gem_cma_create(drm, size);
109 if (IS_ERR(cma_obj))
110 return cma_obj;
111
112 cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
113 GFP_KERNEL | __GFP_NOWARN);
114 if (!cma_obj->vaddr) {
115 dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
116 size);
117 ret = -ENOMEM;
118 goto error;
119 }
120
121 return cma_obj;
122
123error:
124 drm_gem_object_put_unlocked(&cma_obj->base);
125 return ERR_PTR(ret);
126}
127EXPORT_SYMBOL_GPL(drm_gem_cma_create);
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145static struct drm_gem_cma_object *
146drm_gem_cma_create_with_handle(struct drm_file *file_priv,
147 struct drm_device *drm, size_t size,
148 uint32_t *handle)
149{
150 struct drm_gem_cma_object *cma_obj;
151 struct drm_gem_object *gem_obj;
152 int ret;
153
154 cma_obj = drm_gem_cma_create(drm, size);
155 if (IS_ERR(cma_obj))
156 return cma_obj;
157
158 gem_obj = &cma_obj->base;
159
160
161
162
163
164 ret = drm_gem_handle_create(file_priv, gem_obj, handle);
165
166 drm_gem_object_put_unlocked(gem_obj);
167 if (ret)
168 return ERR_PTR(ret);
169
170 return cma_obj;
171}
172
173
174
175
176
177
178
179
180
181
182void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
183{
184 struct drm_gem_cma_object *cma_obj;
185
186 cma_obj = to_drm_gem_cma_obj(gem_obj);
187
188 if (cma_obj->vaddr) {
189 dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
190 cma_obj->vaddr, cma_obj->paddr);
191 } else if (gem_obj->import_attach) {
192 drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
193 }
194
195 drm_gem_object_release(gem_obj);
196
197 kfree(cma_obj);
198}
199EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
216 struct drm_device *drm,
217 struct drm_mode_create_dumb *args)
218{
219 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
220 struct drm_gem_cma_object *cma_obj;
221
222 if (args->pitch < min_pitch)
223 args->pitch = min_pitch;
224
225 if (args->size < args->pitch * args->height)
226 args->size = args->pitch * args->height;
227
228 cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
229 &args->handle);
230 return PTR_ERR_OR_ZERO(cma_obj);
231}
232EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252int drm_gem_cma_dumb_create(struct drm_file *file_priv,
253 struct drm_device *drm,
254 struct drm_mode_create_dumb *args)
255{
256 struct drm_gem_cma_object *cma_obj;
257
258 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
259 args->size = args->pitch * args->height;
260
261 cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
262 &args->handle);
263 return PTR_ERR_OR_ZERO(cma_obj);
264}
265EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
266
/*
 * drm_gem_cma_vm_ops - VM operations for mappings of CMA GEM objects
 *
 * Only open/close are needed to maintain the GEM object reference held by
 * the mapping; there is no fault handler because drm_gem_cma_mmap_obj()
 * maps the whole buffer up front.
 */
const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
272
/*
 * drm_gem_cma_mmap_obj - map a CMA GEM object's buffer into a VMA
 * @cma_obj: object whose backing buffer is mapped
 * @vma: VMA already set up by the GEM core (drm_gem_mmap()/drm_gem_mmap_obj())
 *
 * Maps the entire backing buffer into @vma with write-combine attributes.
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by the GEM core, and set
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0, as we want
	 * dma_mmap_wc() below to map the whole buffer from its start.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
			  cma_obj->paddr, vma->vm_end - vma->vm_start);
	if (ret)
		/* Undo the GEM core setup, dropping the mapping's reference. */
		drm_gem_vm_close(vma);

	return ret;
}
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
313{
314 struct drm_gem_cma_object *cma_obj;
315 struct drm_gem_object *gem_obj;
316 int ret;
317
318 ret = drm_gem_mmap(filp, vma);
319 if (ret)
320 return ret;
321
322 gem_obj = vma->vm_private_data;
323 cma_obj = to_drm_gem_cma_obj(gem_obj);
324
325 return drm_gem_cma_mmap_obj(cma_obj, vma);
326}
327EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
328
329#ifndef CONFIG_MMU
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
/*
 * drm_gem_cma_get_unmapped_area - propose an address for mapping a GEM object
 * @filp: file object
 * @addr: requested address hint (unused)
 * @len: mapping length
 * @pgoff: fake mmap offset (in pages) identifying the GEM object
 * @flags: mapping flags (unused)
 *
 * On !CONFIG_MMU systems userspace is handed the kernel virtual address of
 * the buffer directly: look the object up by its fake offset, check access
 * permission, and return its vaddr.
 *
 * Returns the address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * An object found with a zero refcount is concurrently being
		 * torn down (teardown removes the VMA offset under the same
		 * lookup lock we hold, so it cannot complete while we are
		 * here). Only take a reference if the count is still nonzero;
		 * otherwise treat the lookup as a miss.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put_unlocked(obj);

	/* NULL vaddr (e.g. allocation never happened) cannot be mapped. */
	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
398#endif
399
400#ifdef CONFIG_DEBUG_FS
401
402
403
404
405
406
407
408
409void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
410 struct seq_file *m)
411{
412 struct drm_gem_object *obj = &cma_obj->base;
413 uint64_t off;
414
415 off = drm_vma_node_start(&obj->vma_node);
416
417 seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
418 obj->name, kref_read(&obj->refcount),
419 off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
420
421 seq_printf(m, "\n");
422}
423EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
424#endif
425
426
427
428
429
430
431
432
433
434
435
436
437
438struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
439{
440 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
441 struct sg_table *sgt;
442 int ret;
443
444 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
445 if (!sgt)
446 return NULL;
447
448 ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
449 cma_obj->paddr, obj->size);
450 if (ret < 0)
451 goto out;
452
453 return sgt;
454
455out:
456 kfree(sgt);
457 return NULL;
458}
459EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478struct drm_gem_object *
479drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
480 struct dma_buf_attachment *attach,
481 struct sg_table *sgt)
482{
483 struct drm_gem_cma_object *cma_obj;
484
485 if (sgt->nents != 1)
486 return ERR_PTR(-EINVAL);
487
488
489 cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
490 if (IS_ERR(cma_obj))
491 return ERR_CAST(cma_obj);
492
493 cma_obj->paddr = sg_dma_address(sgt->sgl);
494 cma_obj->sgt = sgt;
495
496 DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
497
498 return &cma_obj->base;
499}
500EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
501
502
503
504
505
506
507
508
509
510
511
512
513
514int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
515 struct vm_area_struct *vma)
516{
517 struct drm_gem_cma_object *cma_obj;
518 int ret;
519
520 ret = drm_gem_mmap_obj(obj, obj->size, vma);
521 if (ret < 0)
522 return ret;
523
524 cma_obj = to_drm_gem_cma_obj(obj);
525 return drm_gem_cma_mmap_obj(cma_obj, vma);
526}
527EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
544{
545 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
546
547 return cma_obj->vaddr;
548}
549EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
550
551
552
553
554
555
556
557
558
559
560
561
/*
 * drm_gem_cma_prime_vunmap - unmap counterpart to drm_gem_cma_prime_vmap()
 * @obj: GEM object (unused)
 * @vaddr: address previously returned by the vmap callback (unused)
 *
 * Intentionally a no-op: vmap hands out the buffer's existing permanent
 * kernel mapping rather than creating one, so there is nothing to undo.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do here. */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
567